galaxy-commits
details: http://www.bx.psu.edu/hg/galaxy/rev/076f572d7c9d
changeset: 3674:076f572d7c9d
user: rc
date: Wed Apr 21 10:41:30 2010 -0400
description:
lims:
- data transfer now uses rabbitmq
- datasets can now be renamed before transferring from the sequencer
- data transfer code refactored
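For orientation, a minimal sketch of the publish pattern this changeset introduces, written against the amqplib client_0_8 API used in the diff below. The broker address, credentials, exchange, and routing key here are illustrative placeholders, not values taken from the changeset.

    from amqplib import client_0_8 as amqp

    # Connect to the RabbitMQ broker (address and credentials are placeholders).
    conn = amqp.Connection(host='127.0.0.1:5672',
                           userid='galaxy',
                           password='galaxy',
                           virtual_host='galaxy_messaging_engine',
                           insist=False)
    chan = conn.channel()
    # Tag the message so the consumer can dispatch on msg_type, and mark it
    # persistent (delivery_mode=2) so it survives a broker restart.
    msg = amqp.Message('<data_transfer>...</data_transfer>',
                       content_type='text/plain',
                       application_headers={'msg_type': 'data_transfer'})
    msg.properties['delivery_mode'] = 2
    chan.basic_publish(msg, exchange='galaxy_exchange', routing_key='bar_code_scanner')
    chan.close()
    conn.close()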
diffstat:
lib/galaxy/config.py | 4 +
lib/galaxy/model/__init__.py | 28 +-
lib/galaxy/web/controllers/requests_admin.py | 121 +++++++-
lib/galaxy/web/framework/__init__.py | 1 +
run_galaxy_listener.sh | 2 +-
scripts/galaxy_messaging/client/amqp_publisher.py | 4 +-
scripts/galaxy_messaging/server/amqp_consumer.py | 66 +++-
scripts/galaxy_messaging/server/data_transfer.py | 241 +++++++++-------
scripts/galaxy_messaging/server/galaxydb_interface.py | 17 +-
scripts/galaxy_messaging/server/galaxyweb_interface.py | 132 +++++++++
templates/admin/requests/dataset.mako | 71 +++++
templates/admin/requests/get_data.mako | 67 ++-
universe_wsgi.ini.sample | 2 +-
13 files changed, 577 insertions(+), 179 deletions(-)
diffs (1118 lines):
diff -r 207d0d70483b -r 076f572d7c9d lib/galaxy/config.py
--- a/lib/galaxy/config.py Tue Apr 20 15:36:03 2010 -0400
+++ b/lib/galaxy/config.py Wed Apr 21 10:41:30 2010 -0400
@@ -123,6 +123,10 @@
self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', 'True' ) )
else:
self.enable_cloud_execution = string_as_bool( kwargs.get( 'enable_cloud_execution', 'False' ) )
+ # Galaxy messaging (AMQP) configuration options
+ self.amqp = {}
+ for k, v in global_conf_parser.items("galaxy_amqp"):
+ self.amqp[k] = v
def get( self, key, default ):
return self.config_dict.get( key, default )
def get_bool( self, key, default ):
diff -r 207d0d70483b -r 076f572d7c9d lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py Tue Apr 20 15:36:03 2010 -0400
+++ b/lib/galaxy/model/__init__.py Wed Apr 21 10:41:30 2010 -0400
@@ -18,6 +18,7 @@
import logging
log = logging.getLogger( __name__ )
from sqlalchemy.orm import object_session
+import pexpect
datatypes_registry = galaxy.datatypes.registry.Registry() #Default Value Required for unit tests
@@ -1455,7 +1456,9 @@
class Sample( object ):
transfer_status = Bunch( NOT_STARTED = 'Not started',
- IN_PROGRESS = 'In progress',
+ IN_QUEUE = 'In queue',
+ TRANSFERRING = 'Transferring dataset',
+ ADD_TO_LIBRARY = 'Adding to data library',
COMPLETE = 'Complete',
ERROR = 'Error')
def __init__(self, name=None, desc=None, request=None, form_values=None,
@@ -1474,22 +1477,33 @@
return None
def untransferred_dataset_files(self):
count = 0
- for df, status in self.dataset_files:
- if status == self.transfer_status.NOT_STARTED:
+ for df in self.dataset_files:
+ if df['status'] == self.transfer_status.NOT_STARTED:
count = count + 1
return count
def inprogress_dataset_files(self):
count = 0
- for df, status in self.dataset_files:
- if status == self.transfer_status.IN_PROGRESS:
+ for df in self.dataset_files:
+ if df['status'] not in [self.transfer_status.NOT_STARTED, self.transfer_status.COMPLETE]:
count = count + 1
return count
def transferred_dataset_files(self):
count = 0
- for df, status in self.dataset_files:
- if status == self.transfer_status.COMPLETE:
+ for df in self.dataset_files:
+ if df['status'] == self.transfer_status.COMPLETE:
count = count + 1
return count
+ def dataset_size(self, filepath):
+ def print_ticks(d):
+ pass
+ datatx_info = self.request.type.datatx_info
+ cmd = 'ssh %s@%s "du -sh %s"' % ( datatx_info['username'],
+ datatx_info['host'],
+ filepath)
+ output = pexpect.run(cmd, events={'.ssword:*': datatx_info['password']+'\r\n',
+ pexpect.TIMEOUT:print_ticks},
+ timeout=10)
+ return output.split('\t')[0]
class SampleState( object ):
def __init__(self, name=None, desc=None, request_type=None):
diff -r 207d0d70483b -r 076f572d7c9d lib/galaxy/web/controllers/requests_admin.py
--- a/lib/galaxy/web/controllers/requests_admin.py Tue Apr 20 15:36:03 2010 -0400
+++ b/lib/galaxy/web/controllers/requests_admin.py Wed Apr 21 10:41:30 2010 -0400
@@ -12,6 +12,7 @@
from sqlalchemy.sql import select
import pexpect
import ConfigParser, threading, time
+from amqplib import client_0_8 as amqp
log = logging.getLogger( __name__ )
@@ -64,7 +65,6 @@
.filter( self.event_class.table.c.id.in_(select(columns=[func.max(self.event_class.table.c.id)],
from_obj=self.event_class.table,
group_by=self.event_class.table.c.request_id)))
- #print column_filter, q
return q
def get_accepted_filters( self ):
""" Returns a list of accepted filters for this column. """
@@ -1509,8 +1509,11 @@
params = util.Params( kwd )
message = util.restore_text( params.get( 'message', '' ) )
status = params.get( 'status', 'done' )
- folder_path = util.restore_text( params.get( 'folder_path', '' ) )
+ folder_path = util.restore_text( params.get( 'folder_path',
+ sample.request.type.datatx_info['data_dir'] ) )
files_list = util.listify( params.get( 'files_list', '' ) )
+ if params.get( 'start_transfer_button', False ) == 'True':
+ return self.__start_datatx(trans, sample)
if not folder_path:
return trans.fill_template( '/admin/requests/get_data.mako',
sample=sample, files=[],
@@ -1544,32 +1547,43 @@
dataset_files=sample.dataset_files,
folder_path=folder_path )
elif params.get( 'remove_dataset_button', False ):
+ # get the filenames from the remote host
+ files = self.__get_files(trans, sample, folder_path)
dataset_index = int(params.get( 'dataset_index', 0 ))
del sample.dataset_files[dataset_index]
trans.sa_session.add( sample )
trans.sa_session.flush()
return trans.fill_template( '/admin/requests/get_data.mako',
- sample=sample,
- dataset_files=sample.dataset_files)
- elif params.get( 'start_transfer_button', False ):
+ sample=sample, files=files,
+ dataset_files=sample.dataset_files,
+ folder_path=folder_path)
+ elif params.get( 'select_files_button', False ):
folder_files = []
if len(files_list):
for f in files_list:
+ filepath = os.path.join(folder_path, f)
if f[-1] == os.sep:
# the selected item is a folder so transfer all the
# folder contents
- self.__get_files_in_dir(trans, sample, os.path.join(folder_path, f))
+ self.__get_files_in_dir(trans, sample, filepath)
else:
- sample.dataset_files.append([os.path.join(folder_path, f),
- sample.transfer_status.NOT_STARTED])
+ sample.dataset_files.append(dict(filepath=filepath,
+ status=sample.transfer_status.NOT_STARTED,
+ name=filepath.split('/')[-1],
+ error_msg='',
+ size=sample.dataset_size(filepath)))
trans.sa_session.add( sample )
trans.sa_session.flush()
- return self.__start_datatx(trans, sample)
return trans.response.send_redirect( web.url_for( controller='requests_admin',
action='show_datatx_page',
sample_id=trans.security.encode_id(sample.id),
folder_path=folder_path))
+ return trans.response.send_redirect( web.url_for( controller='requests_admin',
+ action='show_datatx_page',
+ sample_id=trans.security.encode_id(sample.id),
+ folder_path=folder_path))
+
def __setup_datatx_user(self, trans, library, folder):
'''
This method sets up the datatx user:
@@ -1620,7 +1634,62 @@
trans.sa_session.add( dp )
trans.sa_session.flush()
return datatx_user
-
+
+ def __send_message(self, trans, datatx_info, sample):
+ '''
+ This method creates the xml message and sends it to the rabbitmq server
+ '''
+ # first create the xml message based on the following template
+ xml = \
+ ''' <data_transfer>
+ <data_host>%(DATA_HOST)s</data_host>
+ <data_user>%(DATA_USER)s</data_user>
+ <data_password>%(DATA_PASSWORD)s</data_password>
+ <sample_id>%(SAMPLE_ID)s</sample_id>
+ <library_id>%(LIBRARY_ID)s</library_id>
+ <folder_id>%(FOLDER_ID)s</folder_id>
+ %(DATASETS)s
+ </data_transfer>'''
+ dataset_xml = \
+ '''<dataset>
+ <index>%(INDEX)s</index>
+ <name>%(NAME)s</name>
+ <file>%(FILE)s</file>
+ </dataset>'''
+ datasets = ''
+ for index, dataset in enumerate(sample.dataset_files):
+ if dataset['status'] == sample.transfer_status.NOT_STARTED:
+ datasets = datasets + dataset_xml % dict(INDEX=str(index),
+ NAME=dataset['name'],
+ FILE=dataset['filepath'])
+ sample.dataset_files[index]['status'] = sample.transfer_status.IN_QUEUE
+
+ trans.sa_session.add( sample )
+ trans.sa_session.flush()
+ data = xml % dict(DATA_HOST=datatx_info['host'],
+ DATA_USER=datatx_info['username'],
+ DATA_PASSWORD=datatx_info['password'],
+ SAMPLE_ID=str(sample.id),
+ LIBRARY_ID=str(sample.library.id),
+ FOLDER_ID=str(sample.folder.id),
+ DATASETS=datasets)
+ # now send this message
+ conn = amqp.Connection(host=trans.app.config.amqp['host']+":"+trans.app.config.amqp['port'],
+ userid=trans.app.config.amqp['userid'],
+ password=trans.app.config.amqp['password'],
+ virtual_host=trans.app.config.amqp['virtual_host'],
+ insist=False)
+ chan = conn.channel()
+ msg = amqp.Message(data,
+ content_type='text/plain',
+ application_headers={'msg_type': 'data_transfer'})
+ msg.properties["delivery_mode"] = 2
+ chan.basic_publish(msg,
+ exchange=trans.app.config.amqp['exchange'],
+ routing_key=trans.app.config.amqp['routing_key'])
+ chan.close()
+ conn.close()
+
def __start_datatx(self, trans, sample):
# data transfer user
datatx_user = self.__setup_datatx_user(trans, sample.library, sample.folder)
@@ -1635,6 +1704,11 @@
sample_id=trans.security.encode_id(sample.id),
status='error',
message=message))
+ self.__send_message(trans, datatx_info, sample)
+ return trans.response.send_redirect( web.url_for( controller='requests_admin',
+ action='show_datatx_page',
+ sample_id=trans.security.encode_id(sample.id),
+ folder_path=datatx_info['data_dir']))
error_message = ''
transfer_script = "scripts/galaxy_messaging/server/data_transfer.py"
for index, dataset in enumerate(sample.dataset_files):
@@ -1670,6 +1744,33 @@
action='show_datatx_page',
sample_id=trans.security.encode_id(sample.id),
folder_path=os.path.dirname(dfile)))
+
+ @web.expose
+ @web.require_admin
+ def dataset_details( self, trans, **kwd ):
+ try:
+ sample = trans.sa_session.query( trans.app.model.Sample ).get( trans.security.decode_id(kwd['sample_id']) )
+ except:
+ return trans.response.send_redirect( web.url_for( controller='requests_admin',
+ action='list',
+ status='error',
+ message="Invalid sample ID" ) )
+ params = util.Params( kwd )
+ message = util.restore_text( params.get( 'message', '' ) )
+ status = params.get( 'status', 'done' )
+ dataset_index = int( params.get( 'dataset_index', '' ) )
+ if params.get('save', '') == 'Save':
+ sample.dataset_files[dataset_index]['name'] = util.restore_text( params.get( 'name',
+ sample.dataset_files[dataset_index]['name'] ) )
+ trans.sa_session.add( sample )
+ trans.sa_session.flush()
+ status = 'done'
+ message = 'Saved the changes made to the dataset.'
+ return trans.fill_template( '/admin/requests/dataset.mako',
+ sample=sample,
+ dataset_index=dataset_index,
+ message=message,
+ status=status)
##
#### Request Type Stuff ###################################################
##
diff -r 207d0d70483b -r 076f572d7c9d lib/galaxy/web/framework/__init__.py
--- a/lib/galaxy/web/framework/__init__.py Tue Apr 20 15:36:03 2010 -0400
+++ b/lib/galaxy/web/framework/__init__.py Wed Apr 21 10:41:30 2010 -0400
@@ -32,6 +32,7 @@
from sqlalchemy import and_
pkg_resources.require( "pexpect" )
+pkg_resources.require( "amqplib" )
import logging
log = logging.getLogger( __name__ )
diff -r 207d0d70483b -r 076f572d7c9d run_galaxy_listener.sh
--- a/run_galaxy_listener.sh Tue Apr 20 15:36:03 2010 -0400
+++ b/run_galaxy_listener.sh Wed Apr 21 10:41:30 2010 -0400
@@ -1,4 +1,4 @@
#!/bin/sh
cd `dirname $0`
-python scripts/galaxy_messaging/server/amqp_consumer.py universe_wsgi.ini >> galaxy_listener.log 2>&1
\ No newline at end of file
+python scripts/galaxy_messaging/server/amqp_consumer.py universe_wsgi.ini 2>&1
\ No newline at end of file
diff -r 207d0d70483b -r 076f572d7c9d scripts/galaxy_messaging/client/amqp_publisher.py
--- a/scripts/galaxy_messaging/client/amqp_publisher.py Tue Apr 20 15:36:03 2010 -0400
+++ b/scripts/galaxy_messaging/client/amqp_publisher.py Wed Apr 21 10:41:30 2010 -0400
@@ -35,7 +35,9 @@
virtual_host=amqp_config['virtual_host'],
insist=False)
chan = conn.channel()
- msg = amqp.Message(data)
+ msg = amqp.Message(data,
+ content_type='text/plain',
+ application_headers={'msg_type': 'sample_state_update'})
msg.properties["delivery_mode"] = 2
chan.basic_publish(msg,
exchange=amqp_config['exchange'],
diff -r 207d0d70483b -r 076f572d7c9d scripts/galaxy_messaging/server/amqp_consumer.py
--- a/scripts/galaxy_messaging/server/amqp_consumer.py Tue Apr 20 15:36:03 2010 -0400
+++ b/scripts/galaxy_messaging/server/amqp_consumer.py Wed Apr 21 10:41:30 2010 -0400
@@ -13,6 +13,7 @@
import sys, os
import optparse
import xml.dom.minidom
+import subprocess
from galaxydb_interface import GalaxyDbInterface
assert sys.version_info[:2] >= ( 2, 4 )
@@ -27,8 +28,13 @@
from amqplib import client_0_8 as amqp
import logging
-logging.basicConfig(level=logging.DEBUG)
-log = logging.getLogger( 'GalaxyAMQP' )
+log = logging.getLogger("GalaxyAMQP")
+log.setLevel(logging.DEBUG)
+fh = logging.FileHandler("galaxy_listener.log")
+fh.setLevel(logging.DEBUG)
+formatter = logging.Formatter("%(asctime)s - %(name)s - %(message)s")
+fh.setFormatter(formatter)
+log.addHandler(fh)
global dbconnstr
@@ -43,19 +49,47 @@
rc = rc + node.data
return rc
+def get_value_index(dom, tag_name, index):
+ '''
+ This method extracts the tag value from the xml message
+ '''
+ try:
+ nodelist = dom.getElementsByTagName(tag_name)[index].childNodes
+ except:
+ return None
+ rc = ""
+ for node in nodelist:
+ if node.nodeType == node.TEXT_NODE:
+ rc = rc + node.data
+ return rc
+
def recv_callback(msg):
- dom = xml.dom.minidom.parseString(msg.body)
- barcode = get_value(dom, 'barcode')
- state = get_value(dom, 'state')
- log.debug('Barcode: '+barcode)
- log.debug('State: '+state)
- # update the galaxy db
- galaxy = GalaxyDbInterface(dbconnstr)
- sample_id = galaxy.get_sample_id(field_name='bar_code', value=barcode)
- if sample_id == -1:
- log.debug('Invalid barcode.')
- return
- galaxy.change_state(sample_id, state)
+ # check the meesage type.
+ msg_type = msg.properties['application_headers'].get('msg_type')
+ log.debug('\nMESSAGE RECVD: '+str(msg_type))
+ if msg_type == 'data_transfer':
+ log.debug('DATA TRANSFER')
+ # fork a new process to transfer datasets
+ transfer_script = "scripts/galaxy_messaging/server/data_transfer.py"
+ cmd = ( "python",
+ transfer_script,
+ msg.body )
+ pid = subprocess.Popen(cmd).pid
+ log.debug('Started process (%i): %s' % (pid, str(cmd)))
+ elif msg_type == 'sample_state_update':
+ log.debug('SAMPLE STATE UPDATE')
+ dom = xml.dom.minidom.parseString(msg.body)
+ barcode = get_value(dom, 'barcode')
+ state = get_value(dom, 'state')
+ log.debug('Barcode: '+barcode)
+ log.debug('State: '+state)
+ # update the galaxy db
+ galaxy = GalaxyDbInterface(dbconnstr)
+ sample_id = galaxy.get_sample_id(field_name='bar_code', value=barcode)
+ if sample_id == -1:
+ log.debug('Invalid barcode.')
+ return
+ galaxy.change_state(sample_id, state)
def main():
if len(sys.argv) < 2:
@@ -66,8 +100,8 @@
global dbconnstr
dbconnstr = config.get("app:main", "database_connection")
amqp_config = {}
- for option in config.options("galaxy:amqp"):
- amqp_config[option] = config.get("galaxy:amqp", option)
+ for option in config.options("galaxy_amqp"):
+ amqp_config[option] = config.get("galaxy_amqp", option)
log.debug(str(amqp_config))
conn = amqp.Connection(host=amqp_config['host']+":"+amqp_config['port'],
userid=amqp_config['userid'],
diff -r 207d0d70483b -r 076f572d7c9d scripts/galaxy_messaging/server/data_transfer.py
--- a/scripts/galaxy_messaging/server/data_transfer.py Tue Apr 20 15:36:03 2010 -0400
+++ b/scripts/galaxy_messaging/server/data_transfer.py Wed Apr 21 10:41:30 2010 -0400
@@ -8,28 +8,36 @@
Usage:
-python data_transfer.py <sequencer_host>
- <username>
- <password>
- <source_file>
- <sample_id>
- <dataset_index>
- <library_id>
- <folder_id>
+python data_transfer.py <data_transfer_xml>
+
+
"""
import ConfigParser
import sys, os, time, traceback
import optparse
import urllib,urllib2, cookielib, shutil
import logging, time
+import xml.dom.minidom
+
+sp = sys.path[0]
+
from galaxydb_interface import GalaxyDbInterface
assert sys.version_info[:2] >= ( 2, 4 )
+new_path = [ sp ]
+new_path.extend( sys.path )
+sys.path = new_path
+
+from galaxyweb_interface import GalaxyWebInterface
+
+assert sys.version_info[:2] >= ( 2, 4 )
new_path = [ os.path.join( os.getcwd(), "lib" ) ]
new_path.extend( sys.path[1:] ) # remove scripts/ from the path
sys.path = new_path
+
from galaxy.util.json import from_json_string, to_json_string
+from galaxy.model import Sample
from galaxy import eggs
import pkg_resources
pkg_resources.require( "pexpect" )
@@ -38,28 +46,39 @@
pkg_resources.require( "simplejson" )
import simplejson
-logging.basicConfig(filename=sys.stderr, level=logging.DEBUG,
- format="%(asctime)s [%(levelname)s] %(message)s")
-
-class DataTransferException(Exception):
- def __init__(self, value):
- self.msg = value
- def __str__(self):
- return repr(self.msg)
+log = logging.getLogger("datatx_"+str(os.getpid()))
+log.setLevel(logging.DEBUG)
+fh = logging.FileHandler("data_transfer.log")
+fh.setLevel(logging.DEBUG)
+formatter = logging.Formatter("%(asctime)s - %(name)s - %(message)s")
+fh.setFormatter(formatter)
+log.addHandler(fh)
class DataTransfer(object):
- def __init__(self, host, username, password, remote_file, sample_id,
- dataset_index, library_id, folder_id):
- self.host = host
- self.username = username
- self.password = password
- self.remote_file = remote_file
- self.sample_id = sample_id
- self.dataset_index = dataset_index
- self.library_id = library_id
- self.folder_id = folder_id
+ def __init__(self, msg):
+ log.info(msg)
+ self.dom = xml.dom.minidom.parseString(msg)
+ self.host = self.get_value(self.dom, 'data_host')
+ self.username = self.get_value(self.dom, 'data_user')
+ self.password = self.get_value(self.dom, 'data_password')
+ self.sample_id = self.get_value(self.dom, 'sample_id')
+ self.library_id = self.get_value(self.dom, 'library_id')
+ self.folder_id = self.get_value(self.dom, 'folder_id')
+ self.dataset_files = []
+ count=0
+ while True:
+ index = self.get_value_index(self.dom, 'index', count)
+ file = self.get_value_index(self.dom, 'file', count)
+ name = self.get_value_index(self.dom, 'name', count)
+ if file:
+ self.dataset_files.append(dict(name=name,
+ index=int(index),
+ file=file))
+ else:
+ break
+ count=count+1
try:
# Retrieve the upload user login information from the config file
config = ConfigParser.ConfigParser()
@@ -75,11 +94,13 @@
os.mkdir(self.server_dir)
if not os.path.exists(self.server_dir):
raise Exception
+ # connect to db
+ self.galaxydb = GalaxyDbInterface(self.database_connection)
except:
- logging.error(traceback.format_exc())
- logging.error('FATAL ERROR')
+ log.error(traceback.format_exc())
+ log.error('FATAL ERROR')
if self.database_connection:
- self.update_status('Error')
+ self.error_and_exit('Error')
sys.exit(1)
def start(self):
@@ -88,13 +109,13 @@
to the data library & finally updates the data transfer status in the db
'''
# datatx
- self.transfer_file()
+ self.transfer_files()
# add the dataset to the given library
self.add_to_library()
# update the data transfer status in the db
- self.update_status('Complete')
+ self.update_status(Sample.transfer_status.COMPLETE)
# cleanup
- self.cleanup()
+ #self.cleanup()
sys.exit(0)
def cleanup(self):
@@ -114,34 +135,39 @@
This method is called any exception is raised. This prints the traceback
and terminates this script
'''
- logging.error(traceback.format_exc())
- logging.error('FATAL ERROR.'+msg)
- self.update_status('Error.'+msg)
+ log.error(traceback.format_exc())
+ log.error('FATAL ERROR.'+msg)
+ self.update_status('Error.', 'All', msg)
sys.exit(1)
- def transfer_file(self):
+ def transfer_files(self):
'''
This method executes a scp process using pexpect library to transfer
the dataset file from the remote sequencer to the Galaxy server
'''
def print_ticks(d):
pass
- try:
- cmd = "scp %s@%s:%s %s" % ( self.username,
- self.host,
- self.remote_file,
- self.server_dir)
- logging.debug(cmd)
- output = pexpect.run(cmd, events={'.ssword:*': self.password+'\r\n',
- pexpect.TIMEOUT:print_ticks},
- timeout=10)
- logging.debug(output)
- if not os.path.exists(os.path.join(self.server_dir, os.path.basename(self.remote_file))):
- raise DataTransferException('Could not find the local file after transfer (%s)' % os.path.join(self.server_dir, os.path.basename(self.remote_file)))
- except DataTransferException, (e):
- self.error_and_exit(e.msg)
- except:
- self.error_and_exit()
+ for i, df in enumerate(self.dataset_files):
+ self.update_status(Sample.transfer_status.TRANSFERRING, df['index'])
+ try:
+ cmd = "scp %s@%s:%s %s/%s" % ( self.username,
+ self.host,
+ df['file'],
+ self.server_dir,
+ df['name'])
+ log.debug(cmd)
+ output = pexpect.run(cmd, events={'.ssword:*': self.password+'\r\n',
+ pexpect.TIMEOUT:print_ticks},
+ timeout=10)
+ log.debug(output)
+ path = os.path.join(self.server_dir, os.path.basename(df['file']))
+ if not os.path.exists(path):
+ msg = 'Could not find the local file after transfer (%s)' % path
+ log.error(msg)
+ raise Exception(msg)
+ except Exception, e:
+ msg = traceback.format_exc()
+ self.update_status('Error', df['index'], msg)
def add_to_library(self):
@@ -149,73 +175,72 @@
This method adds the dataset file to the target data library & folder
by opening the corresponding url in Galaxy server running.
'''
- try:
- logging.debug('Adding %s to library...' % os.path.basename(self.remote_file))
- # create url
- base_url = "http://%s:%s" % (self.server_host, self.server_port)
- # login
- url = "%s/user/login?email=%s&password=%s" % (base_url, self.datatx_email, self.datatx_password)
- cj = cookielib.CookieJar()
- opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
- f = opener.open(url)
- if f.read().find("ogged in as "+self.datatx_email) == -1:
- # if the user doesnt exist, create the user
- url = "%s/user/create?email=%s&username=%s&password=%s&confirm=%s&create_user_button=Submit" % ( base_url, self.datatx_email, self.datatx_email, self.datatx_password, self.datatx_password )
- f = opener.open(url)
- if f.read().find("ogged in as "+self.datatx_email) == -1:
- raise DataTransferException("The "+self.datatx_email+" user could not login to Galaxy")
- # after login, add dataset to the library
- params = urllib.urlencode(dict( cntrller='library_admin',
- tool_id='upload1',
- tool_state='None',
- library_id=self.library_id,
- folder_id=self.folder_id,
- upload_option='upload_directory',
- file_type='auto',
- server_dir=os.path.basename(self.server_dir),
- dbkey='',
- runtool_btn='Upload to library'))
- #url = "http://localhost:8080/library_common/upload_library_dataset?cntrller=librar…"
- #url = base_url+"/library_common/upload_library_dataset?library_id=adb5f5c93f827949&tool_id=upload1&file_type=auto&server_dir=datatx_22858&dbkey=%3F&upload_option=upload_directory&folder_id=529fd61ab1c6cc36&cntrller=library_admin&tool_state=None&runtool_btn=Upload+to+library"
- url = base_url+"/library_common/upload_library_dataset"
- logging.debug(url)
- logging.debug(params)
- f = opener.open(url, params)
- if f.read().find("Data Library") == -1:
- raise DataTransferException("Dataset could not be uploaded to the data library")
- # finally logout
- f = opener.open(base_url+'/user/logout')
- if f.read().find("You have been logged out.") == -1:
- raise DataTransferException("The "+self.datatx_email+" user could not logout of Galaxy")
- except DataTransferException, (e):
- self.error_and_exit(e.msg)
- except:
- self.error_and_exit()
+ self.update_status(Sample.transfer_status.ADD_TO_LIBRARY)
+ galaxyweb = GalaxyWebInterface(self.server_host, self.server_port,
+ self.datatx_email, self.datatx_password)
+ galaxyweb.add_to_library(self.server_dir, self.library_id, self.folder_id)
+ galaxyweb.logout()
- def update_status(self, status):
+ def update_status(self, status, dataset_index='All', msg=''):
'''
Update the data transfer status for this dataset in the database
'''
try:
- galaxy = GalaxyDbInterface(self.database_connection)
- df = from_json_string(galaxy.get_sample_dataset_files(self.sample_id))
- logging.debug(df)
- df[self.dataset_index][1] = status
- galaxy.set_sample_dataset_files(self.sample_id, to_json_string(df))
- logging.debug("######################\n"+str(from_json_string(galaxy.get_sample_dataset_files(self.sample_id))[self.dataset_index]))
+ log.debug('Setting status "%s" for sample "%s"' % ( status, str(dataset_index) ) )
+ df = from_json_string(self.galaxydb.get_sample_dataset_files(self.sample_id))
+ if dataset_index == 'All':
+ for dataset in self.dataset_files:
+ df[dataset['index']]['status'] = status
+ if status == 'Error':
+ df[dataset['index']]['error_msg'] = msg
+ else:
+ df[dataset['index']]['error_msg'] = ''
+
+ else:
+ df[dataset_index]['status'] = status
+ if status == 'Error':
+ df[dataset_index]['error_msg'] = msg
+ else:
+ df[dataset_index]['error_msg'] = ''
+
+ self.galaxydb.set_sample_dataset_files(self.sample_id, to_json_string(df))
+ log.debug('done.')
except:
- logging.error(traceback.format_exc())
- logging.error('FATAL ERROR')
+ log.error(traceback.format_exc())
+ log.error('FATAL ERROR')
sys.exit(1)
+
+ def get_value(self, dom, tag_name):
+ '''
+ This method extracts the tag value from the xml message
+ '''
+ nodelist = dom.getElementsByTagName(tag_name)[0].childNodes
+ rc = ""
+ for node in nodelist:
+ if node.nodeType == node.TEXT_NODE:
+ rc = rc + node.data
+ return rc
+
+ def get_value_index(self, dom, tag_name, index):
+ '''
+ This method extracts the tag value from the xml message
+ '''
+ try:
+ nodelist = dom.getElementsByTagName(tag_name)[index].childNodes
+ except:
+ return None
+ rc = ""
+ for node in nodelist:
+ if node.nodeType == node.TEXT_NODE:
+ rc = rc + node.data
+ return rc
if __name__ == '__main__':
- logging.info('STARTING %i %s' % (os.getpid(), str(sys.argv)))
- logging.info('daemonized %i' % os.getpid())
+ log.info('STARTING %i %s' % (os.getpid(), str(sys.argv)))
#
# Start the daemon
- #
- dt = DataTransfer(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4],
- int(sys.argv[5]), int(sys.argv[6]), sys.argv[7], sys.argv[8])
+ #
+ dt = DataTransfer(sys.argv[1])
dt.start()
sys.exit(0)
diff -r 207d0d70483b -r 076f572d7c9d scripts/galaxy_messaging/server/galaxydb_interface.py
--- a/scripts/galaxy_messaging/server/galaxydb_interface.py Tue Apr 20 15:36:03 2010 -0400
+++ b/scripts/galaxy_messaging/server/galaxydb_interface.py Wed Apr 21 10:41:30 2010 -0400
@@ -20,8 +20,8 @@
from sqlalchemy import *
from sqlalchemy.orm import sessionmaker
-logging.basicConfig(level=logging.DEBUG)
-log = logging.getLogger( 'GalaxyDbInterface' )
+#logging.basicConfig(level=logging.DEBUG)
+#log = logging.getLogger( 'GalaxyDbInterface' )
class GalaxyDbInterface(object):
@@ -53,9 +53,8 @@
x = result.fetchone()
if x:
sample_id = x[0]
- log.debug('Sample ID: %i' % sample_id)
+ #log.debug('Sample ID: %i' % sample_id)
return sample_id
- log.warning('This sample %s %s does not belong to any sample in the database.' % (field_name, value))
return -1
def current_state(self, sample_id):
@@ -74,16 +73,16 @@
subsubquery = select(columns=[self.sample_table.c.request_id],
whereclause=self.sample_table.c.id==sample_id)
self.request_id = subsubquery.execute().fetchall()[0][0]
- log.debug('REQUESTID: %i' % self.request_id)
+ #log.debug('REQUESTID: %i' % self.request_id)
subquery = select(columns=[self.request_table.c.request_type_id],
whereclause=self.request_table.c.id==self.request_id)
request_type_id = subquery.execute().fetchall()[0][0]
- log.debug('REQUESTTYPEID: %i' % request_type_id)
+ #log.debug('REQUESTTYPEID: %i' % request_type_id)
query = select(columns=[self.state_table.c.id, self.state_table.c.name],
whereclause=self.state_table.c.request_type_id==request_type_id,
order_by=self.state_table.c.id.asc())
states = query.execute().fetchall()
- log.debug('POSSIBLESTATES: '+ str(states))
+ #log.debug('POSSIBLESTATES: '+ str(states))
return states
def change_state(self, sample_id, new_state=None):
@@ -100,7 +99,7 @@
new_state_id = state_id
if new_state_id == -1:
return
- log.debug('Updating sample_id %i state to %s' % (sample_id, new_state))
+ #log.debug('Updating sample_id %i state to %s' % (sample_id, new_state))
i = self.event_table.insert()
i.execute(update_time=datetime.utcnow(),
create_time=datetime.utcnow(),
@@ -120,7 +119,7 @@
break
if request_complete:
request_state = 'Complete'
- log.debug('Updating request_id %i state to "%s"' % (self.request_id, request_state))
+ #log.debug('Updating request_id %i state to "%s"' % (self.request_id, request_state))
i = self.request_event_table.insert()
i.execute(update_time=datetime.utcnow(),
create_time=datetime.utcnow(),
diff -r 207d0d70483b -r 076f572d7c9d scripts/galaxy_messaging/server/galaxyweb_interface.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/scripts/galaxy_messaging/server/galaxyweb_interface.py Wed Apr 21 10:41:30 2010 -0400
@@ -0,0 +1,132 @@
+import ConfigParser
+import sys, os
+import serial
+import array
+import time
+import optparse,array
+import shutil, traceback
+import urllib,urllib2, cookielib
+
+assert sys.version_info[:2] >= ( 2, 4 )
+new_path = [ os.path.join( os.getcwd(), "lib" ) ]
+new_path.extend( sys.path[1:] ) # remove scripts/ from the path
+sys.path = new_path
+
+from galaxy import eggs
+import pkg_resources
+
+import pkg_resources
+pkg_resources.require( "pycrypto" )
+
+from Crypto.Cipher import Blowfish
+from Crypto.Util.randpool import RandomPool
+from Crypto.Util import number
+
+
+class GalaxyWebInterface(object):
+ def __init__(self, server_host, server_port, datatx_email, datatx_password):
+ self.server_host = server_host#config.get("main", "server_host")
+ self.server_port = server_port#config.get("main", "server_port")
+ self.datatx_email = datatx_email#config.get("main", "datatx_email")
+ self.datatx_password = datatx_password#config.get("main", "datatx_password")
+ try:
+ # create url
+ self.base_url = "http://%s:%s" % (self.server_host, self.server_port)
+ # login
+ url = "%s/user/login?email=%s&password=%s&login_button=Login" % (self.base_url, self.datatx_email, self.datatx_password)
+ cj = cookielib.CookieJar()
+ self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
+ #print url
+ f = self.opener.open(url)
+ if f.read().find("ogged in as "+self.datatx_email) == -1:
+ # if the user doesnt exist, create the user
+ url = "%s/user/create?email=%s&username=%s&password=%s&confirm=%s&create_user_button=Submit" % ( self.base_url, self.datatx_email, self.datatx_email, self.datatx_password, self.datatx_password )
+ f = self.opener.open(url)
+ if f.read().find("ogged in as "+self.datatx_email) == -1:
+ raise "The "+self.datatx_email+" user could not login to Galaxy"
+ except:
+ print traceback.format_exc()
+ sys.exit(1)
+
+ def add_to_library(self, server_dir, library_id, folder_id, dbkey=''):
+ '''
+ This method adds the dataset file to the target data library & folder
+ by opening the corresponding url in Galaxy server running.
+ '''
+ try:
+ params = urllib.urlencode(dict( cntrller='library_admin',
+ tool_id='upload1',
+ tool_state='None',
+ library_id=self.encode_id(library_id),
+ folder_id=self.encode_id(folder_id),
+ upload_option='upload_directory',
+ file_type='auto',
+ server_dir=os.path.basename(server_dir),
+ dbkey=dbkey,
+ show_dataset_id='True',
+ runtool_btn='Upload to library'))
+ #url = "http://localhost:8080/library_common/upload_library_dataset?cntrller=librar…"
+ #url = base_url+"/library_common/upload_library_dataset?library_id=adb5f5c93f827949&tool_id=upload1&file_type=auto&server_dir=datatx_22858&dbkey=%3F&upload_option=upload_directory&folder_id=529fd61ab1c6cc36&cntrller=library_admin&tool_state=None&runtool_btn=Upload+to+library"
+ url = self.base_url+"/library_common/upload_library_dataset"
+ #print url
+ #print params
+ f = self.opener.open(url, params)
+ if f.read().find("Data Library") == -1:
+ raise "Dataset could not be uploaded to the data library"
+ except:
+ print traceback.format_exc()
+ sys.exit(1)
+
+ def import_to_history(self, ldda_id, library_id, folder_id):
+ try:
+ params = urllib.urlencode(dict( cntrller='library_admin',
+ show_deleted='False',
+ library_id=self.encode_id(library_id),
+ folder_id=self.encode_id(folder_id),
+ ldda_ids=self.encode_id(ldda_id),
+ do_action='import_to_history',
+ use_panels='False'))
+ #url = "http://lion.bx.psu.edu:8080/library_common/act_on_multiple_datasets?library…"
+ #url = base_url+"/library_common/upload_library_dataset?library_id=adb5f5c93f827949&tool_id=upload1&file_type=auto&server_dir=datatx_22858&dbkey=%3F&upload_option=upload_directory&folder_id=529fd61ab1c6cc36&cntrller=library_admin&tool_state=None&runtool_btn=Upload+to+library"
+ url = self.base_url+"/library_common/act_on_multiple_datasets"
+ #print url
+ #print params
+ f = self.opener.open(url, params)
+ x = f.read()
+ if x.find("1 dataset(s) have been imported into your history.") == -1:
+ #print x
+ raise Exception("Dataset could not be imported into history")
+ except:
+ print traceback.format_exc()
+ sys.exit(1)
+
+
+ def run_workflow(self, workflow_id, hid, workflow_step):
+ input = str(workflow_step)+'|input'
+ try:
+ params = urllib.urlencode({'id':self.encode_id(workflow_id),
+ 'run_workflow': 'Run workflow',
+ input: hid})
+ url = self.base_url+"/workflow/run"
+ #print url+'?'+params
+ f = self.opener.open(url, params)
+# if f.read().find("1 dataset(s) have been imported into your history.") == -1:
+# raise Exception("Error in running the workflow")
+ except:
+ print traceback.format_exc()
+ sys.exit(1)
+
+
+ def logout(self):
+ # finally logout
+ f = self.opener.open(self.base_url+'/user/logout')
+
+ def encode_id(self, obj_id ):
+ id_secret = 'changethisinproductiontoo'
+ id_cipher = Blowfish.new( id_secret )
+ # Convert to string
+ s = str( obj_id )
+ # Pad to a multiple of 8 with leading "!"
+ s = ( "!" * ( 8 - len(s) % 8 ) ) + s
+ # Encrypt
+ return id_cipher.encrypt( s ).encode( 'hex' )
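The encode_id helper above obfuscates integer ids with Blowfish and a shared id_secret, padding the string with '!' to a multiple of 8 bytes before encrypting. As a hedged sketch only (not part of this changeset), the inverse operation under those same assumptions would look like:

    from Crypto.Cipher import Blowfish

    def decode_id(encoded_id, id_secret='changethisinproductiontoo'):
        # Reverse of encode_id: hex-decode, decrypt with the same Blowfish key,
        # then strip the '!' padding that was prepended before encryption.
        id_cipher = Blowfish.new(id_secret)
        return int(id_cipher.decrypt(encoded_id.decode('hex')).lstrip('!'))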
diff -r 207d0d70483b -r 076f572d7c9d templates/admin/requests/dataset.mako
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/templates/admin/requests/dataset.mako Wed Apr 21 10:41:30 2010 -0400
@@ -0,0 +1,71 @@
+<%inherit file="/base.mako"/>
+<%namespace file="/message.mako" import="render_msg" />
+
+
+%if message:
+ ${render_msg( message, status )}
+%endif
+
+<br/>
+<br/>
+
+<ul class="manage-table-actions">
+ <li>
+ <a class="action-button" href="${h.url_for( controller='requests_admin', action='show_datatx_page', sample_id=trans.security.encode_id(sample.id) )}">
+ <span>Dataset transfer page</span></a>
+ </li>
+</ul>
+
+<div class="toolForm">
+ <div class="toolFormTitle">Dataset details</div>
+ <div class="toolFormBody">
+ <form name="dataset_details" action="${h.url_for( controller='requests_admin', action='dataset_details', save_changes=True, sample_id=trans.security.encode_id(sample.id), dataset_index=dataset_index )}" method="post" >
+ <%
+ dataset = sample.dataset_files[dataset_index]
+ %>
+ <div class="form-row">
+ <label>Name:</label>
+ <div style="float: left; width: 250px; margin-right: 10px;">
+ %if dataset['status'] in [sample.transfer_status.IN_QUEUE, sample.transfer_status.NOT_STARTED]:
+ <input type="text" name="name" value="${dataset['name']}" size="60"/>
+ %else:
+ ${dataset['name']}
+ %endif
+
+ </div>
+ <div style="clear: both"></div>
+ </div>
+ <div class="form-row">
+ <label>File on the Sequencer:</label>
+ <div style="float: left; width: 250px; margin-right: 10px;">
+ ${dataset['filepath']}
+ ##<input type="text" name="filepath" value="${dataset['filepath']}" size="100" readonly/>
+ </div>
+ <div style="clear: both"></div>
+ </div>
+ <div class="form-row">
+ <label>Size:</label>
+ <div style="float: left; width: 250px; margin-right: 10px;">
+ ${dataset.get('size', 'Unknown')}
+ </div>
+ <div style="clear: both"></div>
+ </div>
+ <div class="form-row">
+ <label>Transfer status:</label>
+ <div style="float: left; width: 250px; margin-right: 10px;">
+ ${dataset['status']}
+ <br/>
+ %if dataset['status'] == sample.transfer_status.ERROR:
+ ${dataset['error_msg']}
+ %endif
+ </div>
+ <div style="clear: both"></div>
+ </div>
+ %if dataset['status'] in [sample.transfer_status.IN_QUEUE, sample.transfer_status.NOT_STARTED]:
+ <div class="form-row">
+ <input type="submit" name="save" value="Save"/>
+ </div>
+ %endif
+ </form>
+ </div>
+</div>
\ No newline at end of file
diff -r 207d0d70483b -r 076f572d7c9d templates/admin/requests/get_data.mako
--- a/templates/admin/requests/get_data.mako Tue Apr 20 15:36:03 2010 -0400
+++ b/templates/admin/requests/get_data.mako Wed Apr 21 10:41:30 2010 -0400
@@ -53,29 +53,44 @@
<div class="toolForm">
%if len(dataset_files):
## <form name="get_data" action="${h.url_for( controller='requests_admin', action='get_data', sample_id=sample.id)}" method="post" >
+ <div class="form-row">
+ <h4>Sample Dataset(s)</h4>
+ %if sample.untransferred_dataset_files():
<div class="form-row">
- <h4>Sample Dataset(s)</h4>
- <div class="form-row">
- <table class="grid">
- <thead>
- <tr>
- <th>Dataset File</th>
- <th>Transfer Status</th>
- <th></th>
- </tr>
- <thead>
- <tbody>
- %for dataset_index, dataset_file in enumerate(dataset_files):
- ${sample_dataset_files( dataset_index, dataset_file[0], dataset_file[1] )}
- %endfor
- </tbody>
- </table>
- </div>
- </div>
+ <ul class="manage-table-actions">
+ <li>
+ <a class="action-button" href="${h.url_for( controller='requests_admin', action='get_data', start_transfer_button=True, sample_id=sample.id )}">
+ <span>Start transfer</span></a>
+ </li>
+ </ul>
+ </div>
+ %endif
+ <div class="form-row">
+ <table class="grid">
+ <thead>
+ <tr>
+ <th>Dataset File</th>
+ <th>Transfer Status</th>
+ <th></th>
+ </tr>
+ <thead>
+ <tbody>
+ %for dataset_index, dataset_file in enumerate(dataset_files):
+ ${sample_dataset_files( dataset_index, dataset_file['name'], dataset_file['status'] )}
+ %endfor
+ </tbody>
+ </table>
+ </div>
+ </div>
+
## </form>
##</div>
+
+
+<br/>
<br/>
%endif
+
##<div class="toolForm">
<form name="get_data" action="${h.url_for( controller='requests_admin', action='get_data', sample_id=sample.id)}" method="post" >
<div class="form-row">
@@ -102,24 +117,24 @@
navigate away from this page. Once the transfer is complete
the dataset(s) will show up on this page.
</div>
- <input type="submit" name="start_transfer_button" value="Transfer"/>
+ <input type="submit" name="select_files_button" value="Select"/>
</div>
</div>
</div>
</form>
</div>
-<%def name="sample_dataset_files( dataset_index, dataset_file, status )">
+<%def name="sample_dataset_files( dataset_index, dataset_name, status )">
<tr>
<td>
-## <label class="msg_head"><a href="${h.url_for( controller='requests_admin', action='show_dataset_file', sample_id=trans.security.encode_id(sample.id), dataset_index=dataset_index )}">${dataset_file.split('/')[-1]}</a></label>
- <div class="msg_head"><u>${dataset_file.split('/')[-1]}</u></div>
- <div class="msg_body">
- ${dataset_file}
- </div>
+ <label class="msg_head"><a href="${h.url_for( controller='requests_admin', action='dataset_details', sample_id=trans.security.encode_id(sample.id), dataset_index=dataset_index )}">${dataset_name}</a></label>
+## <div class="msg_head"><u>${dataset_file.split('/')[-1]}</u></div>
+## <div class="msg_body">
+## ${dataset_file}
+## </div>
</td>
<td>
- %if status == sample.transfer_status.IN_PROGRESS:
+ %if status not in [sample.transfer_status.NOT_STARTED, sample.transfer_status.COMPLETE]:
<i>${status}</i>
%else:
${status}
diff -r 207d0d70483b -r 076f572d7c9d universe_wsgi.ini.sample
--- a/universe_wsgi.ini.sample Tue Apr 20 15:36:03 2010 -0400
+++ b/universe_wsgi.ini.sample Wed Apr 21 10:41:30 2010 -0400
@@ -287,7 +287,7 @@
# to be set up with a user account and other parameters listed below. The 'host'
# and 'port' fields should point to where the RabbitMQ server is running.
-#[galaxy:amqp]
+[galaxy_amqp]
#host = 127.0.0.1
#port = 5672
#userid = galaxy
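With the section header uncommented, Galaxy now reads its AMQP settings from [galaxy_amqp] at startup (see the config.py hunk at the top of this changeset). A hedged example of a filled-in section; every value below is a placeholder:

    [galaxy_amqp]
    host = 127.0.0.1
    port = 5672
    userid = galaxy
    password = galaxy
    virtual_host = galaxy_messaging_engine
    exchange = galaxy_exchange
    routing_key = bar_code_scanner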
details: http://www.bx.psu.edu/hg/galaxy/rev/207d0d70483b
changeset: 3673:207d0d70483b
user: Kanwei Li <kanwei(a)gmail.com>
date: Tue Apr 20 15:36:03 2010 -0400
description:
Fix history renaming on Saved Histories grid
diffstat:
lib/galaxy/web/controllers/history.py | 3 ++-
1 files changed, 2 insertions(+), 1 deletions(-)
diffs (13 lines):
diff -r 18d0d7fd543a -r 207d0d70483b lib/galaxy/web/controllers/history.py
--- a/lib/galaxy/web/controllers/history.py Tue Apr 20 13:19:44 2010 -0400
+++ b/lib/galaxy/web/controllers/history.py Tue Apr 20 15:36:03 2010 -0400
@@ -177,7 +177,8 @@
operation = kwargs['operation'].lower()
if operation == "share or publish":
return self.sharing( trans, **kwargs )
- if operation == "rename":
+ if operation == "rename" and kwargs.get('id', None): # Don't call rename if no ids
+ del kwargs['name'] # Remove ajax name param that rename method uses
return self.rename( trans, **kwargs )
history_ids = util.listify( kwargs.get( 'id', [] ) )
# Display no message by default
details: http://www.bx.psu.edu/hg/galaxy/rev/18d0d7fd543a
changeset: 3672:18d0d7fd543a
user: Kanwei Li <kanwei(a)gmail.com>
date: Tue Apr 20 13:19:44 2010 -0400
description:
GFF to Bed converter now converts spaces to underscores to avoid a UCSC problem [Brad Chapman] Closes #323
diffstat:
lib/galaxy/datatypes/converters/gff_to_bed_converter.py | 5 ++++-
1 files changed, 4 insertions(+), 1 deletions(-)
diffs (15 lines):
diff -r 7cb131814770 -r 18d0d7fd543a lib/galaxy/datatypes/converters/gff_to_bed_converter.py
--- a/lib/galaxy/datatypes/converters/gff_to_bed_converter.py Mon Apr 19 17:43:39 2010 -0400
+++ b/lib/galaxy/datatypes/converters/gff_to_bed_converter.py Tue Apr 20 13:19:44 2010 -0400
@@ -21,7 +21,10 @@
strand = '+'
# GFF format: chrom source, name, chromStart, chromEnd, score, strand
# Bed format: chrom, chromStart, chromEnd, name, score, strand
- out.write( "%s\t%s\t%s\t%s\t0\t%s\n" %( elems[0], start, elems[4], elems[2], strand ) )
+ #
+ # Replace any spaces in the name with underscores so UCSC will not complain
+ name = elems[2].replace(" ", "_")
+ out.write( "%s\t%s\t%s\t%s\t0\t%s\n" %( elems[0], start, elems[4], name, strand ) )
except:
skipped_lines += 1
if not first_skipped_line:
details: http://www.bx.psu.edu/hg/galaxy/rev/7cb131814770
changeset: 3671:7cb131814770
user: James Taylor <james(a)jamestaylor.org>
date: Mon Apr 19 17:43:39 2010 -0400
description:
Fix ordering in display_structured, also use insane eagerloading to make it massively faster
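For reference, a minimal sketch of the eager-loading pattern this changeset applies, mirroring the history.py hunk below; it assumes the SQLAlchemy of that era (eagerload_all with dotted relation paths) and a mapped History class supplied by the caller.

    from sqlalchemy.orm import eagerload_all

    def load_history_eagerly(sa_session, History, history_id):
        # One round trip: join-load the datasets, their creating jobs, and the
        # workflow-invocation chain instead of triggering per-row lazy loads.
        return sa_session.query(History).options(
            eagerload_all('active_datasets.creating_job_associations.job.'
                          'workflow_invocation_step.workflow_invocation.workflow'),
            eagerload_all('active_datasets.children')
        ).get(history_id)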
diffstat:
lib/galaxy/util/odict.py | 10 +++++++---
lib/galaxy/web/controllers/history.py | 25 ++++++++++++++++---------
templates/history/display_structured.mako | 13 ++++++++++---
3 files changed, 33 insertions(+), 15 deletions(-)
diffs (128 lines):
diff -r b8d25aabb98d -r 7cb131814770 lib/galaxy/util/odict.py
--- a/lib/galaxy/util/odict.py Tue Apr 20 11:47:51 2010 -0400
+++ b/lib/galaxy/util/odict.py Mon Apr 19 17:43:39 2010 -0400
@@ -31,9 +31,9 @@
self._keys = []
def copy(self):
- new = odict()
- new.update( self )
- return new
+ new = odict()
+ new.update( self )
+ return new
def items(self):
return zip(self._keys, self.values())
@@ -82,3 +82,7 @@
def __iter__( self ):
for key in self._keys:
yield key
+
+ def reverse( self ):
+ self._keys.reverse()
+
diff -r b8d25aabb98d -r 7cb131814770 lib/galaxy/web/controllers/history.py
--- a/lib/galaxy/web/controllers/history.py Tue Apr 20 11:47:51 2010 -0400
+++ b/lib/galaxy/web/controllers/history.py Mon Apr 19 17:43:39 2010 -0400
@@ -1,6 +1,7 @@
from galaxy.web.base.controller import *
from galaxy.web.framework.helpers import time_ago, iff, grids
from galaxy import util
+from galaxy.util.odict import odict
from galaxy.model.mapping import desc
from galaxy.model.orm import *
from galaxy.util.json import *
@@ -336,25 +337,31 @@
"""
# Get history
if id is None:
- history = trans.history
+ id = trans.history.id
else:
id = trans.security.decode_id( id )
- history = trans.sa_session.query( model.History ).get( id )
- assert history
- assert history.user and ( history.user == trans.user ) or ( history == trans.history )
+ # Expunge history from the session to allow us to force a reload
+ # with a bunch of eager loaded joins
+ trans.sa_session.expunge( trans.history )
+ history = trans.sa_session.query( model.History ).options(
+ eagerload_all( 'active_datasets.creating_job_associations.job.workflow_invocation_step.workflow_invocation.workflow' ),
+ eagerload_all( 'active_datasets.children' )
+ ).get( id )
+ assert history
+ assert history.user and ( history.user.id == trans.user.id ) or ( history.id == trans.history.id )
# Resolve jobs and workflow invocations for the datasets in the history
# items is filled with items (hdas, jobs, or workflows) that go at the
# top level
items = []
# First go through and group hdas by job, if there is no job they get
# added directly to items
- jobs = dict()
+ jobs = odict()
for hda in history.active_datasets:
# Follow "copied from ..." association until we get to the original
# instance of the dataset
original_hda = hda
- while original_hda.copied_from_history_dataset_association:
- original_hda = original_hda.copied_from_history_dataset_association
+ ## while original_hda.copied_from_history_dataset_association:
+ ## original_hda = original_hda.copied_from_history_dataset_association
# Check if the job has a creating job, most should, datasets from
# before jobs were tracked, or from the upload tool before it
# created a job, may not
@@ -370,7 +377,7 @@
else:
jobs[ job ] = [ ( hda, None ) ]
# Second, go through the jobs and connect to workflows
- wf_invocations = dict()
+ wf_invocations = odict()
for job, hdas in jobs.iteritems():
# Job is attached to a workflow step, follow it to the
# workflow_invocation and group
@@ -1025,4 +1032,4 @@
msg = 'Clone with name "%s" is now included in your previously stored histories.' % new_history.name
else:
msg = '%d cloned histories are now included in your previously stored histories.' % len( histories )
- return trans.show_ok_message( msg )
\ No newline at end of file
+ return trans.show_ok_message( msg )
diff -r b8d25aabb98d -r 7cb131814770 templates/history/display_structured.mako
--- a/templates/history/display_structured.mako Tue Apr 20 11:47:51 2010 -0400
+++ b/templates/history/display_structured.mako Mon Apr 19 17:43:39 2010 -0400
@@ -16,6 +16,7 @@
.workflow {
border: solid gray 1px;
+ margin: 5px 0;
border-left-width: 5px;
}
@@ -96,9 +97,15 @@
<%def name="render_item_job( job, children )">
<div class="tool toolForm">
- <div class="header toolFormTitle">Tool: ${trans.app.toolbox.tools_by_id[job.tool_id].name}</div>
+ <%
+ if job.tool_id in trans.app.toolbox.tools_by_id:
+ tool_name = trans.app.toolbox.tools_by_id[job.tool_id].name
+ else:
+ tool_name = "Unknown tool with id '%s'" % job.tool_id
+ %>
+ <div class="header toolFormTitle">Tool: ${tool_name}</div>
<div class="body toolFormBody">
- %for e, c in children:
+ %for e, c in reversed( children ):
${render_item( e, c )}
%endfor
</div>
@@ -111,7 +118,7 @@
<div class="workflow">
<div class="header">Workflow: ${wf.workflow.name}</div>
<div class="body">
- %for e, c in children:
+ %for e, c in reversed( children ):
${render_item( e, c )}
%endfor
</div>
details: http://www.bx.psu.edu/hg/galaxy/rev/b8d25aabb98d
changeset: 3670:b8d25aabb98d
user: Dan Blankenberg <dan(a)bx.psu.edu>
date: Tue Apr 20 11:47:51 2010 -0400
description:
Make failing to load datatype converters and display applications more graceful when given nonexistent file paths. A missing converter will still allow the application to start and a missing display application will not prevent a datatype from loading.
diffstat:
lib/galaxy/datatypes/registry.py | 46 ++++++++++++++++++++++-----------------
1 files changed, 26 insertions(+), 20 deletions(-)
diffs (63 lines):
diff -r e47ff545931f -r b8d25aabb98d lib/galaxy/datatypes/registry.py
--- a/lib/galaxy/datatypes/registry.py Mon Apr 19 17:41:35 2010 -0400
+++ b/lib/galaxy/datatypes/registry.py Tue Apr 20 11:47:51 2010 -0400
@@ -90,20 +90,22 @@
mimetype = composite_file.get( 'mimetype', None )
self.datatypes_by_extension[extension].add_composite_file( name, optional=optional, mimetype=mimetype )
for display_app in elem.findall( 'display' ):
- display_file = display_app.get( 'file', None )
- assert display_file is not None, "A file must be specified for a datatype display tag."
- inherit = galaxy.util.string_as_bool( display_app.get( 'inherit', 'False' ) )
- display_app = DisplayApplication.from_file( os.path.join( self.display_applications_path, display_file ), self )
- if display_app:
- if display_app.id in self.display_applications:
- #if we already loaded this display application, we'll use the first one again
- display_app = self.display_applications[ display_app.id ]
- self.log.debug( "Loaded display application '%s' for datatype '%s', inherit=%s" % ( display_app.id, extension, inherit ) )
- self.display_applications[ display_app.id ] = display_app #Display app by id
- self.datatypes_by_extension[ extension ].add_display_application( display_app )
- if inherit and ( self.datatypes_by_extension[extension], display_app ) not in inherit_display_application_by_class:
- #subclass inheritance will need to wait until all datatypes have been loaded
- inherit_display_application_by_class.append( ( self.datatypes_by_extension[extension], display_app ) )
+ display_file = os.path.join( self.display_applications_path, display_app.get( 'file', None ) )
+ try:
+ inherit = galaxy.util.string_as_bool( display_app.get( 'inherit', 'False' ) )
+ display_app = DisplayApplication.from_file( display_file, self )
+ if display_app:
+ if display_app.id in self.display_applications:
+ #if we already loaded this display application, we'll use the first one again
+ display_app = self.display_applications[ display_app.id ]
+ self.log.debug( "Loaded display application '%s' for datatype '%s', inherit=%s" % ( display_app.id, extension, inherit ) )
+ self.display_applications[ display_app.id ] = display_app #Display app by id
+ self.datatypes_by_extension[ extension ].add_display_application( display_app )
+ if inherit and ( self.datatypes_by_extension[extension], display_app ) not in inherit_display_application_by_class:
+ #subclass inheritance will need to wait until all datatypes have been loaded
+ inherit_display_application_by_class.append( ( self.datatypes_by_extension[extension], display_app ) )
+ except:
+ self.log.exception( "error reading display application from path: %s" % display_file )
except Exception, e:
self.log.warning( 'Error loading datatype "%s", problem: %s' % ( extension, str( e ) ) )
# Handle display_application subclass inheritance here:
@@ -290,12 +292,16 @@
tool_config = elem[0]
source_datatype = elem[1]
target_datatype = elem[2]
- converter = toolbox.load_tool( os.path.join( self.datatype_converters_path, tool_config ) )
- toolbox.tools_by_id[converter.id] = converter
- if source_datatype not in self.datatype_converters:
- self.datatype_converters[source_datatype] = odict()
- self.datatype_converters[source_datatype][target_datatype] = converter
- self.log.debug( "Loaded converter: %s", converter.id )
+ converter_path = os.path.join( self.datatype_converters_path, tool_config )
+ try:
+ converter = toolbox.load_tool( converter_path )
+ toolbox.tools_by_id[converter.id] = converter
+ if source_datatype not in self.datatype_converters:
+ self.datatype_converters[source_datatype] = odict()
+ self.datatype_converters[source_datatype][target_datatype] = converter
+ self.log.debug( "Loaded converter: %s", converter.id )
+ except:
+ self.log.exception( "error reading converter from path: %s" % converter_path )
def load_external_metadata_tool( self, toolbox ):
"""Adds a tool which is used to set external metadata"""
details: http://www.bx.psu.edu/hg/galaxy/rev/e47ff545931f
changeset: 3669:e47ff545931f
user: jeremy goecks <jeremy.goecks(a)emory.edu>
date: Mon Apr 19 17:41:35 2010 -0400
description:
Cuffcompare wrapper.
diffstat:
tools/ngs_rna/cuffcompare_wrapper.py | 86 ++++++++++++++++++++
tools/ngs_rna/cuffcompare_wrapper.xml | 142 ++++++++++++++++++++++++++++++++++
2 files changed, 228 insertions(+), 0 deletions(-)
diffs (237 lines):
diff -r 91b8f0abffc8 -r e47ff545931f tools/ngs_rna/cuffcompare_wrapper.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/ngs_rna/cuffcompare_wrapper.py Mon Apr 19 17:41:35 2010 -0400
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+
+import optparse, os, shutil, subprocess, sys, tempfile
+
+def stop_err( msg ):
+ sys.stderr.write( "%s\n" % msg )
+ sys.exit()
+
+def __main__():
+ #Parse Command Line
+ parser = optparse.OptionParser()
+ parser.add_option( '-r', dest='ref_annotation', help='An optional "reference" annotation GTF. Each sample is matched against this file, and sample isoforms are tagged as overlapping, matching, or novel where appropriate. See the refmap and tmap output file descriptions below.' )
+ parser.add_option( '-R', action="store_true", dest='ignore_nonoverlap', help='If -r was specified, this option causes cuffcompare to ignore reference transcripts that are not overlapped by any transcript in one of cuff1.gtf,...,cuffN.gtf. Useful for ignoring annotated transcripts that are not present in your RNA-Seq samples and thus adjusting the "sensitivity" calculation in the accuracy report written in the transcripts accuracy file' )
+
+ # Wrapper / Galaxy options.
+ parser.add_option( '-A', '--transcripts-accuracy-output', dest='transcripts_accuracy_output_file', help='' )
+ parser.add_option( '-B', '--transcripts-combined-output', dest='transcripts_combined_output_file', help='' )
+ parser.add_option( '-C', '--transcripts-tracking-output', dest='transcripts_tracking_output_file', help='' )
+
+ (options, args) = parser.parse_args()
+
+ # Make temp directory for output.
+ tmp_output_dir = tempfile.mkdtemp()
+
+ # Build command.
+
+ # Base.
+ cmd = "cuffcompare -o cc_output"
+
+ # Add options.
+ if options.ref_annotation:
+ cmd += " -r %s" % options.ref_annotation
+ if options.ignore_nonoverlap:
+ cmd += " -R "
+
+ # Add input files.
+ if type(args) is list:
+ args = " ".join(args)
+ cmd += " " + args
+ print cmd
+
+ # Run command.
+ try:
+ tmp_name = tempfile.NamedTemporaryFile( dir=tmp_output_dir ).name
+ tmp_stderr = open( tmp_name, 'wb' )
+ proc = subprocess.Popen( args=cmd, shell=True, cwd=tmp_output_dir, stderr=tmp_stderr.fileno() )
+ returncode = proc.wait()
+ tmp_stderr.close()
+
+ # Get stderr, allowing for case where it's very large.
+ tmp_stderr = open( tmp_name, 'rb' )
+ stderr = ''
+ buffsize = 1048576
+ try:
+ while True:
+ stderr += tmp_stderr.read( buffsize )
+ if not stderr or len( stderr ) % buffsize != 0:
+ break
+ except OverflowError:
+ pass
+ tmp_stderr.close()
+
+ # Error checking.
+ if returncode != 0:
+ raise Exception, stderr
+
+ # check that there are results in the output file
+ if len( open( tmp_output_dir + "/cc_output", 'rb' ).read().strip() ) == 0:
+ raise Exception, 'The main output file is empty, there may be an error with your input file or settings.'
+ except Exception, e:
+ stop_err( 'Error running cuffcompare. ' + str( e ) )
+
+ # Copy output files from tmp directory to specified files.
+ try:
+ try:
+ shutil.copyfile( tmp_output_dir + "/cc_output", options.transcripts_accuracy_output_file )
+ shutil.copyfile( tmp_output_dir + "/cc_output.combined.gtf", options.transcripts_combined_output_file )
+ shutil.copyfile( tmp_output_dir + "/cc_output.tracking", options.transcripts_tracking_output_file )
+ except Exception, e:
+ stop_err( 'Error in cuffcompare:\n' + str( e ) )
+ finally:
+ # Clean up temp dirs
+ if os.path.exists( tmp_output_dir ):
+ shutil.rmtree( tmp_output_dir )
+
+if __name__=="__main__": __main__()
\ No newline at end of file
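The wrapper above runs cuffcompare with the child's stderr redirected to a named temporary file and then reads that file back in fixed-size chunks, so very large error output never has to sit in an in-memory pipe. A minimal, self-contained sketch of that pattern, assuming Python 2 as in the wrapper (the run_and_capture_stderr name is invented here and is not part of the changeset)::

    import subprocess, tempfile

    def run_and_capture_stderr( cmd, work_dir ):
        # Redirect the child's stderr to a named temp file rather than a pipe.
        tmp_name = tempfile.NamedTemporaryFile( dir=work_dir ).name
        tmp_stderr = open( tmp_name, 'wb' )
        proc = subprocess.Popen( args=cmd, shell=True, cwd=work_dir, stderr=tmp_stderr.fileno() )
        returncode = proc.wait()
        tmp_stderr.close()
        # Read stderr back in 1 MB chunks so arbitrarily large output is handled.
        stderr = ''
        tmp_stderr = open( tmp_name, 'rb' )
        try:
            while True:
                chunk = tmp_stderr.read( 1048576 )
                if not chunk:
                    break
                stderr += chunk
        finally:
            tmp_stderr.close()
        return returncode, stderr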
diff -r 91b8f0abffc8 -r e47ff545931f tools/ngs_rna/cuffcompare_wrapper.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/tools/ngs_rna/cuffcompare_wrapper.xml Mon Apr 19 17:41:35 2010 -0400
@@ -0,0 +1,142 @@
+<tool id="cuffcompare" name="Cuffcompare" version="0.8.2">
+ <description>compare assembled transcripts to a reference annotation and track Cufflinks transcripts across multiple experiments</description>
+ <command interpreter="python">
+ cuffcompare_wrapper.py
+ --transcripts-accuracy-output=$transcripts_accuracy
+ --transcripts-combined-output=$transcripts_combined
+ --transcripts-tracking-output=$transcripts_tracking
+ #if $annotation.use_ref_annotation == "Yes":
+ -r $annotation.reference_annotation
+ #if $annotation.ignore_nonoverlapping_reference:
+ -R
+ #end if
+ #end if
+ $input1
+ $input2
+ </command>
+ <inputs>
+ <param format="gtf" name="input1" type="data" label="GTF file of assembled transcripts" help=""/>
+ <param format="gtf" name="input2" type="data" label="GTF file of assembled transcripts" help=""/>
+ <conditional name="annotation">
+ <param name="use_ref_annotation" type="select" label="Use Reference Annotation?">
+ <option value="No">No</option>
+ <option value="Yes">Yes</option>
+ </param>
+ <when value="Yes">
+ <param format="gtf" name="reference_annotation" type="data" label="Reference Annotation" help=""/>
+ <param name="ignore_nonoverlapping_reference" type="boolean" label="Ignore reference transcripts that are not overlapped by any transcript in input files"/>
+ </when>
+ <when value="No">
+ </when>
+ </conditional>
+ </inputs>
+
+ <outputs>
+ <data format="gtf" name="transcripts_combined" />
+ <data format="tracking" name="transcripts_tracking" />
+ <data format="gtf" name="transcripts_accuracy" />
+ </outputs>
+
+ <tests>
+ <test>
+ </test>
+ </tests>
+
+ <help>
+**Cuffcompare Overview**
+
+Cuffcompare is part of Cufflinks_. Cuffcompare helps you: (a) compare your assembled transcripts to a reference annotation and (b) track Cufflinks transcripts across multiple experiments (e.g. across a time course). Please cite: Trapnell C, Williams BA, Pertea G, Mortazavi AM, Kwan G, van Baren MJ, Salzberg SL, Wold B, Pachter L. Transcript assembly and abundance estimation from RNA-Seq reveals thousands of new transcripts and switching among isoforms. (manuscript in press)
+
+.. _Cufflinks: http://cufflinks.cbcb.umd.edu/
+
+------
+
+**Know what you are doing**
+
+.. class:: warningmark
+
+There is no such thing (yet) as an automated gearshift in expression analysis. It is all like stick-shift driving in San Francisco. In other words, running this tool with default parameters will probably not give you meaningful results. A way to deal with this is to **understand** the parameters by carefully reading the `documentation`__ and experimenting. Fortunately, Galaxy makes experimenting easy.
+
+.. __: http://cufflinks.cbcb.umd.edu/manual.html#cuffcompare
+
+------
+
+**Input format**
+
+Cuffcompare takes Cufflinks' GTF output as input, and optionally can take a "reference" annotation (such as from Ensembl___)
+
+.. ___: http://www.todo.org
+
+------
+
+**Outputs**
+
+Cuffcompare produces the following output files:
+
+Transcripts Accuracy File:
+
+Cuffcompare reports various statistics related to the "accuracy" of the transcripts in each sample when compared to the reference annotation data. The typical gene finding measures of "sensitivity" and "specificity" (as defined in Burset, M., Guigó, R. : Evaluation of gene structure prediction programs (1996) Genomics, 34 (3), pp. 353-367. doi: 10.1006/geno.1996.0298) are calculated at various levels (nucleotide, exon, intron, transcript, gene) for each input file and reported in this file. The Sn and Sp columns show specificity and sensitivity values at each level, while the fSn and fSp columns are "fuzzy" variants of these same accuracy calculations, allowing for a very small variation in exon boundaries to still be counted as a "match".
+
+Transcripts Combined File:
+
+Cuffcompare reports a GTF file containing the "union" of all transfrags in each sample. If a transfrag is present in both samples, it is thus reported once in the combined gtf.
+
+Transcripts Tracking File:
+
+This file matches transcripts up between samples. Each row contains a transcript structure that is present in one or more input GTF files. Because the transcripts will generally have different IDs (unless you assembled your RNA-Seq reads against a reference transcriptome), cuffcompare examines the structure of each of the transcripts, matching transcripts that agree on the coordinates and order of all of their introns, as well as strand. Matching transcripts are allowed to differ on the length of the first and last exons, since these lengths will naturally vary from sample to sample due to the random nature of sequencing.
+If you ran cuffcompare with the -r option, the first and second columns contain the closest matching reference transcript to the one described by each row.
+
+Here's an example of a line from the tracking file::
+
+ TCONS_00000045 XLOC_000023 Tcea|uc007afj.1 j \
+ q1:exp.115|exp.115.0|100|3.061355|0.350242|0.350207 \
+ q2:60hr.292|60hr.292.0|100|4.094084|0.000000|0.000000
+
+In this example, a transcript present in the two input files, called exp.115.0 in the first and 60hr.292.0 in the second, doesn't match any reference transcript exactly, but shares exons with uc007afj.1, an isoform of the gene Tcea, as indicated by the class code j. The first three columns are as follows::
+
+ Column number Column name Example Description
+ -----------------------------------------------------------------------
+ 1 Cufflinks transfrag id TCONS_00000045 A unique internal id for the transfrag
+ 2 Cufflinks locus id XLOC_000023 A unique internal id for the locus
+ 3 Reference gene id Tcea The gene_name attribute of the reference GTF record for this transcript, or '-' if no reference transcript overlaps this Cufflinks transcript
+ 4 Reference transcript id uc007afj.1 The transcript_id attribute of the reference GTF record for this transcript, or '-' if no reference transcript overlaps this Cufflinks transcript
+ 5 Class code c The type of match between the Cufflinks transcripts in column 6 and the reference transcript. See class codes
+
+Each of the columns after the fifth has the following format::
+ qJ:gene_id|transcript_id|FMI|FPKM|conf_lo|conf_hi
+
+A transcript need not be present in all samples to be reported in the tracking file. A sample not containing a transcript will have a "-" in its entry in the row for that transcript.
+
+Class Codes
+
+If you ran cuffcompare with the -r option, tracking rows will contain the following values. If you did not use -r, the rows will all contain "-" in their class code column::
+
+ Priority Code Description
+ ---------------------------------
+ 1 = Match
+ 2 c Contained
+ 3 j New isoform
+ 4 e A single exon transcript overlapping a reference exon and at least 10 bp of a reference intron, indicating a possible pre-mRNA fragment.
+ 5 i A single exon transcript falling entirely within a reference intron
+ 6 r Repeat. Currently determined by looking at the reference sequence and applied to transcripts where at least 50% of the bases are lower case
+ 7 p Possible polymerase run-on fragment
+ 8 u Unknown, intergenic transcript
+ 9 o Unknown, generic overlap with reference
+ 10 . (.tracking file only, indicates multiple classifications)
+
+-------
+
+**Settings**
+
+All of the options have a default value. You can change any of them. Most of the options in Cuffcompare have been implemented here.
+
+------
+
+**Cuffcompare parameter list**
+
+This is a list of implemented Cuffcompare options::
+
+ -r An optional "reference" annotation GTF. Each sample is matched against this file, and sample isoforms are tagged as overlapping, matching, or novel where appropriate. See the refmap and tmap output file descriptions below.
+ -R If -r was specified, this option causes cuffcompare to ignore reference transcripts that are not overlapped by any transcript in one of cuff1.gtf,...,cuffN.gtf. Useful for ignoring annotated transcripts that are not present in your RNA-Seq samples and thus adjusting the "sensitivity" calculation in the accuracy report written in the transcripts_accuracy file
+ </help>
+</tool>
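As a companion to the tracking-file description in the help text above, here is a small illustrative parser for one .tracking row, assuming the whitespace-separated layout shown in the example line (the parse_tracking_line name and the returned field names are invented for illustration and are not part of the changeset)::

    def parse_tracking_line( line ):
        # First four whitespace-separated columns: transfrag id, locus id,
        # reference gene|transcript (or '-'), and class code.
        fields = line.split()
        transfrag_id, locus_id, ref_id, class_code = fields[:4]
        samples = []
        for col in fields[4:]:
            if col == '-':
                samples.append( None )  # transcript absent from this sample
                continue
            # Per-sample columns look like qJ:gene_id|transcript_id|FMI|FPKM|conf_lo|conf_hi
            sample_label, rest = col.split( ':', 1 )
            gene_id, transcript_id, fmi, fpkm, conf_lo, conf_hi = rest.split( '|' )
            samples.append( dict( sample=sample_label, gene_id=gene_id,
                                  transcript_id=transcript_id, fmi=int( fmi ),
                                  fpkm=float( fpkm ), conf_lo=float( conf_lo ),
                                  conf_hi=float( conf_hi ) ) )
        return transfrag_id, locus_id, ref_id, class_code, samples

Applied to the example row in the help text, this yields class code 'j' and two per-sample records (q1 and q2).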
details: http://www.bx.psu.edu/hg/galaxy/rev/91b8f0abffc8
changeset: 3668:91b8f0abffc8
user: Kanwei Li <kanwei(a)gmail.com>
date: Mon Apr 19 16:03:24 2010 -0400
description:
User-custom dbkeys can now be set for datasets in Edit Attributes and upload
Always use autocomplete for dbkey entry (used to be >20)
diffstat:
lib/galaxy/web/controllers/tracks.py | 15 +++++++--------
lib/galaxy/web/framework/__init__.py | 12 +++++++++---
static/scripts/galaxy.base.js | 8 ++++----
static/scripts/packed/galaxy.base.js | 2 +-
4 files changed, 21 insertions(+), 16 deletions(-)
diffs (104 lines):
diff -r 3c6ffa5362d2 -r 91b8f0abffc8 lib/galaxy/web/controllers/tracks.py
--- a/lib/galaxy/web/controllers/tracks.py Mon Apr 19 14:09:07 2010 -0400
+++ b/lib/galaxy/web/controllers/tracks.py Mon Apr 19 16:03:24 2010 -0400
@@ -21,7 +21,6 @@
from galaxy.web.framework import simplejson
from galaxy.web.framework.helpers import time_ago, grids
from galaxy.util.bunch import Bunch
-from galaxy.util import dbnames
from galaxy.visualization.tracks.data.array_tree import ArrayTreeDataProvider
from galaxy.visualization.tracks.data.interval_index import IntervalIndexDataProvider
@@ -79,7 +78,7 @@
"""
available_tracks = None
- len_dbkeys = None
+ len_files = None
@web.expose
@web.require_login()
@@ -91,17 +90,17 @@
@web.expose
@web.require_login()
def new_browser( self, trans ):
- if not self.len_dbkeys:
+ if not self.len_files:
len_files = glob.glob(os.path.join( trans.app.config.tool_data_path, 'shared','ucsc','chrom', "*.len" ))
- len_files = [ os.path.split(f)[1].split(".len")[0] for f in len_files ] # get xxx.len
- loaded_dbkeys = dbnames
- self.len_dbkeys = [ (k, v) for k, v in loaded_dbkeys if k in len_files ]
+ self.len_files = [ os.path.split(f)[1].split(".len")[0] for f in len_files ] # get xxx.len
- user_keys = None
+ user_keys = {}
user = trans.get_user()
if 'dbkeys' in user.preferences:
user_keys = from_json_string( user.preferences['dbkeys'] )
- return trans.fill_template( "tracks/new_browser.mako", user_keys=user_keys, dbkeys=self.len_dbkeys )
+
+ dbkeys = [ (k, v) for k, v in trans.db_builds if k in self.len_files or k in user_keys ]
+ return trans.fill_template( "tracks/new_browser.mako", dbkeys=dbkeys )
@web.json
@web.require_login()
diff -r 3c6ffa5362d2 -r 91b8f0abffc8 lib/galaxy/web/framework/__init__.py
--- a/lib/galaxy/web/framework/__init__.py Mon Apr 19 14:09:07 2010 -0400
+++ b/lib/galaxy/web/framework/__init__.py Mon Apr 19 16:03:24 2010 -0400
@@ -10,7 +10,7 @@
import base
import pickle
from galaxy import util
-from galaxy.util.json import to_json_string
+from galaxy.util.json import to_json_string, from_json_string
pkg_resources.require( "simplejson" )
import simplejson
@@ -657,10 +657,16 @@
dbnames = list()
datasets = self.sa_session.query( self.app.model.HistoryDatasetAssociation ) \
.filter_by( deleted=False, history_id=self.history.id, extension="len" )
- if datasets.count() > 0:
- dbnames.append( (util.dbnames.default_value, '--------- User Defined Builds ----------') )
+
for dataset in datasets:
dbnames.append( (dataset.dbkey, dataset.name) )
+
+ user = self.get_user()
+ if user and 'dbkeys' in user.preferences:
+ user_keys = from_json_string( user.preferences['dbkeys'] )
+ for key, chrom_dict in user_keys.iteritems():
+ dbnames.append((key, "%s (%s) [Custom]" % (chrom_dict['name'], key) ))
+
dbnames.extend( util.dbnames )
return dbnames
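The framework change above expects user.preferences['dbkeys'] to hold a JSON object keyed by dbkey, with each value carrying at least a human-readable 'name'. A minimal sketch of that shape and of the (dbkey, label) pairs the loop produces, using the standard json module in place of galaxy.util.json and an invented example key (neither is part of the changeset)::

    import json

    # Assumed shape of the 'dbkeys' preference: { dbkey: { 'name': ... }, ... }
    preferences_dbkeys = json.dumps( { 'custom_build_1': { 'name': 'My assembly' } } )

    dbnames = []
    for key, chrom_dict in json.loads( preferences_dbkeys ).items():
        # Mirrors the label format appended to db_builds in the diff above.
        dbnames.append( ( key, "%s (%s) [Custom]" % ( chrom_dict['name'], key ) ) )

    print dbnames  # [('custom_build_1', 'My assembly (custom_build_1) [Custom]')]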
diff -r 3c6ffa5362d2 -r 91b8f0abffc8 static/scripts/galaxy.base.js
--- a/static/scripts/galaxy.base.js Mon Apr 19 14:09:07 2010 -0400
+++ b/static/scripts/galaxy.base.js Mon Apr 19 16:03:24 2010 -0400
@@ -144,13 +144,13 @@
return 0;
}
-// Replace any select box with 20+ options with a text input box + autocomplete.
+// Replace select box with a text input box + autocomplete.
// TODO: make work with dynamic tool inputs and then can replace all big selects.
-function replace_big_select_inputs() {
+function replace_big_select_inputs(min_length) {
$('select[name=dbkey]').each( function() {
var select_elt = $(this);
- // Skip if there are < 20 options.
- if (select_elt.find('option').length < 20)
+ // Skip if # of options < threshold
+ if (min_length !== undefined && select_elt.find('option').length < min_length)
return;
// Replace select with text + autocomplete.
diff -r 3c6ffa5362d2 -r 91b8f0abffc8 static/scripts/packed/galaxy.base.js
--- a/static/scripts/packed/galaxy.base.js Mon Apr 19 14:09:07 2010 -0400
+++ b/static/scripts/packed/galaxy.base.js Mon Apr 19 16:03:24 2010 -0400
@@ -1,1 +1,1 @@
-$(document).ready(function(){replace_big_select_inputs()});$.fn.makeAbsolute=function(a){return this.each(function(){var b=$(this);var c=b.position();b.css({position:"absolute",marginLeft:0,marginTop:0,top:c.top,left:c.left,right:$(window).width()-(c.left+b.width())});if(a){b.remove().appendTo("body")}})};function ensure_popup_helper(){if($("#popup-helper").length===0){$("<div id='popup-helper'/>").css({background:"white",opacity:0,zIndex:15000,position:"absolute",top:0,left:0,width:"100%",height:"100%"}).appendTo("body").hide()}}function attach_popupmenu(b,d){var a=function(){d.unbind().hide();$("#popup-helper").unbind("click.popupmenu").hide()};var c=function(g){$("#popup-helper").bind("click.popupmenu",a).show();d.click(a).css({left:0,top:-1000}).show();var f=g.pageX-d.width()/2;f=Math.min(f,$(document).scrollLeft()+$(window).width()-$(d).width()-20);f=Math.max(f,$(document).scrollLeft()+20);d.css({top:g.pageY-5,left:f});return false};$(b).click(c)}function make_popupmen!
u(c,b){ensure_popup_helper();var a=$("<ul id='"+c.attr("id")+"-menu'></ul>");$.each(b,function(f,e){if(e){$("<li/>").html(f).click(e).appendTo(a)}else{$("<li class='head'/>").html(f).appendTo(a)}});var d=$("<div class='popmenu-wrapper'>");d.append(a).append("<div class='overlay-border'>").css("position","absolute").appendTo("body").hide();attach_popupmenu(c,d)}function make_popup_menus(){jQuery("div[popupmenu]").each(function(){var c={};$(this).find("a").each(function(){var b=$(this).attr("confirm"),d=$(this).attr("href"),e=$(this).attr("target");c[$(this).text()]=function(){if(!b||confirm(b)){var g=window;if(e=="_parent"){g=window.parent}else{if(e=="_top"){g=window.top}}g.location=d}}});var a=$("#"+$(this).attr("popupmenu"));make_popupmenu(a,c);$(this).remove();a.addClass("popup").show()})}function array_length(b){if(b.length){return b.length}var c=0;for(var a in b){c++}return c}function naturalSort(i,g){var n=/(-?[0-9\.]+)/g,j=i.toString().toLowerCase()||"",f=g.toString()!
.toLowerCase()||"",k=String.fromCharCode(0),l=j.replace(n,k+"$1"+k).sp
lit(k),e=f.replace(n,k+"$1"+k).split(k),d=(new Date(j)).getTime(),m=d?(new Date(f)).getTime():null;if(m){if(d<m){return -1}else{if(d>m){return 1}}}for(var h=0,c=Math.max(l.length,e.length);h<c;h++){oFxNcL=parseFloat(l[h])||l[h];oFyNcL=parseFloat(e[h])||e[h];if(oFxNcL<oFyNcL){return -1}else{if(oFxNcL>oFyNcL){return 1}}}return 0}function replace_big_select_inputs(){$("select[name=dbkey]").each(function(){var a=$(this);if(a.find("option").length<20){return}var b=a.attr("value");var c=$("<input type='text' class='text-and-autocomplete-select'></input>");c.attr("size",40);c.attr("name",a.attr("name"));c.attr("id",a.attr("id"));c.click(function(){var h=$(this).attr("value");$(this).attr("value","Loading...");$(this).showAllInCache();$(this).attr("value",h);$(this).select()});var g=[];var f={};a.children("option").each(function(){var i=$(this).text();var h=$(this).attr("value");if(h=="?"){return}g.push(i);f[i]=h;f[h]=h;if(h==b){c.attr("value",i)}});g.push("unspecified (?)");f["unsp!
ecified (?)"]="?";f["?"]="?";if(c.attr("value")==""){c.attr("value","Click to Search or Select")}g=g.sort(naturalSort);var e={selectFirst:false,autoFill:false,mustMatch:false,matchContains:true,max:1000,minChars:0,hideForLessThanMinChars:false};c.autocomplete(g,e);a.replaceWith(c);var d=function(){var i=c.attr("value");var h=f[i];if(h!==null&&h!==undefined){c.attr("value",h)}else{if(b!=""){c.attr("value",b)}else{c.attr("value","?")}}};c.parents("form").submit(function(){d()});$(document).bind("convert_dbkeys",function(){d()})})}function async_save_text(d,f,e,a,c,h,i,g,b){if(c===undefined){c=30}if(i===undefined){i=4}$("#"+d).live("click",function(){if($("#renaming-active").length>0){return}var l=$("#"+f),k=l.text(),j;if(h){j=$("<textarea></textarea>").attr({rows:i,cols:c}).text(k)}else{j=$("<input type='text'></input>").attr({value:k,size:c})}j.attr("id","renaming-active");j.blur(function(){$(this).remove();l.show();if(b){b(j)}});j.keyup(function(n){if(n.keyCode===27){$(this!
).trigger("blur")}else{if(n.keyCode===13){var m={};m[a]=$(this).val();
$(this).trigger("blur");$.ajax({url:e,data:m,error:function(){alert("Text editing for elt "+f+" failed")},success:function(o){l.text(o);if(b){b(j)}}})}}});if(g){g(j)}l.hide();j.insertAfter(l);j.focus();j.select();return})}function init_history_items(d,a,c){var b=function(){try{var e=$.jStore.store("history_expand_state");if(e){for(var g in e){$("#"+g+" div.historyItemBody").show()}}}catch(f){$.jStore.remove("history_expand_state")}if($.browser.mozilla){$("div.historyItemBody").each(function(){if(!$(this).is(":visible")){$(this).find("pre.peek").css("overflow","hidden")}})}d.each(function(){var j=this.id;var h=$(this).children("div.historyItemBody");var i=h.find("pre.peek");$(this).find(".historyItemTitleBar > .historyItemTitle").wrap("<a href='javascript:void();'></a>").click(function(){if(h.is(":visible")){if($.browser.mozilla){i.css("overflow","hidden")}h.slideUp("fast");if(!c){var k=$.jStore.store("history_expand_state");if(k){delete k[j];$.jStore.store("history_expand_st!
ate",k)}}}else{h.slideDown("fast",function(){if($.browser.mozilla){i.css("overflow","auto")}});if(!c){var k=$.jStore.store("history_expand_state");if(k===undefined){k={}}k[j]=true;$.jStore.store("history_expand_state",k)}}return false})});$("#top-links > a.toggle").click(function(){var h=$.jStore.store("history_expand_state");if(h===undefined){h={}}$("div.historyItemBody:visible").each(function(){if($.browser.mozilla){$(this).find("pre.peek").css("overflow","hidden")}$(this).slideUp("fast");if(h){delete h[$(this).parent().attr("id")]}});$.jStore.store("history_expand_state",h)}).show()};if(a){b()}else{$.jStore.init("galaxy");$.jStore.engineReady(function(){b()})}}$(document).ready(function(){$("a[confirm]").click(function(){return confirm($(this).attr("confirm"))});if($.fn.tipsy){$(".tooltip").tipsy({gravity:"s"})}make_popup_menus()});
\ No newline at end of file
+$(document).ready(function(){replace_big_select_inputs()});$.fn.makeAbsolute=function(a){return this.each(function(){var b=$(this);var c=b.position();b.css({position:"absolute",marginLeft:0,marginTop:0,top:c.top,left:c.left,right:$(window).width()-(c.left+b.width())});if(a){b.remove().appendTo("body")}})};function ensure_popup_helper(){if($("#popup-helper").length===0){$("<div id='popup-helper'/>").css({background:"white",opacity:0,zIndex:15000,position:"absolute",top:0,left:0,width:"100%",height:"100%"}).appendTo("body").hide()}}function attach_popupmenu(b,d){var a=function(){d.unbind().hide();$("#popup-helper").unbind("click.popupmenu").hide()};var c=function(g){$("#popup-helper").bind("click.popupmenu",a).show();d.click(a).css({left:0,top:-1000}).show();var f=g.pageX-d.width()/2;f=Math.min(f,$(document).scrollLeft()+$(window).width()-$(d).width()-20);f=Math.max(f,$(document).scrollLeft()+20);d.css({top:g.pageY-5,left:f});return false};$(b).click(c)}function make_popupmen!
u(c,b){ensure_popup_helper();var a=$("<ul id='"+c.attr("id")+"-menu'></ul>");$.each(b,function(f,e){if(e){$("<li/>").html(f).click(e).appendTo(a)}else{$("<li class='head'/>").html(f).appendTo(a)}});var d=$("<div class='popmenu-wrapper'>");d.append(a).append("<div class='overlay-border'>").css("position","absolute").appendTo("body").hide();attach_popupmenu(c,d)}function make_popup_menus(){jQuery("div[popupmenu]").each(function(){var c={};$(this).find("a").each(function(){var b=$(this).attr("confirm"),d=$(this).attr("href"),e=$(this).attr("target");c[$(this).text()]=function(){if(!b||confirm(b)){var g=window;if(e=="_parent"){g=window.parent}else{if(e=="_top"){g=window.top}}g.location=d}}});var a=$("#"+$(this).attr("popupmenu"));make_popupmenu(a,c);$(this).remove();a.addClass("popup").show()})}function array_length(b){if(b.length){return b.length}var c=0;for(var a in b){c++}return c}function naturalSort(i,g){var n=/(-?[0-9\.]+)/g,j=i.toString().toLowerCase()||"",f=g.toString()!
.toLowerCase()||"",k=String.fromCharCode(0),l=j.replace(n,k+"$1"+k).sp
lit(k),e=f.replace(n,k+"$1"+k).split(k),d=(new Date(j)).getTime(),m=d?(new Date(f)).getTime():null;if(m){if(d<m){return -1}else{if(d>m){return 1}}}for(var h=0,c=Math.max(l.length,e.length);h<c;h++){oFxNcL=parseFloat(l[h])||l[h];oFyNcL=parseFloat(e[h])||e[h];if(oFxNcL<oFyNcL){return -1}else{if(oFxNcL>oFyNcL){return 1}}}return 0}function replace_big_select_inputs(a){$("select[name=dbkey]").each(function(){var b=$(this);if(a!==undefined&&b.find("option").length<a){return}var c=b.attr("value");var d=$("<input type='text' class='text-and-autocomplete-select'></input>");d.attr("size",40);d.attr("name",b.attr("name"));d.attr("id",b.attr("id"));d.click(function(){var i=$(this).attr("value");$(this).attr("value","Loading...");$(this).showAllInCache();$(this).attr("value",i);$(this).select()});var h=[];var g={};b.children("option").each(function(){var j=$(this).text();var i=$(this).attr("value");if(i=="?"){return}h.push(j);g[j]=i;g[i]=i;if(i==c){d.attr("value",j)}});h.push("unspecifie!
d (?)");g["unspecified (?)"]="?";g["?"]="?";if(d.attr("value")==""){d.attr("value","Click to Search or Select")}h=h.sort(naturalSort);var f={selectFirst:false,autoFill:false,mustMatch:false,matchContains:true,max:1000,minChars:0,hideForLessThanMinChars:false};d.autocomplete(h,f);b.replaceWith(d);var e=function(){var j=d.attr("value");var i=g[j];if(i!==null&&i!==undefined){d.attr("value",i)}else{if(c!=""){d.attr("value",c)}else{d.attr("value","?")}}};d.parents("form").submit(function(){e()});$(document).bind("convert_dbkeys",function(){e()})})}function async_save_text(d,f,e,a,c,h,i,g,b){if(c===undefined){c=30}if(i===undefined){i=4}$("#"+d).live("click",function(){if($("#renaming-active").length>0){return}var l=$("#"+f),k=l.text(),j;if(h){j=$("<textarea></textarea>").attr({rows:i,cols:c}).text(k)}else{j=$("<input type='text'></input>").attr({value:k,size:c})}j.attr("id","renaming-active");j.blur(function(){$(this).remove();l.show();if(b){b(j)}});j.keyup(function(n){if(n.keyCo!
de===27){$(this).trigger("blur")}else{if(n.keyCode===13){var m={};m[a]
=$(this).val();$(this).trigger("blur");$.ajax({url:e,data:m,error:function(){alert("Text editing for elt "+f+" failed")},success:function(o){l.text(o);if(b){b(j)}}})}}});if(g){g(j)}l.hide();j.insertAfter(l);j.focus();j.select();return})}function init_history_items(d,a,c){var b=function(){try{var e=$.jStore.store("history_expand_state");if(e){for(var g in e){$("#"+g+" div.historyItemBody").show()}}}catch(f){$.jStore.remove("history_expand_state")}if($.browser.mozilla){$("div.historyItemBody").each(function(){if(!$(this).is(":visible")){$(this).find("pre.peek").css("overflow","hidden")}})}d.each(function(){var j=this.id;var h=$(this).children("div.historyItemBody");var i=h.find("pre.peek");$(this).find(".historyItemTitleBar > .historyItemTitle").wrap("<a href='javascript:void();'></a>").click(function(){if(h.is(":visible")){if($.browser.mozilla){i.css("overflow","hidden")}h.slideUp("fast");if(!c){var k=$.jStore.store("history_expand_state");if(k){delete k[j];$.jStore.store("hi!
story_expand_state",k)}}}else{h.slideDown("fast",function(){if($.browser.mozilla){i.css("overflow","auto")}});if(!c){var k=$.jStore.store("history_expand_state");if(k===undefined){k={}}k[j]=true;$.jStore.store("history_expand_state",k)}}return false})});$("#top-links > a.toggle").click(function(){var h=$.jStore.store("history_expand_state");if(h===undefined){h={}}$("div.historyItemBody:visible").each(function(){if($.browser.mozilla){$(this).find("pre.peek").css("overflow","hidden")}$(this).slideUp("fast");if(h){delete h[$(this).parent().attr("id")]}});$.jStore.store("history_expand_state",h)}).show()};if(a){b()}else{$.jStore.init("galaxy");$.jStore.engineReady(function(){b()})}}$(document).ready(function(){$("a[confirm]").click(function(){return confirm($(this).attr("confirm"))});if($.fn.tipsy){$(".tooltip").tipsy({gravity:"s"})}make_popup_menus()});
\ No newline at end of file
details: http://www.bx.psu.edu/hg/galaxy/rev/3c6ffa5362d2
changeset: 3667:3c6ffa5362d2
user: Kanwei Li <kanwei(a)gmail.com>
date: Mon Apr 19 14:09:07 2010 -0400
description:
Fix autocomplete for Edit Attributes dbkeys
diffstat:
templates/dataset/edit_attributes.mako | 4 ++--
1 files changed, 2 insertions(+), 2 deletions(-)
diffs (18 lines):
diff -r af004e0932b7 -r 3c6ffa5362d2 templates/dataset/edit_attributes.mako
--- a/templates/dataset/edit_attributes.mako Mon Apr 19 11:40:20 2010 -0400
+++ b/templates/dataset/edit_attributes.mako Mon Apr 19 14:09:07 2010 -0400
@@ -4,12 +4,12 @@
<%def name="title()">${_('Edit Dataset Attributes')}</%def>
<%def name="stylesheets()">
- ${h.css( "base" )}
+ ${h.css( "base", "autocomplete_tagging" )}
</%def>
<%def name="javascripts()">
${parent.javascripts()}
- ${h.js( "galaxy.base" )}
+ ${h.js( "galaxy.base", "jquery.autocomplete", "autocomplete_tagging" )}
</%def>
<%def name="datatype( dataset, datatypes )">
details: http://www.bx.psu.edu/hg/galaxy/rev/af004e0932b7
changeset: 3666:af004e0932b7
user: Nate Coraor <nate(a)bx.psu.edu>
date: Mon Apr 19 11:40:20 2010 -0400
description:
Add column join tool to main tool conf
diffstat:
tool_conf.xml.main | 1 +
1 files changed, 1 insertions(+), 0 deletions(-)
diffs (11 lines):
diff -r 239fb5cf4e37 -r af004e0932b7 tool_conf.xml.main
--- a/tool_conf.xml.main Mon Apr 19 11:14:19 2010 -0400
+++ b/tool_conf.xml.main Mon Apr 19 11:40:20 2010 -0400
@@ -70,6 +70,7 @@
<tool file="filters/compare.xml"/>
<tool file="new_operations/subtract_query.xml"/>
<tool file="stats/grouping.xml" />
+ <tool file="new_operations/column_join.xml"/>
</section>
<section name="Extract Features" id="features">
<tool file="filters/ucsc_gene_bed_to_exon_bed.xml" />
details: http://www.bx.psu.edu/hg/galaxy/rev/d3ff52561d78
changeset: 3664:d3ff52561d78
user: jeremy goecks <jeremy.goecks(a)emory.edu>
date: Mon Apr 19 11:08:25 2010 -0400
description:
Complete Cufflinks wrapper.
diffstat:
tools/ngs_rna/cufflinks_wrapper.py | 64 +++++++++++++++++++-------
tools/ngs_rna/cufflinks_wrapper.xml | 88 ++++++++++++++++++++++++++++++++++--
2 files changed, 128 insertions(+), 24 deletions(-)
diffs (244 lines):
diff -r efd404f7a60b -r d3ff52561d78 tools/ngs_rna/cufflinks_wrapper.py
--- a/tools/ngs_rna/cufflinks_wrapper.py Fri Apr 16 18:46:35 2010 -0400
+++ b/tools/ngs_rna/cufflinks_wrapper.py Mon Apr 19 11:08:25 2010 -0400
@@ -10,20 +10,20 @@
#Parse Command Line
parser = optparse.OptionParser()
parser.add_option( '-1', '--input', dest='input', help=' file of RNA-Seq read alignments in the SAM format. SAM is a standard short read alignment, that allows aligners to attach custom tags to individual alignments, and Cufflinks requires that the alignments you supply have some of these tags. Please see Input formats for more details.' )
- parser.add_option( '-s', '--inner-dist-std-dev', help='The standard deviation for the distribution on inner distances between mate pairs. The default is 20bp.' )
- parser.add_option( '-I', '--max-intron-length', help='The minimum intron length. Cufflinks will not report transcripts with introns longer than this, and will ignore SAM alignments with REF_SKIP CIGAR operations longer than this. The default is 300,000.' )
- parser.add_option( '-F', '--min-isoform-fraction', help='After calculating isoform abundance for a gene, Cufflinks filters out transcripts that it believes are very low abundance, because isoforms expressed at extremely low levels often cannot reliably be assembled, and may even be artifacts of incompletely spliced precursors of processed transcripts. This parameter is also used to filter out introns that have far fewer spliced alignments supporting them. The default is 0.05, or 5% of the most abundant isoform (the major isoform) of the gene.' )
- parser.add_option( '-j', '--pre-mrna-fraction', help='Some RNA-Seq protocols produce a significant amount of reads that originate from incompletely spliced transcripts, and these reads can confound the assembly of fully spliced mRNAs. Cufflinks uses this parameter to filter out alignments that lie within the intronic intervals implied by the spliced alignments. The minimum depth of coverage in the intronic region covered by the alignment is divided by the number of spliced reads, and if the result is lower than this parameter value, the intronic alignments are ignored. The default is 5%.' )
- parser.add_option( '-p', '--num-threads', help='Use this many threads to align reads. The default is 1.' )
+ parser.add_option( '-s', '--inner-dist-std-dev', dest='inner_dist_std_dev', help='The standard deviation for the distribution on inner distances between mate pairs. The default is 20bp.' )
+ parser.add_option( '-I', '--max-intron-length', dest='max_intron_len', help='The minimum intron length. Cufflinks will not report transcripts with introns longer than this, and will ignore SAM alignments with REF_SKIP CIGAR operations longer than this. The default is 300,000.' )
+ parser.add_option( '-F', '--min-isoform-fraction', dest='min_isoform_fraction', help='After calculating isoform abundance for a gene, Cufflinks filters out transcripts that it believes are very low abundance, because isoforms expressed at extremely low levels often cannot reliably be assembled, and may even be artifacts of incompletely spliced precursors of processed transcripts. This parameter is also used to filter out introns that have far fewer spliced alignments supporting them. The default is 0.05, or 5% of the most abundant isoform (the major isoform) of the gene.' )
+ parser.add_option( '-j', '--pre-mrna-fraction', dest='pre_mrna_fraction', help='Some RNA-Seq protocols produce a significant amount of reads that originate from incompletely spliced transcripts, and these reads can confound the assembly of fully spliced mRNAs. Cufflinks uses this parameter to filter out alignments that lie within the intronic intervals implied by the spliced alignments. The minimum depth of coverage in the intronic region covered by the alignment is divided by the number of spliced reads, and if the result is lower than this parameter value, the intronic alignments are ignored. The default is 5%.' )
+ parser.add_option( '-p', '--num-threads', dest='num_threads', help='Use this many threads to align reads. The default is 1.' )
parser.add_option( '-m', '--inner-mean-dist', dest='inner_mean_dist', help='This is the expected (mean) inner distance between mate pairs. \
For, example, for paired end runs with fragments selected at 300bp, \
where each end is 50bp, you should set -r to be 200. The default is 45bp.')
- parser.add_option( '-Q', '--min-mapqual', help='Instructs Cufflinks to ignore alignments with a SAM mapping quality lower than this number. The default is 0.' )
- parser.add_option( '-L', '--label', help='Cufflinks will report transfrags in GTF format, with a prefix given by this option. The default prefix is "CUFF".' )
- parser.add_option( '-G', '--GTF', help='Tells Cufflinks to use the supplied reference annotation to estimate isoform expression. It will not assemble novel transcripts, and the program will ignore alignments not structurally compatible with any reference transcript.' )
+ parser.add_option( '-Q', '--min-mapqual', dest='min_mapqual', help='Instructs Cufflinks to ignore alignments with a SAM mapping quality lower than this number. The default is 0.' )
+ parser.add_option( '-G', '--GTF', dest='GTF', help='Tells Cufflinks to use the supplied reference annotation to estimate isoform expression. It will not assemble novel transcripts, and the program will ignore alignments not structurally compatible with any reference transcript.' )
+
# Advanced Options:
- parser.add_option( '--num-importance-samples', help='Sets the number of importance samples generated for each locus during abundance estimation. Default: 1000' )
- parser.add_option( '--max-mle-iterations', help='Sets the number of iterations allowed during maximum likelihood estimation of abundances. Default: 5000' )
+ parser.add_option( '--num-importance-samples', dest='num_importance_samples', help='Sets the number of importance samples generated for each locus during abundance estimation. Default: 1000' )
+ parser.add_option( '--max-mle-iterations', dest='max_mle_iterations', help='Sets the number of iterations allowed during maximum likelihood estimation of abundances. Default: 5000' )
# Wrapper / Galaxy options.
parser.add_option( '-A', '--assembled-isoforms-output', dest='assembled_isoforms_output_file', help='Assembled isoforms output file; format is GTF.' )
@@ -41,31 +41,61 @@
cmd = "cufflinks"
# Add options.
+ if options.inner_dist_std_dev:
+ cmd += ( " -s %i" % int ( options.inner_dist_std_dev ) )
+ if options.max_intron_len:
+ cmd += ( " -I %i" % int ( options.max_intron_len ) )
+ if options.min_isoform_fraction:
+ cmd += ( " -F %f" % float ( options.min_isoform_fraction ) )
+ if options.pre_mrna_fraction:
+ cmd += ( " -j %f" % float ( options.pre_mrna_fraction ) )
+ if options.num_threads:
+ cmd += ( " -p %i" % int ( options.num_threads ) )
if options.inner_mean_dist:
cmd += ( " -m %i" % int ( options.inner_mean_dist ) )
+ if options.min_mapqual:
+ cmd += ( " -Q %i" % int ( options.min_mapqual ) )
+ if options.GTF:
+ cmd += ( " -G %s" % options.GTF )
+ if options.num_importance_samples:
+ cmd += ( " --num-importance-samples %i" % int ( options.num_importance_samples ) )
+ if options.max_mle_iterations:
+ cmd += ( " --max-mle-iterations %i" % int ( options.max_mle_iterations ) )
# Add input files.
cmd += " " + options.input
-
- # Run
+ print cmd
+
+ # Run command.
try:
- proc = subprocess.Popen( args=cmd, shell=True, cwd=tmp_output_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE )
+ tmp_name = tempfile.NamedTemporaryFile( dir=tmp_output_dir ).name
+ tmp_stderr = open( tmp_name, 'wb' )
+ proc = subprocess.Popen( args=cmd, shell=True, cwd=tmp_output_dir, stderr=tmp_stderr.fileno() )
returncode = proc.wait()
+ tmp_stderr.close()
+
+ # Get stderr, allowing for case where it's very large.
+ tmp_stderr = open( tmp_name, 'rb' )
stderr = ''
buffsize = 1048576
try:
while True:
- stderr += proc.stderr.read( buffsize )
+ stderr += tmp_stderr.read( buffsize )
if not stderr or len( stderr ) % buffsize != 0:
break
except OverflowError:
pass
+ tmp_stderr.close()
+
+ # Error checking.
if returncode != 0:
raise Exception, stderr
+
+ # check that there are results in the output file
+ if len( open( tmp_output_dir + "/transcripts.gtf", 'rb' ).read().strip() ) == 0:
+ raise Exception, 'The main output file is empty, there may be an error with your input file or settings.'
except Exception, e:
- stop_err( 'Error in cufflinks:\n' + str( e ) )
-
- # TODO: look for errors in program output.
+ stop_err( 'Error running cufflinks. ' + str( e ) )
# Copy output files from tmp directory to specified files.
try:
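The hunk above extends the pattern used throughout the wrapper: each optional argument contributes a flag to the cufflinks command line only when a value was supplied. A condensed, self-contained sketch of that pattern, with a plain dict standing in for the optparse options object (the build_cufflinks_cmd name and the example values are invented for illustration)::

    def build_cufflinks_cmd( options ):
        # Append a flag only when the corresponding option was supplied.
        cmd = "cufflinks"
        if options.get( 'inner_dist_std_dev' ):
            cmd += " -s %i" % int( options['inner_dist_std_dev'] )
        if options.get( 'max_intron_len' ):
            cmd += " -I %i" % int( options['max_intron_len'] )
        if options.get( 'min_isoform_fraction' ):
            cmd += " -F %f" % float( options['min_isoform_fraction'] )
        if options.get( 'GTF' ):
            cmd += " -G %s" % options['GTF']  # reference annotation is a path
        cmd += " " + options['input']
        return cmd

    print build_cufflinks_cmd( { 'input': 'hits.sam', 'max_intron_len': '300000' } )
    # cufflinks -I 300000 hits.sam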
diff -r efd404f7a60b -r d3ff52561d78 tools/ngs_rna/cufflinks_wrapper.xml
--- a/tools/ngs_rna/cufflinks_wrapper.xml Fri Apr 16 18:46:35 2010 -0400
+++ b/tools/ngs_rna/cufflinks_wrapper.xml Mon Apr 19 11:08:25 2010 -0400
@@ -1,5 +1,5 @@
<tool id="cufflinks" name="Cufflinks" version="0.8.2">
- <description>Transcript assembly, differential expression, and differential regulation for RNA-Seq</description>
+ <description>transcript assembly, differential expression, and differential regulation for RNA-Seq</description>
<command interpreter="python">
cufflinks_wrapper.py
--input=$input
@@ -7,24 +7,46 @@
--transcripts-expression-output=$transcripts_expression
--genes-expression-output=$genes_expression
--num-threads="4"
+ -I $max_intron_len
+ -F $min_isoform_fraction
+ -j $pre_mrna_fraction
+ -Q $min_map_quality
+ #if $reference_annotation.use_ref == "Yes":
+ -G $reference_annotation.reference_annotation_file
+ #end if
#if $singlePaired.sPaired == "paired":
-r $singlePaired.mean_inner_distance
+ -s $singlePaired.inner_distance_std_dev
#end if
</command>
<inputs>
<param format="sam" name="input" type="data" label="SAM file of aligned RNA-Seq reads" help=""/>
+ <param name="max_intron_len" type="integer" value="300000" label="Max Intron Length" help=""/>
+ <param name="min_isoform_fraction" type="float" value="0.05" label="Min Isoform Fraction" help=""/>
+ <param name="pre_mrna_fraction" type="float" value="0.05" label="Pre MRNA Fraction" help=""/>
+ <param name="min_map_quality" type="integer" value="0" label="Min SAM Map Quality" help=""/>
+ <conditional name="reference_annotation">
+ <param name="use_ref" type="select" label="Use Reference Annotation?">
+ <option value="No">No</option>
+ <option value="Yes">Yes</option>
+ </param>
+ <when value="No"></when>
+ <when value="Yes">
+ <param format="gtf" name="reference_annotation_file" type="data" label="Reference Annotation" help=""/>
+ </when>
+ </conditional>
<conditional name="singlePaired">
<param name="sPaired" type="select" label="Is this library mate-paired?">
<option value="single">Single-end</option>
<option value="paired">Paired-end</option>
</param>
- <when value="single">
-
- </when>
+ <when value="single"></when>
<when value="paired">
<param name="mean_inner_distance" type="integer" value="20" label="Mean Inner Distance between Mate Pairs"/>
+ <param name="inner_distance_std_dev" type="integer" value="20" label="Standard Deviation for Inner Distance between Mate Pairs"/>
</when>
</conditional>
+
</inputs>
<outputs>
@@ -67,19 +89,64 @@
**Input formats**
-Cufflinks accepts files in SAM format.
+Cufflinks takes a text file of SAM alignments as input. The RNA-Seq read mapper TopHat produces output in this format, and is recommended for use with Cufflinks. However Cufflinks will accept SAM alignments generated by any read mapper. Here's an example of an alignment Cufflinks will accept::
+
+ s6.25mer.txt-913508 16 chr1 4482736 255 14M431N11M * 0 0 \
+ CAAGATGCTAGGCAAGTCTTGGAAG IIIIIIIIIIIIIIIIIIIIIIIII NM:i:0 XS:A:-
+
+Note the use of the custom tag XS. This attribute, which must have a value of "+" or "-", indicates which strand the RNA that produced this read came from. While this tag can be applied to any alignment, including unspliced ones, it must be present for all spliced alignment records (those with a 'N' operation in the CIGAR string).
+The SAM file supplied to Cufflinks must be sorted by reference position. If you aligned your reads with TopHat, your alignments will be properly sorted already. If you used another tool, you may want to make sure they are properly sorted as follows::
+
+ sort -k 3,3 -k 4,4n hits.sam > hits.sam.sorted
+
+NOTE: Cufflinks currently only supports SAM alignments with the CIGAR match ('M') and reference skip ('N') operations. Support for the other operations, such as insertions, deletions, and clipping, will be added in the future.
------
**Outputs**
-TODO
+Cufflinks produces three output files:
+
+Transcripts and Genes:
+
+This GTF file contains Cufflinks' assembled isoforms. The first 7 columns are standard GTF, and the last column contains attributes, some of which are also standardized (e.g. gene_id, transcript_id). There is one GTF record per row, and each record represents either a transcript or an exon within a transcript. The columns are defined as follows::
+
+ Column number Column name Example Description
+ -----------------------------------------------------
+ 1 seqname chrX Chromosome or contig name
+ 2 source Cufflinks The name of the program that generated this file (always 'Cufflinks')
+ 3 feature exon The type of record (always either "transcript" or "exon").
+ 4 start 77696957 The leftmost coordinate of this record (where 0 is the leftmost possible coordinate)
+ 5 end 77712009 The rightmost coordinate of this record, inclusive.
+ 6 score 77712009 The most abundant isoform for each gene is assigned a score of 1000. Minor isoforms are scored by the ratio (minor FPKM/major FPKM)
+ 7 strand + Cufflinks' guess for which strand the isoform came from. Always one of '+', '-' '.'
+ 7 frame . Cufflinks does not predict where the start and stop codons (if any) are located within each transcript, so this field is not used.
+ 8 attributes See below
+
+Each GTF record is decorated with the following attributes::
+
+ Attribute Example Description
+ -----------------------------------------
+ gene_id CUFF.1 Cufflinks gene id
+ transcript_id CUFF.1.1 Cufflinks transcript id
+ FPKM 101.267 Isoform-level relative abundance in Reads Per Kilobase of exon model per Million mapped reads
+ frac 0.7647 Reserved. Please ignore, as this attribute may be deprecated in the future
+ conf_lo 0.07 Lower bound of the 95% confidence interval of the abundance of this isoform, as a fraction of the isoform abundance. That is, lower bound = FPKM * (1.0 - conf_lo)
+ conf_hi 0.1102 Upper bound of the 95% confidence interval of the abundance of this isoform, as a fraction of the isoform abundance. That is, upper bound = FPKM * (1.0 + conf_hi)
+ cov 100.765 Estimate for the absolute depth of read coverage across the whole transcript
+
+
+Transcripts only:
+This file is a tab-delimited file containing one row per transcript, with columns containing the attributes above. There are a few additional attributes not in the table above, but these are reserved for debugging, and may change or disappear in the future.
+
+Genes only:
+This file contains gene-level coordinates and expression values.
-------
**Cufflinks settings**
-All of the options have a default value. You can change any of them. Some of the options in Cufflinks have been implemented here.
+All of the options have a default value. You can change any of them. Most of the options in Cufflinks have been implemented here.
------
@@ -87,5 +154,12 @@
This is a list of implemented Cufflinks options::
+ -m INT This is the expected (mean) inner distance between mate pairs. For example, for paired end runs with fragments selected at 300bp, where each end is 50bp, you should set -r to be 200. The default is 45bp.
+ -s INT The standard deviation for the distribution on inner distances between mate pairs. The default is 20bp.
+ -I INT The minimum intron length. Cufflinks will not report transcripts with introns longer than this, and will ignore SAM alignments with REF_SKIP CIGAR operations longer than this. The default is 300,000.
+ -F After calculating isoform abundance for a gene, Cufflinks filters out transcripts that it believes are very low abundance, because isoforms expressed at extremely low levels often cannot reliably be assembled, and may even be artifacts of incompletely spliced precursors of processed transcripts. This parameter is also used to filter out introns that have far fewer spliced alignments supporting them. The default is 0.05, or 5% of the most abundant isoform (the major isoform) of the gene.
+ -j Some RNA-Seq protocols produce a significant amount of reads that originate from incompletely spliced transcripts, and these reads can confound the assembly of fully spliced mRNAs. Cufflinks uses this parameter to filter out alignments that lie within the intronic intervals implied by the spliced alignments. The minimum depth of coverage in the intronic region covered by the alignment is divided by the number of spliced reads, and if the result is lower than this parameter value, the intronic alignments are ignored. The default is 5%.
+ -Q Instructs Cufflinks to ignore alignments with a SAM mapping quality lower than this number. The default is 0.
+ -G Tells Cufflinks to use the supplied reference annotation to estimate isoform expression. It will not assemble novel transcripts, and the program will ignore alignments not structurally compatible with any reference transcript.
</help>
</tool>
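To make the conf_lo/conf_hi attributes described in the help text concrete, a short worked example using the sample values from the attribute table (FPKM 101.267, conf_lo 0.07, conf_hi 0.1102); the upper bound here uses conf_hi, matching the attribute it belongs to::

    # Confidence interval bounds expressed as fractions of the FPKM estimate.
    fpkm, conf_lo, conf_hi = 101.267, 0.07, 0.1102
    lower_bound = fpkm * ( 1.0 - conf_lo )   # ~94.18
    upper_bound = fpkm * ( 1.0 + conf_hi )   # ~112.43
    print lower_bound, upper_bound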