commit/galaxy-central: 6 new changesets
6 new commits in galaxy-central:

https://bitbucket.org/galaxy/galaxy-central/commits/21b53ebad211/
Changeset:   21b53ebad211
Branch:      stable
User:        jmchilton
Date:        2014-06-29 20:24:32
Summary:     Spelling fix in method name.
Affected #:  2 files

diff -r 65e5e5b72893445eef5c4d2f43350df72e89b436 -r 21b53ebad2119dd9968c72f67b53d069ebc17902 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -2874,7 +2874,7 @@
         Find any additional datasets generated by a tool and attach (for
         cases where number of outputs is not known in advance).
         """
-        return output_collect.collect_primary_datatasets( self, output, job_working_directory )
+        return output_collect.collect_primary_datasets( self, output, job_working_directory )
 
     def to_dict( self, trans, link_details=False, io_details=False ):
         """ Returns dict of tool. """

diff -r 65e5e5b72893445eef5c4d2f43350df72e89b436 -r 21b53ebad2119dd9968c72f67b53d069ebc17902 lib/galaxy/tools/parameters/output_collect.py
--- a/lib/galaxy/tools/parameters/output_collect.py
+++ b/lib/galaxy/tools/parameters/output_collect.py
@@ -14,7 +14,7 @@
 DEFAULT_EXTRA_FILENAME_PATTERN = r"primary_DATASET_ID_(?P<designation>[^_]+)_(?P<visible>[^_]+)_(?P<ext>[^_]+)(_(?P<dbkey>[^_]+))?"
 
 
-def collect_primary_datatasets( tool, output, job_working_directory ):
+def collect_primary_datasets( tool, output, job_working_directory ):
     app = tool.app
     sa_session = tool.sa_session
     new_primary_datasets = {}

https://bitbucket.org/galaxy/galaxy-central/commits/c14bdcb9ae96/
Changeset:   c14bdcb9ae96
Branch:      stable
User:        jmchilton
Date:        2014-06-29 20:24:32
Summary:     Allow discovered datasets to use input data format in 'ext' definition.
Affected #:  4 files

diff -r 21b53ebad2119dd9968c72f67b53d069ebc17902 -r c14bdcb9ae9643525c0797090f293030ea899948 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -1065,6 +1065,13 @@
         out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] )
         inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] )
         out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] )
+        input_ext = 'data'
+        for _, data in inp_data.items():
+            # For loop odd, but sort simulating behavior in galaxy.tools.actions
+            if not data:
+                continue
+            input_ext = data.ext
+
         param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] ) # why not re-use self.param_dict here? ##dunno...probably should, this causes tools.parameters.basic.UnvalidatedValue to be used in following methods instead of validated and transformed values during i.e. running workflows
         param_dict = self.tool.params_from_strings( param_dict, self.app )
         # Check for and move associated_files
@@ -1075,7 +1082,7 @@
         # Create generated output children and primary datasets and add to param_dict
         collected_datasets = {
             'children': self.tool.collect_child_datasets(out_data, self.working_directory),
-            'primary': self.tool.collect_primary_datasets(out_data, self.working_directory)
+            'primary': self.tool.collect_primary_datasets(out_data, self.working_directory, input_ext)
         }
         param_dict.update({'__collected_datasets__': collected_datasets})
         # Certain tools require tasks to be completed after job execution

diff -r 21b53ebad2119dd9968c72f67b53d069ebc17902 -r c14bdcb9ae9643525c0797090f293030ea899948 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -2869,12 +2869,12 @@
         self.sa_session.flush()
         return children
 
-    def collect_primary_datasets( self, output, job_working_directory ):
+    def collect_primary_datasets( self, output, job_working_directory, input_ext ):
         """
         Find any additional datasets generated by a tool and attach (for
         cases where number of outputs is not known in advance).
         """
-        return output_collect.collect_primary_datasets( self, output, job_working_directory )
+        return output_collect.collect_primary_datasets( self, output, job_working_directory, input_ext )
 
     def to_dict( self, trans, link_details=False, io_details=False ):
         """ Returns dict of tool. """

diff -r 21b53ebad2119dd9968c72f67b53d069ebc17902 -r c14bdcb9ae9643525c0797090f293030ea899948 lib/galaxy/tools/parameters/output_collect.py
--- a/lib/galaxy/tools/parameters/output_collect.py
+++ b/lib/galaxy/tools/parameters/output_collect.py
@@ -14,7 +14,7 @@
 DEFAULT_EXTRA_FILENAME_PATTERN = r"primary_DATASET_ID_(?P<designation>[^_]+)_(?P<visible>[^_]+)_(?P<ext>[^_]+)(_(?P<dbkey>[^_]+))?"
 
 
-def collect_primary_datasets( tool, output, job_working_directory ):
+def collect_primary_datasets( tool, output, job_working_directory, input_ext ):
     app = tool.app
     sa_session = tool.sa_session
     new_primary_datasets = {}
@@ -66,6 +66,8 @@
         designation = fields_match.designation
         visible = fields_match.visible
         ext = fields_match.ext
+        if ext == "input":
+            ext = input_ext
         dbkey = fields_match.dbkey
         # Create new primary dataset
         primary_data = app.model.HistoryDatasetAssociation( extension=ext,

diff -r 21b53ebad2119dd9968c72f67b53d069ebc17902 -r c14bdcb9ae9643525c0797090f293030ea899948 test/functional/tools/multi_output_configured.xml
--- a/test/functional/tools/multi_output_configured.xml
+++ b/test/functional/tools/multi_output_configured.xml
@@ -8,19 +8,24 @@
         echo "1" > subdir2/CUSTOM_1.txt;
         echo "2" > subdir2/CUSTOM_2.tabular;
         echo "3" > subdir2/CUSTOM_3.txt;
+        mkdir subdir3;
+        echo "Foo" > subdir3/Foo;
     </command>
     <inputs>
-        <param name="input" type="integer" value="7" />
+        <param name="num_param" type="integer" value="7" />
+        <param name="input" type="data" />
     </inputs>
     <outputs>
         <data format="txt" name="report">
             <discover_datasets pattern="__designation_and_ext__" directory="subdir1" />
             <discover_datasets pattern="CUSTOM_(?P<designation>.+)\.(?P<ext>.+)" directory="subdir2" />
+            <discover_datasets pattern="__designation__" directory="subdir3" ext="input" />
         </data>
     </outputs>
     <tests>
         <test>
-            <param name="input" value="7" />
+            <param name="num_param" value="7" />
+            <param name="input" ftype="txt" value="simple_line.txt"/>
             <output name="report">
                 <assert_contents>
                     <has_line line="Hello" />
@@ -37,6 +42,9 @@
             <discovered_dataset designation="2" ftype="tabular">
                 <assert_contents><has_line line="2" /></assert_contents>
             </discovered_dataset>
+            <discovered_dataset designation="Foo" ftype="txt">
+                <assert_contents><has_line line="Foo" /></assert_contents>
+            </discovered_dataset>
         </output>
     </test>
 </tests>

https://bitbucket.org/galaxy/galaxy-central/commits/b6fae2cd673c/
Changeset:   b6fae2cd673c
User:        davebgx
Date:        2014-07-08 19:17:22
Summary:     Remove deprecated "data admin" code, since the functionality was implemented better with Dan's data manager framework.
Affected #:  14 files

diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 lib/galaxy/app.py
--- a/lib/galaxy/app.py
+++ b/lib/galaxy/app.py
@@ -12,7 +12,6 @@
 from galaxy.visualization.data_providers.registry import DataProviderRegistry
 from galaxy.visualization.registry import VisualizationsRegistry
 from galaxy.tools.imp_exp import load_history_imp_exp_tools
-from galaxy.tools.genome_index import load_genome_index_tools
 from galaxy.sample_tracking import external_service_types
 from galaxy.openid.providers import OpenIDProviders
 from galaxy.tools.data_manager.manager import DataManagers
@@ -93,8 +92,6 @@
         self.datatypes_registry.load_external_metadata_tool( self.toolbox )
         # Load history import/export tools.
         load_history_imp_exp_tools( self.toolbox )
-        # Load genome indexer tool.
-        load_genome_index_tools( self.toolbox )
         # visualizations registry: associates resources with visualizations, controls how to render
         self.visualizations_registry = None
         if self.config.visualization_plugins_directory:

diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -1192,9 +1192,6 @@
         param_dict = self.tool.params_from_strings( param_dict, self.app )
         # Check for and move associated_files
         self.tool.collect_associated_files(out_data, self.working_directory)
-        gitd = self.sa_session.query( model.GenomeIndexToolData ).filter_by( job=job ).first()
-        if gitd:
-            self.tool.collect_associated_files({'': gitd}, self.working_directory)
         # Create generated output children and primary datasets and add to param_dict
         collected_datasets = {
             'children': self.tool.collect_child_datasets(out_data, self.working_directory),
@@ -1248,7 +1245,6 @@
             self.external_output_metadata.cleanup_external_metadata( self.sa_session )
             galaxy.tools.imp_exp.JobExportHistoryArchiveWrapper( self.job_id ).cleanup_after_job( self.sa_session )
             galaxy.tools.imp_exp.JobImportHistoryArchiveWrapper( self.app, self.job_id ).cleanup_after_job()
-            galaxy.tools.genome_index.GenomeIndexToolWrapper( self.job_id ).postprocessing( self.sa_session, self.app )
             if delete_files:
                 self.app.object_store.delete(self.get_job(), base_dir='job_work', entire_dir=True, dir_only=True, extra_dir=str(self.job_id))
         except:
@@ -1351,10 +1347,8 @@
         dataset_path_rewriter = self.dataset_path_rewriter
 
         job = self.get_job()
-        # Job output datasets are combination of history, library, jeha and gitd datasets.
+        # Job output datasets are combination of history, library, and jeha datasets.
         special = self.sa_session.query( model.JobExportHistoryArchive ).filter_by( job=job ).first()
-        if not special:
-            special = self.sa_session.query( model.GenomeIndexToolData ).filter_by( job=job ).first()
         false_path = None
 
         results = []

diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 lib/galaxy/jobs/deferred/genome_index.py
--- a/lib/galaxy/jobs/deferred/genome_index.py
+++ /dev/null
@@ -1,43 +0,0 @@
-"""
-Module for managing genome transfer jobs.
-""" -from __future__ import with_statement - -import logging, shutil, gzip, bz2, zipfile, tempfile, tarfile, sys, os - -from galaxy import eggs -from sqlalchemy import and_ -from data_transfer import * - -log = logging.getLogger( __name__ ) - -__all__ = [ 'GenomeIndexPlugin' ] - -class GenomeIndexPlugin( DataTransfer ): - - def __init__( self, app ): - super( GenomeIndexPlugin, self ).__init__( app ) - self.app = app - self.tool = app.toolbox.tools_by_id['__GENOME_INDEX__'] - self.sa_session = app.model.context.current - - def create_job( self, trans, path, indexes, dbkey, intname ): - params = dict( user=trans.user.id, path=path, indexes=indexes, dbkey=dbkey, intname=intname ) - deferred = trans.app.model.DeferredJob( state = self.app.model.DeferredJob.states.NEW, plugin = 'GenomeIndexPlugin', params = params ) - self.sa_session.add( deferred ) - self.sa_session.flush() - log.debug( 'Job created, id %d' % deferred.id ) - return deferred.id - - def check_job( self, job ): - log.debug( 'Job check' ) - return 'ready' - - def run_job( self, job ): - incoming = dict( path=os.path.abspath( job.params[ 'path' ] ), indexer=job.params[ 'indexes' ][0], user=job.params[ 'user' ] ) - indexjob = self.tool.execute( self, set_output_hid=False, history=None, incoming=incoming, transfer=None, deferred=job ) - job.params[ 'indexjob' ] = indexjob[0].id - job.state = self.app.model.DeferredJob.states.RUNNING - self.sa_session.add( job ) - self.sa_session.flush() - return self.app.model.DeferredJob.states.RUNNING diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 lib/galaxy/jobs/deferred/genome_transfer.py --- a/lib/galaxy/jobs/deferred/genome_transfer.py +++ /dev/null @@ -1,250 +0,0 @@ -""" -Module for managing genome transfer jobs. 
-""" -from __future__ import with_statement - -import logging, shutil, gzip, bz2, zipfile, tempfile, tarfile, sys - -from galaxy import eggs -from sqlalchemy import and_ - -from galaxy.util.odict import odict -from galaxy.workflow.modules import module_factory -from galaxy.jobs.actions.post import ActionBox - -from galaxy.tools.parameters import visit_input_values -from galaxy.tools.parameters.basic import DataToolParameter -from galaxy.tools.data import ToolDataTableManager - -from galaxy.datatypes.checkers import * -from galaxy.datatypes.sequence import Fasta -from data_transfer import * - -log = logging.getLogger( __name__ ) - -__all__ = [ 'GenomeTransferPlugin' ] - -class GenomeTransferPlugin( DataTransfer ): - - locations = {} - - def __init__( self, app ): - super( GenomeTransferPlugin, self ).__init__( app ) - self.app = app - self.tool = app.toolbox.tools_by_id['__GENOME_INDEX__'] - self.sa_session = app.model.context.current - tdtman = ToolDataTableManager( app.config.tool_data_path ) - xmltree = tdtman.load_from_config_file( app.config.tool_data_table_config_path, app.config.tool_data_path ) - for node in xmltree: - table = node.get('name') - location = node.findall('file')[0].get('path') - self.locations[table] = location - - def create_job( self, trans, url, dbkey, intname, indexes ): - job = trans.app.transfer_manager.new( protocol='http', url=url ) - params = dict( user=trans.user.id, transfer_job_id=job.id, protocol='http', type='init_transfer', url=url, dbkey=dbkey, indexes=indexes, intname=intname, liftover=None ) - deferred = trans.app.model.DeferredJob( state = self.app.model.DeferredJob.states.NEW, plugin = 'GenomeTransferPlugin', params = params ) - self.sa_session.add( deferred ) - self.sa_session.flush() - return deferred.id - - def check_job( self, job ): - if job.params['type'] == 'init_transfer': - if not hasattr(job, 'transfer_job'): - job.transfer_job = self.sa_session.query( self.app.model.TransferJob ).get( int( job.params[ 'transfer_job_id' ] ) ) - else: - self.sa_session.refresh( job.transfer_job ) - if job.transfer_job.state == 'done': - transfer = job.transfer_job - transfer.state = 'downloaded' - job.params['type'] = 'extract_transfer' - self.sa_session.add( job ) - self.sa_session.add( transfer ) - self.sa_session.flush() - return self.job_states.READY - elif job.transfer_job.state == 'running': - return self.job_states.WAIT - elif job.transfer_job.state == 'new': - assert job.params[ 'protocol' ] in [ 'http', 'ftp', 'https' ], 'Unknown protocol %s' % job.params[ 'protocol' ] - self.app.transfer_manager.run( job.transfer_job ) - self.sa_session.add( job.transfer_job ) - self.sa_session.flush() - return self.job_states.WAIT - else: - log.error( "An error occurred while downloading from %s" % job.params[ 'url' ] ) - return self.job_states.INVALID - elif job.params[ 'type' ] == 'extract_transfer': - return self.job_states.READY - - def get_job_status( self, jobid ): - job = self.sa_session.query( self.app.model.DeferredJob ).get( int( jobid ) ) - if 'transfer_job_id' in job.params: - if not hasattr( job, 'transfer_job' ): - job.transfer_job = self.sa_session.query( self.app.model.TransferJob ).get( int( job.params[ 'transfer_job_id' ] ) ) - else: - self.sa_session.refresh( job.transfer_job ) - return job - - def run_job( self, job ): - params = job.params - dbkey = params[ 'dbkey' ] - if not hasattr( job, 'transfer_job' ): - job.transfer_job = self.sa_session.query( self.app.model.TransferJob ).get( int( job.params[ 'transfer_job_id' ] ) ) - else: - 
self.sa_session.refresh( job.transfer_job ) - transfer = job.transfer_job - if params[ 'type' ] == 'extract_transfer': - CHUNK_SIZE = 2**20 - destpath = os.path.join( self.app.config.get( 'genome_data_path', 'tool-data/genome' ), job.params[ 'dbkey' ], 'seq' ) - destfile = '%s.fa' % job.params[ 'dbkey' ] - destfilepath = os.path.join( destpath, destfile ) - tmpprefix = '%s_%s_download_unzip_' % ( job.params['dbkey'], job.params[ 'transfer_job_id' ] ) - tmppath = os.path.dirname( os.path.abspath( transfer.path ) ) - if not os.path.exists( destpath ): - os.makedirs( destpath ) - protocol = job.params[ 'protocol' ] - data_type = self._check_compress( transfer.path ) - if data_type is None: - sniffer = Fasta() - if sniffer.sniff( transfer.path ): - data_type = 'fasta' - fd, uncompressed = tempfile.mkstemp( prefix=tmpprefix, dir=tmppath, text=False ) - if data_type in [ 'tar.gzip', 'tar.bzip' ]: - fp = open( transfer.path, 'r' ) - tar = tarfile.open( mode = 'r:*', bufsize = CHUNK_SIZE, fileobj = fp ) - files = tar.getmembers() - for filename in files: - z = tar.extractfile(filename) - while 1: - try: - chunk = z.read( CHUNK_SIZE ) - except IOError: - os.close( fd ) - log.error( 'Problem decompressing compressed data' ) - exit() - if not chunk: - break - os.write( fd, chunk ) - os.write( fd, '\n' ) - os.close( fd ) - tar.close() - fp.close() - elif data_type == 'gzip': - compressed = gzip.open( transfer.path, mode = 'rb' ) - while 1: - try: - chunk = compressed.read( CHUNK_SIZE ) - except IOError: - compressed.close() - log.error( 'Problem decompressing compressed data' ) - exit() - if not chunk: - break - os.write( fd, chunk ) - os.close( fd ) - compressed.close() - elif data_type == 'bzip': - compressed = bz2.BZ2File( transfer.path, mode = 'r' ) - while 1: - try: - chunk = compressed.read( CHUNK_SIZE ) - except IOError: - compressed.close() - log.error( 'Problem decompressing compressed data' ) - exit() - if not chunk: - break - os.write( fd, chunk ) - os.close( fd ) - compressed.close() - elif data_type == 'zip': - uncompressed_name = None - unzipped = False - z = zipfile.ZipFile( transfer.path ) - z.debug = 3 - for name in z.namelist(): - if name.endswith('/'): - continue - zipped_file = z.open( name ) - while 1: - try: - chunk = zipped_file.read( CHUNK_SIZE ) - except IOError: - os.close( fd ) - log.error( 'Problem decompressing zipped data' ) - return self.app.model.DeferredJob.states.INVALID - if not chunk: - break - os.write( fd, chunk ) - zipped_file.close() - os.close( fd ) - z.close() - elif data_type == 'fasta': - uncompressed = transfer.path - else: - job.state = self.app.model.DeferredJob.states.INVALID - log.error( "Unrecognized compression format for file %s." 
% transfer.path ) - self.sa_session.add( job ) - self.sa_session.flush() - return - shutil.move( uncompressed, destfilepath ) - if os.path.exists( transfer.path ): - os.remove( transfer.path ) - os.chmod( destfilepath, 0644 ) - fastaline = '\t'.join( [ dbkey, dbkey, params[ 'intname' ], os.path.abspath( destfilepath ) ] ) - self._add_line( 'all_fasta', fastaline ) - if params[ 'indexes' ] is not None: - job.state = self.app.model.DeferredJob.states.WAITING - job.params[ 'indexjobs' ] = [] - else: - job.state = self.app.model.DeferredJob.states.OK - job.params[ 'type' ] = 'finish_transfer' - transfer.path = os.path.abspath(destfilepath) - transfer.state = 'done' - self.sa_session.add( job ) - self.sa_session.add( transfer ) - if transfer.state == 'done': - if params[ 'indexes' ] is not None: - for indexer in params[ 'indexes' ]: - incoming = dict(indexer=indexer, dbkey=params[ 'dbkey' ], intname=params[ 'intname' ], path=transfer.path, user=params['user'] ) - deferred = self.tool.execute( self, set_output_hid=False, history=None, incoming=incoming, transfer=transfer, deferred=job ) - job.params[ 'indexjobs' ].append( deferred[0].id ) - else: - job.state = self.app.model.DeferredJob.states.OK - self.sa_session.add( job ) - self.sa_session.flush() - return self.app.model.DeferredJob.states.OK - - def _check_compress( self, filepath ): - retval = '' - if tarfile.is_tarfile( filepath ): - retval = 'tar.' - if check_zip( filepath ): - return 'zip' - is_bzipped, is_valid = check_bz2( filepath ) - if is_bzipped and is_valid: - return retval + 'bzip' - is_gzipped, is_valid = check_gzip( filepath ) - if is_gzipped and is_valid: - return retval + 'gzip' - return None - - def _add_line( self, locfile, newline ): - filepath = self.locations[ locfile ] - origlines = [] - output = [] - comments = [] - with open( filepath, 'r' ) as destfile: - for line in destfile: - if line.startswith( '#' ): - comments.append( line.strip() ) - else: - origlines.append( line.strip() ) - if newline not in origlines: - origlines.append( newline ) - output.extend( comments ) - origlines.sort() - output.extend( origlines ) - with open( filepath, 'w+' ) as destfile: - destfile.write( '\n'.join( output ) ) - diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 lib/galaxy/jobs/deferred/liftover_transfer.py --- a/lib/galaxy/jobs/deferred/liftover_transfer.py +++ /dev/null @@ -1,158 +0,0 @@ -""" -Module for managing genome transfer jobs. 
-""" -from __future__ import with_statement - -import logging, shutil, gzip, tempfile, sys - -from galaxy import eggs -from sqlalchemy import and_ - -from galaxy.util.odict import odict -from galaxy.workflow.modules import module_factory -from galaxy.jobs.actions.post import ActionBox - -from galaxy.tools.parameters import visit_input_values -from galaxy.tools.parameters.basic import DataToolParameter - -from galaxy.datatypes.checkers import * - -from data_transfer import * - -log = logging.getLogger( __name__ ) - -__all__ = [ 'LiftOverTransferPlugin' ] - -class LiftOverTransferPlugin( DataTransfer ): - - locations = {} - - def __init__( self, app ): - super( LiftOverTransferPlugin, self ).__init__( app ) - self.app = app - self.sa_session = app.model.context.current - - def create_job( self, trans, url, dbkey, from_genome, to_genome, destfile, parentjob ): - job = trans.app.transfer_manager.new( protocol='http', url=url ) - params = dict( user=trans.user.id, transfer_job_id=job.id, protocol='http', - type='init_transfer', dbkey=dbkey, from_genome=from_genome, - to_genome=to_genome, destfile=destfile, parentjob=parentjob ) - deferred = trans.app.model.DeferredJob( state = self.app.model.DeferredJob.states.NEW, plugin = 'LiftOverTransferPlugin', params = params ) - self.sa_session.add( deferred ) - self.sa_session.flush() - return deferred.id - - def check_job( self, job ): - if job.params['type'] == 'init_transfer': - if not hasattr(job, 'transfer_job'): - job.transfer_job = self.sa_session.query( self.app.model.TransferJob ).get( int( job.params[ 'transfer_job_id' ] ) ) - else: - self.sa_session.refresh( job.transfer_job ) - if job.transfer_job.state == 'done': - transfer = job.transfer_job - transfer.state = 'downloaded' - job.params['type'] = 'extract_transfer' - self.sa_session.add( job ) - self.sa_session.add( transfer ) - self.sa_session.flush() - return self.job_states.READY - elif job.transfer_job.state == 'running': - return self.job_states.WAIT - elif job.transfer_job.state == 'new': - assert job.params[ 'protocol' ] in [ 'http', 'ftp', 'https' ], 'Unknown protocol %s' % job.params[ 'protocol' ] - ready = True - parent = self.sa_session.query( self.app.model.DeferredJob ).get( int( job.params[ 'parentjob' ] ) ) - if not hasattr( parent, 'transfer_job' ): - parent.transfer_job = self.sa_session.query( self.app.model.TransferJob ).get( int( parent.params[ 'transfer_job_id' ] ) ) - if parent.transfer_job.state not in [ 'ok', 'error', 'done' ]: - ready = False - for lo_job in parent.params[ 'liftover' ]: - liftoverjob = self.sa_session.query( self.app.model.TransferJob ).get( int( lo_job ) ) - if liftoverjob: - if liftoverjob.state not in [ 'ok', 'error', 'new', 'done' ]: - ready = False - if ready: - self.app.transfer_manager.run( job.transfer_job ) - self.sa_session.add( job.transfer_job ) - self.sa_session.flush() - return self.job_states.WAIT - else: - log.error( "An error occurred while downloading from %s" % job.transfer_job.params[ 'url' ] ) - return self.job_states.INVALID - elif job.params[ 'type' ] == 'extract_transfer': - return self.job_states.READY - - def get_job_status( self, jobid ): - job = self.sa_session.query( self.app.model.DeferredJob ).get( int( jobid ) ) - return job - - def run_job( self, job ): - params = job.params - dbkey = params[ 'dbkey' ] - source = params[ 'from_genome' ] - target = params[ 'to_genome' ] - if not hasattr( job, 'transfer_job' ): - job.transfer_job = self.sa_session.query( self.app.model.TransferJob ).get( int( job.params[ 
'transfer_job_id' ] ) ) - else: - self.sa_session.refresh( job.transfer_job ) - transfer = job.transfer_job - if params[ 'type' ] == 'extract_transfer': - CHUNK_SIZE = 2**20 - destpath = os.path.join( self.app.config.get( 'genome_data_path', 'tool-data/genome' ), source, 'liftOver' ) - if not os.path.exists( destpath ): - os.makedirs( destpath ) - destfile = job.params[ 'destfile' ] - destfilepath = os.path.join( destpath, destfile ) - tmpprefix = '%s_%s_download_unzip_' % ( job.params['dbkey'], job.params[ 'transfer_job_id' ] ) - tmppath = os.path.dirname( os.path.abspath( transfer.path ) ) - if not os.path.exists( destpath ): - os.makedirs( destpath ) - fd, uncompressed = tempfile.mkstemp( prefix=tmpprefix, dir=tmppath, text=False ) - chain = gzip.open( transfer.path, 'rb' ) - while 1: - try: - chunk = chain.read( CHUNK_SIZE ) - except IOError: - os.close( fd ) - log.error( 'Problem decompressing compressed data' ) - exit() - if not chunk: - break - os.write( fd, chunk ) - os.close( fd ) - chain.close() - # Replace the gzipped file with the decompressed file if it's safe to do so - shutil.move( uncompressed, destfilepath ) - os.remove( transfer.path ) - os.chmod( destfilepath, 0644 ) - locline = '\t'.join( [ source, target, os.path.abspath( destfilepath ) ] ) - self._add_line( locline ) - job.state = self.app.model.DeferredJob.states.OK - job.params[ 'type' ] = 'finish_transfer' - transfer.path = os.path.abspath(destfilepath) - transfer.state = 'done' - parentjob = self.sa_session.query( self.app.model.DeferredJob ).get( int( job.params[ 'parentjob' ] ) ) - finished = True - for i in parentjob.params[ 'liftover' ]: - sibling = self.sa_session.query( self.app.model.DeferredJob ).get( int( i ) ) - if sibling.state not in [ 'done', 'ok', 'error' ]: - finished = False - if finished: - parentjob.state = self.app.model.DeferredJob.states.OK - self.sa_session.add( parentjob ) - self.sa_session.add( job ) - self.sa_session.add( transfer ) - self.sa_session.flush() - return self.app.model.DeferredJob.states.OK - - def _add_line( self, newline ): - filepath = 'tool-data/liftOver.loc' - origlines = [] - with open( filepath, 'r' ) as destfile: - for line in destfile: - origlines.append( line.strip() ) - if newline not in origlines: - origlines.append( newline ) - with open( filepath, 'w+' ) as destfile: - destfile.write( '\n'.join( origlines ) ) - diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 lib/galaxy/tools/actions/index_genome.py --- a/lib/galaxy/tools/actions/index_genome.py +++ /dev/null @@ -1,67 +0,0 @@ -import tempfile -from __init__ import ToolAction -from galaxy.util.odict import odict -from galaxy.tools.genome_index import * - -import logging -log = logging.getLogger( __name__ ) - -class GenomeIndexToolAction( ToolAction ): - """Tool action used for exporting a history to an archive. """ - - def execute( self, tool, trans, *args, **kwargs ): - # - # Get genome to index. - # - incoming = kwargs['incoming'] - # - # Create the job and output dataset objects - # - job = trans.app.model.Job() - job.tool_id = tool.id - job.user_id = incoming['user'] - start_job_state = job.state # should be job.states.NEW - job.state = job.states.WAITING # we need to set job state to something other than NEW, - # or else when tracking jobs in db it will be picked up - # before we have added input / output parameters - trans.sa_session.add( job ) - - # Create dataset that will serve as archive. 
- temp_dataset = trans.app.model.Dataset( state=trans.app.model.Dataset.states.NEW ) - trans.sa_session.add( temp_dataset ) - - trans.sa_session.flush() # ensure job.id and archive_dataset.id are available - trans.app.object_store.create( temp_dataset ) # set the object store id, create dataset (because galaxy likes having datasets) - - # - # Setup job and job wrapper. - # - - # Add association for keeping track of index jobs, transfer jobs, and so on. - user = trans.sa_session.query( trans.app.model.User ).get( int( incoming['user'] ) ) - assoc = trans.app.model.GenomeIndexToolData( job=job, dataset=temp_dataset, fasta_path=incoming['path'], \ - indexer=incoming['indexer'], user=user, \ - deferred_job=kwargs['deferred'], transfer_job=kwargs['transfer'] ) - trans.sa_session.add( assoc ) - - job_wrapper = GenomeIndexToolWrapper( job ) - cmd_line = job_wrapper.setup_job( assoc ) - - # - # Add parameters to job_parameter table. - # - incoming[ '__GENOME_INDEX_COMMAND__' ] = cmd_line - for name, value in tool.params_to_strings( incoming, trans.app ).iteritems(): - job.add_parameter( name, value ) - - job.state = start_job_state # job inputs have been configured, restore initial job state - job.set_handler(tool.get_job_handler(None)) - trans.sa_session.flush() - - - # Queue the job for execution - trans.app.job_queue.put( job.id, tool.id ) - log.info( "Added genome index job to the job queue, id: %s" % str( job.id ) ) - - return job, odict() - diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 lib/galaxy/tools/genome_index/__init__.py --- a/lib/galaxy/tools/genome_index/__init__.py +++ /dev/null @@ -1,243 +0,0 @@ -from __future__ import with_statement - -import json -import logging -import os -import shutil -import tarfile -import tempfile - -from galaxy import model, util -from galaxy.web.framework.helpers import to_unicode -from galaxy.model.item_attrs import UsesAnnotations -from galaxy.util.json import * -from galaxy.web.base.controller import UsesHistoryMixin -from galaxy.tools.data import ToolDataTableManager - - -log = logging.getLogger(__name__) - -def load_genome_index_tools( toolbox ): - """ Adds tools for indexing genomes via the main job runner. """ - # Create XML for loading the tool. - tool_xml_text = """ - <tool id="__GENOME_INDEX__" name="Index Genome" version="0.1" tool_type="genome_index"> - <type class="GenomeIndexTool" module="galaxy.tools"/> - <action module="galaxy.tools.actions.index_genome" class="GenomeIndexToolAction"/> - <command>$__GENOME_INDEX_COMMAND__ $output_file $output_file.files_path "$__app__.config.rsync_url" "$__app__.config.tool_data_path"</command> - <inputs> - <param name="__GENOME_INDEX_COMMAND__" type="hidden"/> - </inputs> - <outputs> - <data format="txt" name="output_file"/> - </outputs> - <stdio> - <exit_code range="1:" err_level="fatal" /> - </stdio> - </tool> - """ - - # Load index tool. - tmp_name = tempfile.NamedTemporaryFile() - tmp_name.write( tool_xml_text ) - tmp_name.flush() - genome_index_tool = toolbox.load_tool( tmp_name.name ) - toolbox.tools_by_id[ genome_index_tool.id ] = genome_index_tool - log.debug( "Loaded genome index tool: %s", genome_index_tool.id ) - -class GenomeIndexToolWrapper( object ): - """ Provides support for performing jobs that index a genome. """ - def __init__( self, job_id ): - self.locations = dict() - self.job_id = job_id - - def setup_job( self, genobj ): - """ Perform setup for job to index a genome and return an archive. 
Method generates - attribute files, sets the corresponding attributes in the associated database - object, and returns a command line for running the job. The command line - includes the command, inputs, and options; it does not include the output - file because it must be set at runtime. """ - - # - # Create and return command line for running tool. - # - scriptpath = os.path.join( os.path.abspath( os.getcwd() ), "lib/galaxy/tools/genome_index/index_genome.py" ) - return "python %s %s %s" % ( scriptpath, genobj.indexer, genobj.fasta_path ) - - def postprocessing( self, sa_session, app ): - """ Finish the job, move the finished indexes to their final resting place, - and update the .loc files where applicable. """ - gitd = sa_session.query( model.GenomeIndexToolData ).filter_by( job_id=self.job_id ).first() - indexdirs = dict( bfast='bfast_index', bowtie='bowtie_index', bowtie2='bowtie2_index', - bwa='bwa_index', perm='perm_%s_index', picard='srma_index', sam='sam_index' ) - - - if gitd: - fp = open( gitd.dataset.get_file_name(), 'r' ) - deferred = sa_session.query( model.DeferredJob ).filter_by( id=gitd.deferred_job_id ).first() - try: - logloc = json.load( fp ) - except ValueError: - deferred.state = app.model.DeferredJob.states.ERROR - sa_session.add( deferred ) - sa_session.flush() - log.debug( 'Indexing job failed, setting deferred job state to error.' ) - return False - finally: - fp.close() - destination = None - tdtman = ToolDataTableManager( app.config.tool_data_path ) - xmltree = tdtman.load_from_config_file( app.config.tool_data_table_config_path, app.config.tool_data_path ) - for node in xmltree: - table = node.get('name') - location = node.findall('file')[0].get('path') - self.locations[table] = os.path.abspath( location ) - locbase = os.path.abspath( os.path.split( self.locations['all_fasta'] )[0] ) - params = deferred.params - dbkey = params[ 'dbkey' ] - basepath = os.path.join( os.path.abspath( app.config.genome_data_path ), dbkey ) - intname = params[ 'intname' ] - indexer = gitd.indexer - workingdir = os.path.abspath( gitd.dataset.extra_files_path ) - location = [] - indexdata = gitd.dataset.extra_files_path - if indexer == '2bit': - indexdata = os.path.join( workingdir, '%s.2bit' % dbkey ) - destination = os.path.join( basepath, 'seq', '%s.2bit' % dbkey ) - location.append( dict( line='\t'.join( [ 'seq', dbkey, destination ] ), file= os.path.join( locbase, 'alignseq.loc' ) ) ) - elif indexer == 'bowtie': - self._ex_tar( workingdir, 'cs.tar' ) - destination = os.path.join( basepath, 'bowtie_index' ) - for var in [ 'nt', 'cs' ]: - for line in logloc[ var ]: - idx = line - if var == 'nt': - locfile = self.locations[ 'bowtie_indexes' ] - locdir = os.path.join( destination, idx ) - else: - locfile = self.locations[ 'bowtie_indexes_color' ] - locdir = os.path.join( destination, var, idx ) - location.append( dict( line='\t'.join( [ dbkey, dbkey, intname, locdir ] ), file=locfile ) ) - elif indexer == 'bowtie2': - destination = os.path.join( basepath, 'bowtie2_index' ) - for line in logloc[ 'nt' ]: - idx = line - locfile = self.locations[ 'bowtie2_indexes' ] - locdir = os.path.join( destination, idx ) - location.append( dict( line='\t'.join( [ dbkey, dbkey, intname, locdir ] ), file=locfile ) ) - elif indexer == 'bwa': - self._ex_tar( workingdir, 'cs.tar' ) - destination = os.path.join( basepath, 'bwa_index' ) - for var in [ 'nt', 'cs' ]: - for line in logloc[ var ]: - idx = line - if var == 'nt': - locfile = self.locations[ 'bwa_indexes' ] - locdir = os.path.join( destination, 
idx ) - else: - locfile = self.locations[ 'bwa_indexes_color' ] - locdir = os.path.join( destination, var, idx ) - location.append( dict( line='\t'.join( [ dbkey, dbkey, intname, locdir ] ), file=locfile ) ) - elif indexer == 'perm': - self._ex_tar( workingdir, 'cs.tar' ) - destination = os.path.join( basepath, 'perm_index' ) - for var in [ 'nt', 'cs' ]: - for line in logloc[ var ]: - idx = line.pop() - if var == 'nt': - locfile = self.locations[ 'perm_base_indexes' ] - locdir = os.path.join( destination, idx ) - else: - locfile = self.locations[ 'perm_color_indexes' ] - locdir = os.path.join( destination, var, idx ) - line.append( locdir ) - location.append( dict( line='\t'.join( line ), file=locfile ) ) - elif indexer == 'picard': - destination = os.path.join( basepath, 'srma_index' ) - for var in [ 'nt' ]: - for line in logloc[ var ]: - idx = line - locfile = self.locations[ 'picard_indexes' ] - locdir = os.path.join( destination, idx ) - location.append( dict( line='\t'.join( [ dbkey, dbkey, intname, locdir ] ), file=locfile ) ) - elif indexer == 'sam': - destination = os.path.join( basepath, 'sam_index' ) - for var in [ 'nt' ]: - for line in logloc[ var ]: - locfile = self.locations[ 'sam_fa_indexes' ] - locdir = os.path.join( destination, line ) - location.append( dict( line='\t'.join( [ 'index', dbkey, locdir ] ), file=locfile ) ) - - if destination is not None and os.path.exists( os.path.split( destination )[0] ) and not os.path.exists( destination ): - log.debug( 'Moving %s to %s' % ( indexdata, destination ) ) - shutil.move( indexdata, destination ) - if indexer not in [ '2bit' ]: - genome = '%s.fa' % dbkey - target = os.path.join( destination, genome ) - fasta = os.path.abspath( os.path.join( basepath, 'seq', genome ) ) - self._check_link( fasta, target ) - if os.path.exists( os.path.join( destination, 'cs' ) ): - target = os.path.join( destination, 'cs', genome ) - fasta = os.path.abspath( os.path.join( basepath, 'seq', genome ) ) - self._check_link( fasta, target ) - for line in location: - self._add_line( line[ 'file' ], line[ 'line' ] ) - deferred.state = app.model.DeferredJob.states.OK - sa_session.add( deferred ) - sa_session.flush() - - - def _check_link( self, targetfile, symlink ): - target = os.path.relpath( targetfile, os.path.dirname( symlink ) ) - filename = os.path.basename( targetfile ) - if not os.path.exists( targetfile ): # this should never happen. - raise Exception, "%s not found. Unable to proceed without a FASTA file. Aborting." % targetfile - if os.path.exists( symlink ) and os.path.islink( symlink ): - if os.path.realpath( symlink ) == os.path.abspath( targetfile ): # symlink exists, points to the correct FASTA file. - return - else: # no it doesn't. Make a new one, and this time do it right. - os.remove( symlink ) - os.symlink( target, symlink ) - return - elif not os.path.exists( symlink ): # no symlink to the FASTA file. Create one. - os.symlink( target, symlink ) - return - elif os.path.exists( symlink ) and not os.path.islink( symlink ): - if self._hash_file( targetfile ) == self._hash_file( symlink ): # files are identical. No need to panic. - return - else: - if os.path.getsize( symlink ) == 0: # somehow an empty file got copied instead of the symlink. Delete with extreme prejudice. - os.remove( symlink ) - os.symlink( target, symlink ) - return - else: - raise Exception, "Regular file %s exists, is not empty, contents do not match %s." 
% ( symlink, targetfile ) - - def _hash_file( self, filename ): - import hashlib - md5 = hashlib.md5() - with open( filename, 'rb' ) as f: - for chunk in iter( lambda: f.read( 8192 ), '' ): - md5.update( chunk ) - return md5.digest() - - - def _ex_tar( self, directory, filename ): - fh = tarfile.open( os.path.join( directory, filename ) ) - fh.extractall( path=directory ) - fh.close() - os.remove( os.path.join( directory, filename ) ) - - def _add_line( self, locfile, newline ): - filepath = locfile - origlines = [] - output = [] - comments = [] - with open( filepath, 'r' ) as destfile: - for line in destfile: - origlines.append( line.strip() ) - if newline not in origlines: - origlines.append( newline ) - with open( filepath, 'w+' ) as destfile: - origlines.append( '' ) - destfile.write( '\n'.join( origlines ) ) diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 lib/galaxy/tools/genome_index/index_genome.py --- a/lib/galaxy/tools/genome_index/index_genome.py +++ /dev/null @@ -1,327 +0,0 @@ -#!/usr/bin/env python -""" -Export a history to an archive file using attribute files. - -usage: %prog history_attrs dataset_attrs job_attrs out_file - -G, --gzip: gzip archive file -""" -from __future__ import with_statement - -import json -import optparse -import os -import shlex -import shutil -import subprocess -import sys -import tarfile -import tempfile -import time - - -class ManagedIndexer(): - def __init__( self, output_file, infile, workingdir, rsync_url, tooldata ): - self.tooldatapath = os.path.abspath( tooldata ) - self.workingdir = os.path.abspath( workingdir ) - self.outfile = open( os.path.abspath( output_file ), 'w' ) - self.basedir = os.path.split( self.workingdir )[0] - self.fasta = os.path.abspath( infile ) - self.locations = dict( nt=[], cs=[] ) - self.log = [] - self.rsync_opts = '-aclSzq' - self.rsync_url = rsync_url - self.indexers = { - 'bwa': '_bwa', - 'bowtie': '_bowtie', - 'bowtie2': '_bowtie2', - '2bit': '_twobit', - 'perm': '_perm', - 'bfast': '_bfast', - 'picard': '_picard', - 'sam': '_sam' - } - if not os.path.exists( self.workingdir ): - os.makedirs( self.workingdir ) - self.logfile = open( os.path.join( self.workingdir, 'ManagedIndexer.log' ), 'w+' ) - - def run_indexer( self, indexer ): - self.fapath = self.fasta - self.fafile = os.path.basename( self.fapath ) - self.genome = os.path.splitext( self.fafile )[0] - with WithChDir( self.basedir ): - if indexer not in self.indexers: - sys.stderr.write( 'The requested indexing function does not exist' ) - exit(127) - else: - with WithChDir( self.workingdir ): - self._log( 'Running indexer %s.' % indexer ) - result = getattr( self, self.indexers[ indexer ] )() - if result in [ None, False ]: - sys.stderr.write( 'Error running indexer %s, %s' % ( indexer, result ) ) - self._flush_files() - exit(1) - else: - self._log( self.locations ) - self._log( 'Indexer %s completed successfully.' % indexer ) - self._flush_files() - exit(0) - - def _check_link( self ): - self._log( 'Checking symlink to %s' % self.fafile ) - if not os.path.exists( self.fafile ): - self._log( 'Symlink not found, creating' ) - os.symlink( os.path.relpath( self.fapath ), self.fafile ) - - def _do_rsync( self, idxpath ): - self._log( 'Trying rsync at %s/%s%s' % ( self.rsync_url, self.genome, idxpath ) ) - result = subprocess.call( shlex.split( 'rsync %s %s/%s%s .' % ( self.rsync_opts, self.rsync_url, self.genome, idxpath ) ), stderr=self.logfile ) - if result != 0: - self._log( 'Rsync failed or index not found. 
Generating.' ) - else: - self._log( 'Rsync succeeded.' ) - return result - - def _flush_files( self ): - json.dump( self.locations, self.outfile ) - self.outfile.close() - self.logfile.close() - - def _log( self, stuff ): - timestamp = time.strftime('%Y-%m-%d %H:%M:%S %z') - self.logfile.write( "[%s] %s\n" % (timestamp, stuff) ) - - def _bwa( self ): - result = self._do_rsync( '/bwa_index/' ) - if result == 0: - self.locations[ 'nt' ].append( self.fafile ) - return self._bwa_cs() - else: - self._check_link() - command = shlex.split( 'bwa index -a bwtsw %s' % self.fafile ) - result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile ) - if result != 0: - newcommand = shlex.split( 'bwa index -c %s' % self.fafile ) - result = call( newcommand, stderr=self.logfile, stdout=self.logfile ) - if result == 0: - self.locations[ 'nt' ].append( self.fafile ) - os.remove( self.fafile ) - return self._bwa_cs() - else: - self._log( 'BWA (base) exited with code %s' % result ) - return False - - def _bwa_cs( self ): - if not os.path.exists( os.path.join( self.workingdir, 'cs' ) ): - os.makedirs( 'cs' ) - with WithChDir( 'cs' ): - self._check_link() - command = shlex.split( 'bwa index -a bwtsw -c %s' % self.fafile ) - result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile ) - if result != 0: - newcommand = shlex.split( 'bwa index -c %s' % self.fafile ) - result = call( newcommand, stderr=self.logfile, stdout=self.logfile ) - if result == 0: - self.locations[ 'cs' ].append( self.fafile ) - os.remove( self.fafile ) - else: - self._log( 'BWA (color) exited with code %s' % result ) - return False - else: - self.locations[ 'cs' ].append( self.fafile ) - os.remove( self.fafile ) - else: - self.locations[ 'cs' ].append( self.fafile ) - temptar = tarfile.open( 'cs.tar', 'w' ) - temptar.add( 'cs' ) - temptar.close() - shutil.rmtree( 'cs' ) - return True - - - def _bowtie( self ): - result = self._do_rsync( '/bowtie_index/' ) - if result == 0: - self.locations[ 'nt' ].append( self.genome ) - return self._bowtie_cs() - else: - self._check_link() - command = shlex.split( 'bowtie-build -f %s %s' % ( self.fafile, self.genome ) ) - result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile ) - if result == 0: - self.locations[ 'nt' ].append( self.genome ) - os.remove( self.fafile ) - return self._bowtie_cs() - else: - self._log( 'Bowtie (base) exited with code %s' % result ) - return False - - def _bowtie_cs( self ): - indexdir = os.path.join( os.getcwd(), 'cs' ) - if not ( os.path.exists( indexdir ) ): - os.makedirs( indexdir ) - with WithChDir( indexdir ): - self._check_link() - command = shlex.split( 'bowtie-build -C -f %s %s' % ( self.fafile, self.genome ) ) - result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile ) - if result == 0: - self.locations[ 'cs' ].append( self.genome ) - else: - self._log( 'Bowtie (color) exited with code %s' % result ) - return False - os.remove( os.path.join( indexdir, self.fafile ) ) - else: - self.locations[ 'cs' ].append( self.genome ) - temptar = tarfile.open( 'cs.tar', 'w' ) - temptar.add( 'cs' ) - temptar.close() - shutil.rmtree( 'cs' ) - return True - - - def _bowtie2( self ): - result = self._do_rsync( '/bowtie2_index/' ) - if result == 0: - self.locations[ 'nt' ].append( self.fafile ) - return True - ref_base = os.path.splitext(self.fafile)[0] - self._check_link() - command = shlex.split( 'bowtie2-build %s %s' % ( self.fafile, ref_base ) ) - result = subprocess.call( command, stderr=self.logfile, 
stdout=self.logfile ) - if result == 0: - self.locations[ 'nt' ].append( ref_base ) - os.remove( self.fafile ) - return True - else: - self._log( 'Bowtie2 exited with code %s' % result ) - return False - - def _twobit( self ): - """Index reference files using 2bit for random access. - """ - result = self._do_rsync( '/seq/%s.2bit' % self.genome ) - if result == 0: - self.locations['nt'].append( "%s.2bit" % self.genome ) - return True - else: - out_file = "%s.2bit" % self.genome - self._check_link() - command = shlex.split( 'faToTwoBit %s %s' % ( self.fafile, out_file ) ) - result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile ) - if result == 0: - self.locations['nt'].append( out_file ) - os.remove( self.fafile ) - return True - else: - self._log( 'faToTwoBit exited with code %s' % result ) - return False - - def _perm( self ): - result = self._do_rsync( '/perm_index/' ) - self._check_link() - genome = self.genome - read_length = 50 - for seed in [ 'F3', 'F4' ]: - key = '%s_%s_%s' % (self.genome, seed, read_length) - desc = '%s: seed=%s, read length=%s' % (self.genome, seed, read_length) - index = "%s_base_%s_%s.index" % (self.genome, seed, read_length) - if not os.path.exists( index ): - command = shlex.split("PerM %s %s --readFormat fastq --seed %s -m -s %s" % (self.fafile, read_length, seed, index)) - result = subprocess.call( command ) - if result != 0: - self._log( 'PerM (base) exited with code %s' % result ) - return False - self.locations[ 'nt' ].append( [ key, desc, index ] ) - os.remove( self.fafile ) - return self._perm_cs() - - def _perm_cs( self ): - genome = self.genome - read_length = 50 - if not os.path.exists( 'cs' ): - os.makedirs( 'cs' ) - with WithChDir( 'cs' ): - self._check_link() - for seed in [ 'F3', 'F4' ]: - key = '%s_%s_%s' % (genome, seed, read_length) - desc = '%s: seed=%s, read length=%s' % (genome, seed, read_length) - index = "%s_color_%s_%s.index" % (genome, seed, read_length) - if not os.path.exists( index ): - command = shlex.split("PerM %s %s --readFormat csfastq --seed %s -m -s %s" % (self.fafile, read_length, seed, index)) - result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile ) - if result != 0: - self._log( 'PerM (color) exited with code %s' % result ) - return False - self.locations[ 'cs' ].append( [ key, desc, index ] ) - os.remove( self.fafile ) - temptar = tarfile.open( 'cs.tar', 'w' ) - temptar.add( 'cs' ) - temptar.close() - shutil.rmtree( 'cs' ) - return True - - def _picard( self ): - result = self._do_rsync( '/srma_index/' ) - if result == 0 and os.path.exists( '%s.dict' % self.genome): - self.locations[ 'nt' ].append( self.fafile ) - return True - local_ref = self.fafile - srma = os.path.abspath( os.path.join( self.tooldatapath, 'shared/jars/picard/CreateSequenceDictionary.jar' ) ) - genome = os.path.splitext( self.fafile )[0] - self._check_link() - if not os.path.exists( '%s.fai' % self.fafile ) and not os.path.exists( '%s.fai' % self.genome ): - command = shlex.split( 'samtools faidx %s' % self.fafile ) - subprocess.call( command, stderr=self.logfile ) - command = shlex.split( "java -jar %s R=%s O=%s.dict URI=%s" \ - % ( srma, local_ref, genome, local_ref ) ) - if not os.path.exists( '%s.dict' % self.genome ): - result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile ) - self._log( ' '.join( command ) ) - if result != 0: - self._log( 'Picard exited with code %s' % result ) - return False - self.locations[ 'nt' ].append( self.fafile ) - os.remove( self.fafile ) - return True - 
- def _sam( self ): - local_ref = self.fafile - local_file = os.path.splitext( self.fafile )[ 0 ] - print 'Trying rsync' - result = self._do_rsync( '/sam_index/' ) - if result == 0 and ( os.path.exists( '%s.fai' % self.fafile ) or os.path.exists( '%s.fai' % self.genome ) ): - self.locations[ 'nt' ].append( '%s.fai' % local_ref ) - return True - self._check_link() - print 'Trying indexer' - command = shlex.split("samtools faidx %s" % local_ref) - result = subprocess.call( command, stderr=self.logfile, stdout=self.logfile ) - if result != 0: - self._log( 'SAM exited with code %s' % result ) - return False - else: - self.locations[ 'nt' ].append( '%s.fai' % local_ref ) - os.remove( local_ref ) - return True - -class WithChDir(): - def __init__( self, target ): - self.working = target - self.previous = os.getcwd() - def __enter__( self ): - os.chdir( self.working ) - def __exit__( self, *args ): - os.chdir( self.previous ) - - -if __name__ == "__main__": - # Parse command line. - parser = optparse.OptionParser() - (options, args) = parser.parse_args() - indexer, infile, outfile, working_dir, rsync_url, tooldata = args - - # Create archive. - idxobj = ManagedIndexer( outfile, infile, working_dir, rsync_url, tooldata ) - returncode = idxobj.run_indexer( indexer ) - if not returncode: - exit(1) - exit(0) diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 lib/galaxy/webapps/galaxy/controllers/data_admin.py --- a/lib/galaxy/webapps/galaxy/controllers/data_admin.py +++ /dev/null @@ -1,299 +0,0 @@ -import ftplib -import json -import sys -from galaxy import model, util -from galaxy.jobs import transfer_manager -from galaxy.model.orm import * -from galaxy.web.base.controller import * -from galaxy.web.framework.helpers import grids, iff, time_ago -from library_common import get_comptypes, lucene_search, whoosh_search - - -# Older py compatibility -try: - set() -except: - from sets import Set as set - -import logging -log = logging.getLogger( __name__ ) - -class DataAdmin( BaseUIController ): - jobstyles = dict( - done='panel-done-message', - waiting='state-color-waiting', - running='state-color-running', - downloaded='state-color-running', - new='state-color-new', - ok='panel-done-message', - error='panel-error-message', - queued='state-color-waiting' - ) - - @web.expose - @web.require_admin - def manage_data( self, trans, **kwd ): - if trans.app.config.get_bool( 'enable_beta_job_managers', False ) == False: - return trans.fill_template( '/admin/data_admin/generic_error.mako', message='This feature requires that enable_beta_job_managers be set to True in your Galaxy configuration.' ) - if 'all_fasta' not in trans.app.tool_data_tables.data_tables: - return trans.fill_template( '/admin/data_admin/generic_error.mako', message='The local data manager requires that an all_fasta entry exists in your tool_data_table_conf.xml.' 
) - indextable = {} - dbkeys = [] - labels = { 'bowtie_indexes': 'Bowtie', 'bowtie2_indexes': 'Bowtie 2', 'bwa_indexes': 'BWA', 'srma_indexes': 'Picard', 'sam_fa_indexes': 'SAM', 'perm_base_indexes': 'PerM' } - tablenames = { 'Bowtie': 'bowtie_indexes', 'Bowtie 2': 'bowtie2_indexes', 'BWA': 'bwa_indexes', 'Picard': 'srma_indexes', 'SAM': 'sam_fa_indexes', 'PerM': 'perm_base_indexes' } - indexfuncs = dict( bowtie_indexes='bowtie', bowtie2_indexes='bowtie2', bwa_indexes='bwa', srma_indexes='picard', sam_fa_indexes='sam', perm_base_indexes='perm' ) - for genome in trans.app.tool_data_tables.data_tables[ 'all_fasta' ].data: - dbkey = genome[0] - dbkeys.append( dbkey ) - indextable[ dbkey ] = dict( indexes=dict(), name=genome[2], path=genome[3] ) - for genome in indextable: - for label in labels: - indextable[ genome ][ 'indexes' ][ label ] = 'Generate' - if label not in trans.app.tool_data_tables.data_tables: - indextable[ genome ][ 'indexes' ][ label ] = 'Disabled' - else: - for row in trans.app.tool_data_tables.data_tables[ label ].data: - if genome in row or row[0].startswith( genome ): - indextable[ genome ][ 'indexes' ][ label ] = 'Generated' - jobgrid = [] - sa_session = trans.app.model.context.current - jobs = sa_session.query( model.GenomeIndexToolData ).order_by( model.GenomeIndexToolData.created_time.desc() ).filter_by( user_id=trans.get_user().id ).group_by( model.GenomeIndexToolData.deferred ).limit( 20 ).all() - prevjobid = 0 - for job in jobs: - if prevjobid == job.deferred.id: - continue - prevjobid = job.deferred.id - state = job.deferred.state - params = job.deferred.params - if job.transfer is not None: - jobtype = 'download' - else: - jobtype = 'index' - indexers = ', '.join( params['indexes'] ) - jobgrid.append( dict( jobtype=jobtype, indexers=indexers, rowclass=state, deferred=job.deferred.id, state=state, intname=job.deferred.params[ 'intname' ], dbkey=job.deferred.params[ 'dbkey' ] ) ) - styles = dict( Generate=self.jobstyles['new'], Generated=self.jobstyles['ok'], Disabled=self.jobstyles['error'] ) - return trans.fill_template( '/admin/data_admin/local_data.mako', jobgrid=jobgrid, indextable=indextable, labels=labels, dbkeys=dbkeys, styles=styles, indexfuncs=indexfuncs ) - - @web.expose - @web.require_admin - def add_genome( self, trans, **kwd ): - if trans.app.config.get_bool( 'enable_beta_job_managers', False ) == False: - return trans.fill_template( '/admin/data_admin/generic_error.mako', message='This feature requires that enable_beta_job_managers be set to True in your Galaxy configuration.' 
) - dbkeys = trans.ucsc_builds - ensemblkeys = trans.ensembl_builds - ncbikeys = trans.ncbi_builds - return trans.fill_template( '/admin/data_admin/data_form.mako', dbkeys=dbkeys, ensembls=ensemblkeys, ncbi=ncbikeys ) - - @web.expose - @web.require_admin - def genome_search( self, trans, **kwd ): - results = list() - ncbikeys = trans.ncbi_builds - params = util.Params( kwd ) - search = params.get( 'q', None ) - limit = params.get( 'limit', None ) - if search is not None: - query = search.lower() - for row in ncbikeys: - if query in row[ 'name' ].lower() or query in row[ 'dbkey' ].lower(): - result = '|'.join( [ ': '.join( [ row[ 'dbkey' ], row[ 'name' ] ] ), row[ 'dbkey' ] ] ) - results.append( result ) - if len( results ) >= limit: - break - return trans.fill_template( '/admin/data_admin/ajax_status.mako', json='\n'.join( results ) ) - - @web.expose - @web.require_admin - def index_build( self, trans, **kwd ): - """Index a previously downloaded genome.""" - params = util.Params( kwd ) - path = os.path.abspath( params.get( 'path', None ) ) - indexes = [ params.get( 'indexes', None ) ] - dbkey = params.get( 'dbkey', None ) - intname = params.get( 'longname', None ) - indexjob = trans.app.job_manager.deferred_job_queue.plugins['GenomeIndexPlugin'].create_job( trans, path, indexes, dbkey, intname ) - return indexjob - - @web.expose - @web.require_admin - def download_build( self, trans, **kwd ): - """Download a genome from a remote source and add it to the library.""" - params = util.Params( kwd ) - paramdict = build_param_dict( params, trans ) - if paramdict[ 'status' ] == 'error': - return trans.fill_template( '/admin/data_admin/generic_error.mako', message=paramdict[ 'message' ] ) - url = paramdict[ 'url' ] - liftover = paramdict[ 'liftover' ] - dbkey = paramdict[ 'dbkey' ] - indexers = paramdict[ 'indexers' ] - longname = paramdict[ 'longname' ] - dbkeys = dict() - protocol = 'http' - if url is None: - return trans.fill_template( '/admin/data_admin/generic_error.mako', message='Unable to generate a valid URL with the specified parameters.' ) - jobid = trans.app.job_manager.deferred_job_queue.plugins['GenomeTransferPlugin'].create_job( trans, url, dbkey, longname, indexers ) - chainjob = [] - if liftover is not None: - for chain in liftover: - liftover_url = u'ftp://hgdownload.cse.ucsc.edu%s' % chain[0] - from_genome = chain[1] - to_genome = chain[2] - destfile = liftover_url.split('/')[-1].replace('.gz', '') - lochain = trans.app.job_manager.deferred_job_queue.plugins['LiftOverTransferPlugin'].create_job( trans, liftover_url, dbkey, from_genome, to_genome, destfile, jobid ) - chainjob.append( lochain ) - job = trans.app.job_manager.deferred_job_queue.plugins['GenomeTransferPlugin'].get_job_status( jobid ) - job.params['liftover'] = chainjob - trans.app.model.context.current.add( job ) - trans.app.model.context.current.flush() - return trans.response.send_redirect( web.url_for( controller='data_admin', - action='monitor_status', - job=jobid ) ) - - @web.expose - @web.require_admin - def monitor_status( self, trans, **kwd ): - params = util.Params( kwd ) - jobid = params.get( 'job', '' ) - deferred = trans.app.model.context.current.query( model.DeferredJob ).filter_by( id=jobid ).first() - if deferred is None: - return trans.fill_template( '/admin/data_admin/generic_error.mako', message='Invalid genome downloader job specified.' 
) - gname = deferred.params[ 'intname' ] - indexers = ', '.join( deferred.params[ 'indexes' ] ) - jobs = self._get_jobs( deferred, trans ) - jsonjobs = json.dumps( jobs ) - return trans.fill_template( '/admin/data_admin/download_status.mako', name=gname, indexers=indexers, mainjob=jobid, jobs=jobs, jsonjobs=jsonjobs ) - - @web.expose - @web.require_admin - def get_jobs( self, trans, **kwd ): - sa_session = trans.app.model.context.current - jobs = [] - params = util.Params( kwd ) - jobid = params.get( 'jobid', '' ) - job = sa_session.query( model.DeferredJob ).filter_by( id=jobid ).first() - jobs = self._get_jobs( job, trans ) - return trans.fill_template( '/admin/data_admin/ajax_status.mako', json=json.dumps( jobs ) ) - - def _get_job( self, jobid, jobtype, trans ): - sa = trans.app.model.context.current - if jobtype == 'liftover': - liftoverjob = sa.query( model.DeferredJob ).filter_by( id=jobid ).first() - job = sa.query( model.TransferJob ).filter_by( id=liftoverjob.params[ 'transfer_job_id' ] ).first() - joblabel = 'Download liftOver (%s to %s)' % ( liftoverjob.params[ 'from_genome' ], liftoverjob.params[ 'to_genome' ] ) - elif jobtype == 'transfer': - job = sa.query( model.TransferJob ).filter_by( id=jobid ).first() - joblabel = 'Download Genome' - elif jobtype == 'deferred': - job = sa.query( model.DeferredJob ).filter_by( id=jobid ).first() - joblabel = 'Main Controller' - elif jobtype == 'index': - job = sa.query( model.Job ).filter_by( id=jobid.job_id ).first() - joblabel = 'Index Genome (%s)' % jobid.indexer - return dict( status=job.state, jobid=job.id, style=self.jobstyles[job.state], type=jobtype, label=joblabel ) - - def _get_jobs( self, deferredjob, trans ): - jobs = [] - idxjobs = [] - sa_session = trans.app.model.context.current - job = sa_session.query( model.GenomeIndexToolData ).filter_by( deferred=deferredjob ).first() - jobs.append( self._get_job( deferredjob.id, 'deferred', trans ) ) - if 'transfer_job_id' in deferredjob.params: #hasattr( job, 'transfer' ) and job.transfer is not None: # This is a transfer job, check for indexers - jobs.append( self._get_job( deferredjob.params[ 'transfer_job_id' ], 'transfer', trans ) ) - if hasattr( job, 'deferred' ): - idxjobs = sa_session.query( model.GenomeIndexToolData ).filter_by( deferred=job.deferred, transfer=job.transfer ).all() - if deferredjob.params.has_key( 'liftover' ) and deferredjob.params[ 'liftover' ] is not None: - for jobid in deferredjob.params[ 'liftover' ]: - jobs.append( self._get_job( jobid, 'liftover', trans ) ) - for idxjob in idxjobs: - jobs.append( self._get_job( idxjob, 'index', trans ) ) - return jobs - -def build_param_dict( params, trans ): - - source = params.get('source', '') - longname = params.get('longname', None) - if not isinstance( params.get( 'indexers', None ), list ): - indexers = [ params.get( 'indexers', None ) ] - else: - indexers = params.get( 'indexers', None ) - if indexers is not None: - if indexers == [None]: - indexers = None - url = None - liftover = None - newlift = [] - dbkey = params.get( 'dbkey', None ) - dbkeys = dict() - protocol = 'http' - - if source == 'NCBI': - build = params.get('ncbi_name', '') - dbkey = build.split( ': ' )[0] - longname = build.split( ': ' )[-1] - url = 'http://togows.dbcls.jp/entry/ncbi-nucleotide/%s.fasta' % dbkey - elif source == 'URL': - dbkey = params.get( 'url_dbkey', '' ) - url = params.get( 'url', None ) - longname = params.get( 'longname', None ) - elif source == 'UCSC': - longname = None - for build in trans.ucsc_builds: - if dbkey == 
build[0]: - dbkey = build[0] - longname = build[1] - break - if dbkey == '?': - return dict( status='error', message='An invalid build was specified.' ) - ftp = ftplib.FTP('hgdownload.cse.ucsc.edu') - ftp.login('anonymous', trans.get_user().email) - checker = [] - liftover = [] - newlift = [] - ftp.retrlines('NLST /goldenPath/%s/liftOver/*.chain.gz' % dbkey, liftover.append) - try: - for chain in liftover: - lifts = [] - fname = chain.split( '/' )[-1] - organisms = fname.replace( '.over.chain.gz', '' ).split( 'To' ) - lifts.append( [ organisms[0], organisms[1][0].lower() + organisms[1][1:] ] ) - lifts.append( [ organisms[1][0].lower() + organisms[1][1:], organisms[0] ] ) - for organism in lifts: - remotepath = '/goldenPath/%s/liftOver/%sTo%s.over.chain.gz' % ( organism[0], organism[0], organism[1][0].upper() + organism[1][1:] ) - localfile = '%sTo%s.over.chain' % ( organism[0], organism[1][0].upper() + organism[1][1:] ) - localpath = os.path.join( trans.app.config.get( 'genome_data_path', 'tool-data/genome' ), organism[0], 'liftOver', localfile ) - if not os.path.exists( localpath ) or os.path.getsize( localpath ) == 0: - newlift.append( [ remotepath, organism[0], organism[1] ] ) - except: - newlift = None - pass - ftp.retrlines('NLST /goldenPath/%s/bigZips/' % dbkey, checker.append) - ftp.quit() - for filename in [ dbkey, 'chromFa' ]: - for extension in [ '.tar.gz', '.tar.bz2', '.zip', '.fa.gz', '.fa.bz2' ]: - testfile = '/goldenPath/%s/bigZips/%s%s' % ( dbkey, filename, extension ) - if testfile in checker: - url = 'ftp://hgdownload.cse.ucsc.edu%s' % testfile - break; - else: - continue - if url is None: - message = 'The genome %s was not found on the UCSC server.' % dbkey - status = 'error' - return dict( status=status, message=message ) - - elif source == 'Ensembl': - dbkey = params.get( 'ensembl_dbkey', None ) - if dbkey == '?': - return dict( status='error', message='An invalid build was specified.' ) - for build in trans.ensembl_builds: - if build[ 'dbkey' ] == dbkey: - dbkey = build[ 'dbkey' ] - release = build[ 'release' ] - pathname = '_'.join( build[ 'name' ].split(' ')[0:2] ) - longname = build[ 'name' ].replace('_', ' ') - break - url = 'ftp://ftp.ensembl.org/pub/release-%s/fasta/%s/dna/%s.%s.%s.dna.toplevel.fa.gz' % ( release, pathname.lower(), pathname, dbkey, release ) - - params = dict( status='ok', dbkey=dbkey, datatype='fasta', url=url, user=trans.user.id, liftover=newlift, longname=longname, indexers=indexers ) - - return params diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 templates/admin/data_admin/ajax_status.mako --- a/templates/admin/data_admin/ajax_status.mako +++ /dev/null @@ -1,1 +0,0 @@ -${json} \ No newline at end of file diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 templates/admin/data_admin/data_form.mako --- a/templates/admin/data_admin/data_form.mako +++ /dev/null @@ -1,178 +0,0 @@ -<%inherit file="/base.mako"/> -<%namespace file="/message.mako" import="render_msg" /> -<%namespace file="/library/common/common.mako" import="common_javascripts" /> - -<%! 
- def inherit(context): - if context.get('use_panels'): - return '/webapps/galaxy/base_panels.mako' - else: - return '/base.mako' -%> -<%inherit file="${inherit(context)}"/> - -<%def name="init()"> -<% - self.has_left_panel=False - self.has_right_panel=False - self.message_box_visible=False - self.active_view="user" - self.overlay_visible=False - self.has_accessible_datasets = False -%> -</%def> -<%def name="stylesheets()"> - ${parent.stylesheets()} - ${h.css( "autocomplete_tagging" )} -</%def> -<%def name="javascripts()"> - ${parent.javascripts()} - ${h.js("libs/jquery/jquery.autocomplete", "galaxy.autocom_tagging" )} -</%def> -## -## Override methods from base.mako and base_panels.mako -## -<%def name="center_panel()"> - <div style="overflow: auto; height: 100%;"> - <div class="page-container" style="padding: 10px;"> - ${render_content()} - </div> - </div> -</%def> -<style type="text/css"> - .params-block { display: none; } -</style> -<div class="toolForm"> - %if message: - <div class="${status}">${message}</div> - %endif - <div class="toolFormTitle">Get build from a remote server</div> - <div class="toolFormBody"> - <form name="download_build" action="${h.url_for( controller='data_admin', action='download_build' )}" enctype="multipart/form-data" method="post"> - <div class="form-row"> - <label for="source">Data Source</label> - <select id="datasource" name="source" label="Data Source"> - <option value="UCSC">UCSC</option> - <option value="URL">Direct Link</option> - <option value="NCBI">NCBI</option> - <option value="Ensembl">EnsemblGenome</option> - </select> - <div style="clear: both;"> </div> - </div> - <div class="form-row"> - <label for="indexers">Indexers</label> - <select name="indexers" multiple style="width: 200px; height: 125px;"> - <option value="2bit" selected>TwoBit</option> - <option value="bowtie">Bowtie</option> - <option value="bowtie2">Bowtie 2</option> - <option value="bwa">BWA</option> - <option value="perm">PerM</option> - <option value="picard">Picard</option> - <option value="sam">sam</option> - </select> - <div class="toolParamHelp" style="clear: both;"> - Select the indexers you want to run on the FASTA file after downloading. - </div> - </div> - <h2>Parameters</h2> - <div id="params_URL" class="params-block"> - <div class="form-row"> - <label for="longname">Long Name</label> - <input name="longname" type="text" label="Long Name" /> - <div style="clear: both;"> </div> - <div class="toolParamHelp" style="clear: both;"> - A descriptive name for this build. - </div> - </div> - <div class="form-row"> - <label for="url_dbkey">DB Key</label> - <input name="url_dbkey" type="text" label="DB Key" /> - <div style="clear: both;"> </div> - <div class="toolParamHelp" style="clear: both;"> - The internal DB key for this build. WARNING: Using a value that already exists in one or more .loc files may have unpredictable results. - </div> - </div> - <div id="dlparams"> - <div class="form-row"> - <label for="url">URL</label> - <input name="url" type="text" label="URL" /> - <div style="clear: both;"> </div> - <div class="toolParamHelp" style="clear: both;"> - The URL to download this build from. 
- </div> - </div> - </div> - </div> - <div id="params_NCBI" class="params-block"> - <div class="form-row"> - <label>Genome:</label> - <div class="form-row-input"> - <input type="text" class="text-and-autocomplete-select ac_input" size="40" name="ncbi_name" id="ncbi_name" value="" /> - </div> - <div class="toolParamHelp" style="clear: both;"> - If you can't find the build you want in this list, open a terminal and execute - <pre>sh cron/updatencbi.sh</pre> - in your galaxy root directory. - </div> - </div> - </div> - <div id="params_Ensembl" class="params-block"> - <div class="form-row"> - <label>Genome:</label> - <div class="form-row-input"> - <select name="ensembl_dbkey" last_selected_value="?"> - %for dbkey in ensembls: - <option value="${dbkey['dbkey']}">${dbkey['dbkey']} - ${dbkey['name']}</option> - %endfor - </select> - </div> - <div class="toolParamHelp" style="clear: both;"> - If you can't find the build you want in this list, open a terminal and execute - <pre>sh cron/updateensembl.sh</pre> - in your galaxy root directory. - </div> - </div> - </div> - <div id="params_UCSC" class="params-block"> - <div class="form-row"> - <label>Genome:</label> - <div class="form-row-input"> - <select name="dbkey" last_selected_value="?"> - %for dbkey in dbkeys: - %if dbkey[0] == last_used_build: - <option value="${dbkey[0]}" selected>${dbkey[1]}</option> - %else: - <option value="${dbkey[0]}">${dbkey[1]}</option> - %endif - %endfor - </select> - </div> - <div class="toolParamHelp" style="clear: both;"> - If you can't find the build you want in this list, open a terminal and execute - <pre>sh cron/updateucsc.sh</pre> - in your galaxy root directory. - </div> - </div> - </div> - <div class="form-row"> - <input type="submit" class="primary-button" name="runtool_btn" value="Download and index"/> - </div> - <script type="text/javascript"> - $(document).ready(function() { - checkDataSource(); - }); - $('#datasource').change(function() { - checkDataSource(); - }); - function checkDataSource() { - var ds = $('#datasource').val(); - $('.params-block').each(function() { - $(this).hide(); - }); - $('#params_' + ds).show(); - }; - - var ac = $('#ncbi_name').autocomplete( $('#ncbi_name'), { minChars: 3, max: 100, url: '${h.url_for( controller='data_admin', action='genome_search' )}' } ); - </script> - </form> -</div> diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 templates/admin/data_admin/download_status.mako --- a/templates/admin/data_admin/download_status.mako +++ /dev/null @@ -1,110 +0,0 @@ -<%namespace file="/library/common/library_item_info.mako" import="render_library_item_info" /> -<%namespace file="/library/common/common.mako" import="render_actions_on_multiple_items" /> -<%namespace file="/library/common/common.mako" import="render_compression_types_help" /> -<%namespace file="/library/common/common.mako" import="common_javascripts" /> - -<%! 
- def inherit(context): - if context.get('use_panels'): - return '/webapps/galaxy/base_panels.mako' - else: - return '/base.mako' -%> -<%inherit file="${inherit(context)}"/> - -<%def name="init()"> -<% - self.has_left_panel=False - self.has_right_panel=False - self.message_box_visible=False - self.active_view="user" - self.overlay_visible=False - self.has_accessible_datasets = False -%> -</%def> - -## -## Override methods from base.mako and base_panels.mako -## -<%def name="center_panel()"> - <div style="overflow: auto; height: 100%;"> - <div class="page-container" style="padding: 10px;"> - ${render_content()} - </div> - </div> -</%def> -<p>${name} been added to the job queue - %if indexers: - to be indexed with ${indexers} - %endif - </p> -<table id="jobStatus"> -</table> -<p><a href="${h.url_for( controller='data_admin', action='manage_data' )}">Overview</a>.</p> -<p><a href="${h.url_for( controller='data_admin', action='add_genome' )}">Download form</a>.</p> -<script type="text/javascript"> - jobs = ${jsonjobs} - finalstates = new Array('done', 'error', 'ok'); - - function makeHTML(jobrow) { - jc = 'jobrow ' + jobrow['style']; - djid = jobrow['jobid']; - jt = jobrow['type']; - idval = jt + '-job-' + djid; - return '<tr id="' + idval + '" class="' + jc + '" data-status="' + jobrow['status'] + '" data-jobid="' + djid + '" data-jobtype="' + jt + '">' + - '<td style="padding: 0px 5px 0px 30px;">' + jobrow['label'] + '</td>' + - '<td style="padding: 0px 5px;">' + jobrow['status'] + '</td></tr>'; - } - - function checkJobs() { - var alldone = true; - var mainjob; - $('.jobrow').each(function() { - status = $(this).attr('data-status'); - if ($(this).attr('data-jobtype') == 'deferred') { - mainjob = $(this).attr('data-jobid'); - } - if ($.inArray(status, finalstates) == -1) { - alldone = false; - } - }); - if (!alldone) { - checkForNewJobs(mainjob); - $('#jobStatus').delay(3000).queue(function(n) { - checkJobs(); - n(); - }); - } - } - - function checkForNewJobs(mainjob) { - $.get('${h.url_for( controller='data_admin', action='get_jobs' )}', { jobid: mainjob }, function(data) { - jsondata = JSON.parse(data); - for (i in jsondata) { - currentjob = jsondata[i] - if (jobs[i] == undefined) { - $('#jobStatus').append(makeHTML(jsondata[i])); - jobs.push(jsondata[i]); - } - $('#' + currentjob['type'] + '-job-' + currentjob['jobid']).replaceWith(makeHTML(currentjob)); - } - }); - } - - $(document).ready(function() { - for (job in jobs) { - jobrow = jobs[job]; - $('#jobStatus').append(makeHTML(jobrow)); - if (jobrow['type'] == 'deferred') { - $('#jobStatus').delay(5000).queue(function(n) { - checkForNewJobs(jobrow['jobid']); - n(); - }).fadeIn(); - } - } - $('#jobStatus').delay(3000).queue(function(n) { - checkJobs(); - n(); - }).fadeIn(); - }); -</script> \ No newline at end of file diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 templates/admin/data_admin/generic_error.mako --- a/templates/admin/data_admin/generic_error.mako +++ /dev/null @@ -1,35 +0,0 @@ -<%inherit file="/base.mako"/> -<%namespace file="/message.mako" import="render_msg" /> -<%namespace file="/library/common/common.mako" import="common_javascripts" /> - -<%! 
- def inherit(context): - if context.get('use_panels'): - return '/webapps/galaxy/base_panels.mako' - else: - return '/base.mako' -%> -<%inherit file="${inherit(context)}"/> - -<%def name="init()"> -<% - self.has_left_panel=False - self.has_right_panel=False - self.message_box_visible=False - self.active_view="user" - self.overlay_visible=False - self.has_accessible_datasets = False -%> -</%def> -<%def name="stylesheets()"> - ${parent.stylesheets()} - ${h.css( "autocomplete_tagging" )} -</%def> -<%def name="javascripts()"> - ${parent.javascripts()} - ${h.js("libs/jquery/jquery.autocomplete", "galaxy.autocom_tagging" )} -</%def> -## -## Override methods from base.mako and base_panels.mako -## -<p class="panel-error-message">${message}</p> \ No newline at end of file diff -r 4eeb41f2fa7569e1d8652d9a0be3a1f1d5c6c2c0 -r b6fae2cd673c6a286138b9e4628f7c4ef0abb099 templates/admin/data_admin/local_data.mako --- a/templates/admin/data_admin/local_data.mako +++ /dev/null @@ -1,169 +0,0 @@ -<%inherit file="/base.mako"/> -<%namespace file="/message.mako" import="render_msg" /> -<%namespace file="/library/common/common.mako" import="common_javascripts" /> - -<%! - def inherit(context): - if context.get('use_panels'): - return '/webapps/galaxy/base_panels.mako' - else: - return '/base.mako' -%> -<%inherit file="${inherit(context)}"/> - -<%def name="init()"> -<% - self.has_left_panel=False - self.has_right_panel=False - self.message_box_visible=False - self.active_view="user" - self.overlay_visible=False - self.has_accessible_datasets = False -%> -</%def> -<%def name="stylesheets()"> - ${parent.stylesheets()} - ${h.css( "autocomplete_tagging" )} -</%def> -<%def name="javascripts()"> - ${parent.javascripts()} - ${h.js("libs/jquery/jquery.autocomplete", "galaxy.autocom_tagging" )} -</%def> -## -## Override methods from base.mako and base_panels.mako -## -<%def name="center_panel()"> - <div style="overflow: auto; height: 100%;"> - <div class="page-container" style="padding: 10px;"> - ${render_content()} - </div> - </div> -</%def> -<style type="text/css"> - .params-block { display: none; } - td, th { padding-left: 10px; padding-right: 10px; } - td.state-color-new { text-decoration: underline; } - td.panel-done-message { background-image: none; padding: 0px 10px 0px 10px; } - td.panel-error-message { background-image: none; padding: 0px 10px 0px 10px; } -</style> -<div class="toolForm"> - %if message: - <div class="${status}">${message}</div> - %endif - <div class="toolFormTitle">Currently tracked builds <a class="action-button" href="${h.url_for( controller='data_admin', action='add_genome' )}">Add new</a></div> - <div class="toolFormBody"> - <h2>Locally cached data:</h2> - <h3>NOTE: Indexes generated here will not be reflected in the table until Galaxy is restarted.</h3> - <table id="locfiles"> - <tr> - <th>DB Key</th> - <th>Name</th> - %for label in labels: - <th>${labels[label]}</th> - %endfor - </tr> - %for dbkey in sorted(dbkeys): - <tr> - <td>${dbkey}</td> - <td>${indextable[dbkey]['name']}</td> - %for label in labels: - <td id="${dbkey}-${indexfuncs[label]}" class="indexcell ${styles[indextable[dbkey]['indexes'][label]]}" data-fapath="${indextable[dbkey]['path']}" data-longname="${indextable[dbkey]['name']}" data-index="${indexfuncs[label]}" data-dbkey="${dbkey}">${indextable[dbkey]['indexes'][label]}</td> - %endfor - - </tr> - %endfor - </table> - <h2>Recent jobs:</h2> - <p>Click the job ID to see job details and the status of any individual sub-jobs. 
Note that this list only shows jobs initiated by your account.</p> - <div id="recentJobs"> - %for job in jobgrid: - <div id="job-${job['deferred']}" data-dbkey="${job['dbkey']}" data-name="${job['intname']}" data-indexes="${job['indexers']}" data-jobid="${job['deferred']}" data-state="${job['state']}" class="historyItem-${job['state']} historyItemWrapper historyItem"> - <p>Job ID <a href="${h.url_for( controller='data_admin', action='monitor_status', job=job['deferred'] )}">${job['deferred']}</a>: - %if job['jobtype'] == 'download': - Download <em>${job['intname']}</em> - %if job['indexers']: - and index with ${job['indexers']} - %endif - %else: - Index <em>${job['intname']}</em> with ${job['indexers']} - %endif - </p> - </div> - %endfor - </div> -</div> -<script type="text/javascript"> - finalstates = new Array('done', 'error', 'ok'); - $('.indexcell').click(function() { - status = $(this).html(); - elem = $(this); - if (status != 'Generate') { - return; - } - longname = $(this).attr('data-longname'); - dbkey = $(this).attr('data-dbkey'); - indexes = $(this).attr('data-index'); - path = $(this).attr('data-fapath'); - $.post('${h.url_for( controller='data_admin', action='index_build' )}', { longname: longname, dbkey: dbkey, indexes: indexes, path: path }, function(data) { - if (data == 'ERROR') { - alert('There was an error.'); - } - else { - elem.html('Generating'); - elem.attr('class', 'indexcell state-color-running'); - } - newhtml = '<div data-dbkey="' + dbkey + '" data-name="' + longname + '" data-indexes="' + indexes + '" id="job-' + data + '" class="historyItem-new historyItemWrapper historyItem">' + - '<p>Job ID <a href="${h.url_for( controller='data_admin', action='monitor_status')}?job=' + data + '">' + data + '</a>: ' + - 'Index <em>' + longname + '</em> with ' + indexes + '</p></div>'; - $('#recentJobs').prepend(newhtml); - $('#job-' + data).delay(3000).queue(function(n) { - checkJob(data); - n(); - }); - }); - }); - - function checkJob(jobid) { - $.get('${h.url_for( controller='data_admin', action='get_jobs' )}', { jobid: jobid }, function(data) { - jsondata = JSON.parse(data)[0]; - jsondata["name"] = $('#job-' + jobid).attr('data-name'); - jsondata["dbkey"] = $('#job-' + jobid).attr('data-dbkey'); - jsondata["indexes"] = $('#job-' + jobid).attr('data-indexes'); - tdid = jq(jsondata["dbkey"] + '-' + jsondata["indexes"]); - newhtml = makeNewHTML(jsondata); - $('#job-' + jobid).replaceWith(newhtml); - if ($.inArray(jsondata["status"], finalstates) == -1) { - $('#job-' + jobid).delay(3000).queue(function(n) { - checkJob(jobid); - n(); - }); - } - if (jsondata["status"] == 'done' || jsondata["status"] == 'ok') { - elem = $(tdid); - elem.html('Generated'); - elem.attr('class', 'indexcell panel-done-message'); - } - }); - } - - function makeNewHTML(jsondata) { - newhtml = '<div data-dbkey="' + jsondata["dbkey"] + '" data-name="' + jsondata["name"] + '" data-indexes="' + jsondata["indexes"] + '" id="job-' + jsondata["jobid"] + '" class="historyItem-' + jsondata["status"] + ' historyItemWrapper historyItem">' + - '<p>Job ID <a href="${h.url_for( controller='data_admin', action='monitor_status')}?job=' + jsondata["jobid"] + '">' + jsondata["jobid"] + '</a>: ' + - 'Index <em>' + jsondata["name"] + '</em> with ' + jsondata["indexes"] + '</p></div>'; - return newhtml; - } - - $(document).ready(function() { - $('.historyItem').each(function() { - state = $(this).attr('data-state'); - jobid = $(this).attr('data-jobid'); - if ($.inArray(state, finalstates) == -1) { - checkJob(jobid); - } - 
}); - }); - - function jq(id) { - return '#' + id.replace(/(:|\.)/g,'\\$1'); - } -</script> \ No newline at end of file https://bitbucket.org/galaxy/galaxy-central/commits/d9ab8058b274/ Changeset: d9ab8058b274 Branch: stable User: davebgx Date: 2014-07-08 19:21:48 Summary: Merge stables. Affected #: 4 files diff -r c4519bae84d32d638d8fe168c6fbcab40f4e8449 -r d9ab8058b2748de6b4b1a17646c2d7456d709877 lib/galaxy/jobs/__init__.py --- a/lib/galaxy/jobs/__init__.py +++ b/lib/galaxy/jobs/__init__.py @@ -1065,6 +1065,13 @@ out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] ) inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] ) out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] ) + input_ext = 'data' + for _, data in inp_data.items(): + # For loop odd, but sort simulating behavior in galaxy.tools.actions + if not data: + continue + input_ext = data.ext + param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] ) # why not re-use self.param_dict here? ##dunno...probably should, this causes tools.parameters.basic.UnvalidatedValue to be used in following methods instead of validated and transformed values during i.e. running workflows param_dict = self.tool.params_from_strings( param_dict, self.app ) # Check for and move associated_files @@ -1075,7 +1082,7 @@ # Create generated output children and primary datasets and add to param_dict collected_datasets = { 'children': self.tool.collect_child_datasets(out_data, self.working_directory), - 'primary': self.tool.collect_primary_datasets(out_data, self.working_directory) + 'primary': self.tool.collect_primary_datasets(out_data, self.working_directory, input_ext) } param_dict.update({'__collected_datasets__': collected_datasets}) # Certain tools require tasks to be completed after job execution diff -r c4519bae84d32d638d8fe168c6fbcab40f4e8449 -r d9ab8058b2748de6b4b1a17646c2d7456d709877 lib/galaxy/tools/__init__.py --- a/lib/galaxy/tools/__init__.py +++ b/lib/galaxy/tools/__init__.py @@ -2869,12 +2869,12 @@ self.sa_session.flush() return children - def collect_primary_datasets( self, output, job_working_directory ): + def collect_primary_datasets( self, output, job_working_directory, input_ext ): """ Find any additional datasets generated by a tool and attach (for cases where number of outputs is not known in advance). """ - return output_collect.collect_primary_datatasets( self, output, job_working_directory ) + return output_collect.collect_primary_datasets( self, output, job_working_directory, input_ext ) def to_dict( self, trans, link_details=False, io_details=False ): """ Returns dict of tool. """ diff -r c4519bae84d32d638d8fe168c6fbcab40f4e8449 -r d9ab8058b2748de6b4b1a17646c2d7456d709877 lib/galaxy/tools/parameters/output_collect.py --- a/lib/galaxy/tools/parameters/output_collect.py +++ b/lib/galaxy/tools/parameters/output_collect.py @@ -14,7 +14,7 @@ DEFAULT_EXTRA_FILENAME_PATTERN = r"primary_DATASET_ID_(?P<designation>[^_]+)_(?P<visible>[^_]+)_(?P<ext>[^_]+)(_(?P<dbkey>[^_]+))?" 
-def collect_primary_datatasets( tool, output, job_working_directory ): +def collect_primary_datasets( tool, output, job_working_directory, input_ext ): app = tool.app sa_session = tool.sa_session new_primary_datasets = {} @@ -66,6 +66,8 @@ designation = fields_match.designation visible = fields_match.visible ext = fields_match.ext + if ext == "input": + ext = input_ext dbkey = fields_match.dbkey # Create new primary dataset primary_data = app.model.HistoryDatasetAssociation( extension=ext, diff -r c4519bae84d32d638d8fe168c6fbcab40f4e8449 -r d9ab8058b2748de6b4b1a17646c2d7456d709877 test/functional/tools/multi_output_configured.xml --- a/test/functional/tools/multi_output_configured.xml +++ b/test/functional/tools/multi_output_configured.xml @@ -8,19 +8,24 @@ echo "1" > subdir2/CUSTOM_1.txt; echo "2" > subdir2/CUSTOM_2.tabular; echo "3" > subdir2/CUSTOM_3.txt; + mkdir subdir3; + echo "Foo" > subdir3/Foo; </command><inputs> - <param name="input" type="integer" value="7" /> + <param name="num_param" type="integer" value="7" /> + <param name="input" type="data" /></inputs><outputs><data format="txt" name="report"><discover_datasets pattern="__designation_and_ext__" directory="subdir1" /><discover_datasets pattern="CUSTOM_(?P<designation>.+)\.(?P<ext>.+)" directory="subdir2" /> + <discover_datasets pattern="__designation__" directory="subdir3" ext="input" /></data></outputs><tests><test> - <param name="input" value="7" /> + <param name="num_param" value="7" /> + <param name="input" ftype="txt" value="simple_line.txt"/><output name="report"><assert_contents><has_line line="Hello" /> @@ -37,6 +42,9 @@ <discovered_dataset designation="2" ftype="tabular"><assert_contents><has_line line="2" /></assert_contents></discovered_dataset> + <discovered_dataset designation="Foo" ftype="txt"> + <assert_contents><has_line line="Foo" /></assert_contents> + </discovered_dataset></output></test></tests> https://bitbucket.org/galaxy/galaxy-central/commits/5874f6bc02f9/ Changeset: 5874f6bc02f9 User: davebgx Date: 2014-07-08 19:22:06 Summary: Merge stable. Affected #: 2 files https://bitbucket.org/galaxy/galaxy-central/commits/6b1415cbc145/ Changeset: 6b1415cbc145 Branch: daniel_blanchard/update-drmaa-python-version-in-eggsini-t-1402925026891 User: davebgx Date: 2014-07-08 19:22:35 Summary: Close branch daniel_blanchard/update-drmaa-python-version-in-eggsini-t-1402925026891 Affected #: 0 files Repository URL: https://bitbucket.org/galaxy/galaxy-central/ -- This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.
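[Editor's note] For readers skimming these changesets, the practical effect of the collect_primary_datasets change is that a discovered ("primary") output file may declare its format as the literal string "input", in which case it inherits the datatype extension of the job's input dataset (input_ext, which lib/galaxy/jobs/__init__.py now derives from the job's input datasets and defaults to 'data'). The sketch below is a minimal, hypothetical illustration of that substitution, not Galaxy's actual implementation: the simplified PATTERN, the resolve_discovered_ext helper, and the example filenames are invented for illustration; only the ext == "input" special case and the general shape of the default discovery filename pattern come from the diff above.

    import re

    # Simplified stand-in for Galaxy's default discovery pattern; in the real code
    # the DATASET_ID portion is tied to the parent output dataset, which is ignored here.
    PATTERN = (r"primary_(?P<dataset_id>\d+)_(?P<designation>[^_]+)_(?P<visible>[^_]+)"
               r"_(?P<ext>[^_]+)(_(?P<dbkey>[^_]+))?")

    def resolve_discovered_ext(filename, input_ext):
        """Return the datatype extension a discovered file would receive.

        Mirrors the behaviour added in these changesets: when the declared
        extension is the literal string "input", the extension of the job's
        input dataset is used instead.
        """
        match = re.match(PATTERN, filename)
        if match is None:
            return None
        ext = match.group("ext")
        if ext == "input":      # the new special case introduced here
            ext = input_ext
        return ext

    # For a job whose input dataset was tabular:
    print(resolve_discovered_ext("primary_42_Foo_visible_input", "tabular"))  # -> tabular
    print(resolve_discovered_ext("primary_42_Bar_visible_txt", "tabular"))    # -> txt

In a tool wrapper this behaviour is requested with the ext="input" attribute on a <discover_datasets> element, as exercised by the updated test/functional/tools/multi_output_configured.xml above: the discovered subdir3/Foo file is asserted to come back as ftype="txt" because the test supplies a txt input dataset.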