1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/0b3db8ce29d6/
changeset:   0b3db8ce29d6
user:        jgoecks
date:        2012-12-07 17:46:03
summary:     Visualization framework enhancements: (a) refactor orphan vars and functions into mixins; (b) remove overly-qualified model references; (c) add support for multiple data types, gracefully falling back if a converter is not available; and (d) add bigwig as an index datatype for BAM.
affected #:  6 files
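To illustrate items (c) and (d): a datatype's get_track_type() can now map a source type to a prioritized list of index types instead of a single name (BAM declares "index": [ "bigwig", "summary_tree" ]), and consumers walk the list until a usable converter is found. The sketch below only illustrates that convention; resolve_index_source, convert, and fake_convert are hypothetical stand-ins, not code from this changeset, which implements the fallback inside DatasetInstance.get_datasources and DataProviderRegistry.get_data_provider.

# Illustrative sketch only -- not code from this changeset.
class NoConverterException( Exception ):
    """Raised when no converter exists for a requested data type (stand-in)."""

def resolve_index_source( source_list, convert ):
    """Try each candidate index type in priority order, falling back as needed."""
    if isinstance( source_list, str ):
        # A single name is still accepted for backwards compatibility.
        source_list = [ source_list ]
    for source in source_list:
        try:
            # convert() starts or checks a conversion, e.g. returning "pending".
            return source, convert( source )
        except NoConverterException:
            # No converter for this type; try the next, lower-priority one.
            continue
    return None, "no converter"

def fake_convert( source ):
    # Pretend no bigwig converter is installed for this dataset.
    if source == "bigwig":
        raise NoConverterException()
    return "pending"

name, msg = resolve_index_source( [ "bigwig", "summary_tree" ], fake_convert )
print name, msg   # -> summary_tree pending

Listing bigwig ahead of summary_tree means a dataset that can produce a bigwig index prefers it, while summary_tree remains the fallback when no bigwig converter is available.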
diff -r 6591b6ebfb4ea56118941deef03855412c2c00fd -r 0b3db8ce29d68bf7fe8e2a37699f58c2d9fdbddb lib/galaxy/datatypes/binary.py
--- a/lib/galaxy/datatypes/binary.py
+++ b/lib/galaxy/datatypes/binary.py
@@ -239,7 +239,7 @@
         except:
             return "Binary bam alignments file (%s)" % ( data.nice_size( dataset.get_size() ) )
     def get_track_type( self ):
-        return "ReadTrack", {"data": "bai", "index": "summary_tree"}
+        return "ReadTrack", { "data": "bai", "index": [ "bigwig", "summary_tree" ] }

 Binary.register_sniffable_binary_format("bam", "bam", Bam)
diff -r 6591b6ebfb4ea56118941deef03855412c2c00fd -r 0b3db8ce29d68bf7fe8e2a37699f58c2d9fdbddb lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -872,6 +872,16 @@
                     PAUSED = 'paused',
                     SETTING_METADATA = 'setting_metadata',
                     FAILED_METADATA = 'failed_metadata' )
+
+    conversion_messages = Bunch( PENDING = "pending",
+                                 NO_DATA = "no data",
+                                 NO_CHROMOSOME = "no chromosome",
+                                 NO_CONVERTER = "no converter",
+                                 NO_TOOL = "no tool",
+                                 DATA = "data",
+                                 ERROR = "error",
+                                 OK = "ok" )
+
     permitted_actions = get_permitted_actions( filter='DATASET' )
     file_path = "/tmp/"
     object_store = None # This get initialized in mapping.py (method init) by app.py
@@ -996,6 +1006,7 @@
 class DatasetInstance( object ):
     """A base class for all 'dataset instances', HDAs, LDAs, etc"""
     states = Dataset.states
+    conversion_messages = Dataset.conversion_messages
     permitted_actions = Dataset.permitted_actions
     def __init__( self, id=None, hid=None, name=None, info=None, blurb=None, peek=None, tool_version=None, extension=None,
                   dbkey=None, metadata=None, history=None, dataset=None, deleted=False, designation=None,
@@ -1170,9 +1181,9 @@
             if dep_dataset is None:
                 # None means converter is running first time
                 return None
-            elif dep_dataset.state == trans.app.model.Job.states.ERROR:
+            elif dep_dataset.state == Job.states.ERROR:
                 raise ConverterDependencyException("A dependency (%s) was in an error state." % dependency)
-            elif dep_dataset.state != trans.app.model.Job.states.OK:
+            elif dep_dataset.state != Job.states.OK:
                 # Pending
                 return None
             deps[dependency] = dep_dataset
@@ -1196,12 +1207,11 @@
         for name, value in self.metadata.items():
             # HACK: MetadataFile objects do not have a type/ext, so need to use metadata name
             # to determine type.
-            if dataset_ext == 'bai' and name == 'bam_index' and isinstance( value, trans.app.model.MetadataFile ):
+            if dataset_ext == 'bai' and name == 'bam_index' and isinstance( value, MetadataFile ):
                 # HACK: MetadataFile objects cannot be used by tools, so return
                 # a fake HDA that points to metadata file.
-                fake_dataset = trans.app.model.Dataset( state=trans.app.model.Dataset.states.OK,
-                                                        external_filename=value.file_name )
-                fake_hda = trans.app.model.HistoryDatasetAssociation( dataset=fake_dataset )
+                fake_dataset = Dataset( state=Dataset.states.OK, external_filename=value.file_name )
+                fake_hda = HistoryDatasetAssociation( dataset=fake_dataset )
                 return fake_hda
     def clear_associated_files( self, metadata_safe = False, purge = False ):
         raise 'Unimplemented'
@@ -1311,16 +1321,26 @@
         track_type, data_sources = self.datatype.get_track_type()
         data_sources_dict = {}
         msg = None
-        for source_type, data_source in data_sources.iteritems():
+        for source_type, source_list in data_sources.iteritems():
+            data_source = None
             if source_type == "data_standalone":
                 # Nothing to do.
                 msg = None
+                data_source = source_list
             else:
                 # Convert.
-                msg = self.convert_dataset( trans, data_source )
+                if isinstance( source_list, str ):
+                    source_list = [ source_list ]
+
+                # Loop through sources until viable one is found.
+                for source in source_list:
+                    msg = self.convert_dataset( trans, source )
+                    if msg == self.conversion_messages.PENDING:
+                        data_source = source
+                        break

             # Store msg.
-            data_sources_dict[ source_type ] = { "name" : data_source, "message": msg }
+            data_sources_dict[ source_type ] = { "name": data_source, "message": msg }

         return data_sources_dict
@@ -1331,35 +1351,23 @@
         was converted successfully.
         """

-        # FIXME: copied from controller.py
-        messages = Bunch(
-            PENDING = "pending",
-            NO_DATA = "no data",
-            NO_CHROMOSOME = "no chromosome",
-            NO_CONVERTER = "no converter",
-            NO_TOOL = "no tool",
-            DATA = "data",
-            ERROR = "error",
-            OK = "ok"
-        )
-
         # Get converted dataset; this will start the conversion if necessary.
         try:
             converted_dataset = self.get_converted_dataset( trans, target_type )
         except NoConverterException:
-            return messages.NO_CONVERTER
+            return self.conversion_messages.NO_CONVERTER
         except ConverterDependencyException, dep_error:
-            return { 'kind': messages.ERROR, 'message': dep_error.value }
+            return { 'kind': self.conversion_messages.ERROR, 'message': dep_error.value }

         # Check dataset state and return any messages.
         msg = None
-        if converted_dataset and converted_dataset.state == trans.app.model.Dataset.states.ERROR:
-            job_id = trans.sa_session.query( trans.app.model.JobToOutputDatasetAssociation ) \
+        if converted_dataset and converted_dataset.state == Dataset.states.ERROR:
+            job_id = trans.sa_session.query( JobToOutputDatasetAssociation ) \
                         .filter_by( dataset_id=converted_dataset.id ).first().job_id
-            job = trans.sa_session.query( trans.app.model.Job ).get( job_id )
-            msg = { 'kind': messages.ERROR, 'message': job.stderr }
-        elif not converted_dataset or converted_dataset.state != trans.app.model.Dataset.states.OK:
-            msg = messages.PENDING
+            job = trans.sa_session.query( Job ).get( job_id )
+            msg = { 'kind': self.conversion_messages.ERROR, 'message': job.stderr }
+        elif not converted_dataset or converted_dataset.state != Dataset.states.OK:
+            msg = self.conversion_messages.PENDING

         return msg
@@ -2414,7 +2422,7 @@
         elif not trans.app.config.smtp_server:
             comments = "Email notification failed as SMTP server not set in config file"
         if comments:
-            event = trans.app.model.RequestEvent( self, self.state, comments )
+            event = RequestEvent( self, self.state, comments )
             trans.sa_session.add( event )
             trans.sa_session.flush()
         return comments
diff -r 6591b6ebfb4ea56118941deef03855412c2c00fd -r 0b3db8ce29d68bf7fe8e2a37699f58c2d9fdbddb lib/galaxy/visualization/data_providers/registry.py
--- a/lib/galaxy/visualization/data_providers/registry.py
+++ b/lib/galaxy/visualization/data_providers/registry.py
@@ -1,6 +1,9 @@
 from galaxy.visualization.data_providers.basic import ColumnDataProvider
-from galaxy.visualization.data_providers.genome import *
+from galaxy.visualization.data_providers import genome
+from galaxy.model import NoConverterException
 from galaxy.visualization.data_providers.phyloviz import PhylovizDataProvider
+from galaxy.datatypes.tabular import Tabular, Vcf
+from galaxy.datatypes.interval import Interval, ENCODEPeak, ChromatinInteractions, Gtf, Gff, Bed
 from galaxy.datatypes.xml import Phyloxml
 from galaxy.datatypes.data import Newick, Nexus
@@ -15,20 +18,20 @@
         # is original dataset type.
         self.dataset_type_name_to_data_provider = {
             "tabix": {
-                Vcf: VcfTabixDataProvider,
-                Bed: BedTabixDataProvider,
-                Gtf: GtfTabixDataProvider,
-                ENCODEPeak: ENCODEPeakTabixDataProvider,
-                Interval: IntervalTabixDataProvider,
-                ChromatinInteractions: ChromatinInteractionsTabixDataProvider,
-                "default" : TabixDataProvider
+                Vcf: genome.VcfTabixDataProvider,
+                Bed: genome.BedTabixDataProvider,
+                Gtf: genome.GtfTabixDataProvider,
+                ENCODEPeak: genome.ENCODEPeakTabixDataProvider,
+                Interval: genome.IntervalTabixDataProvider,
+                ChromatinInteractions: genome.ChromatinInteractionsTabixDataProvider,
+                "default" : genome.TabixDataProvider
             },
-            "interval_index": IntervalIndexDataProvider,
-            "bai": BamDataProvider,
-            "bam": SamDataProvider,
-            "summary_tree": SummaryTreeDataProvider,
-            "bigwig": BigWigDataProvider,
-            "bigbed": BigBedDataProvider
+            "interval_index": genome.IntervalIndexDataProvider,
+            "bai": genome.BamDataProvider,
+            "bam": genome.SamDataProvider,
+            "summary_tree": genome.SummaryTreeDataProvider,
+            "bigwig": genome.BigWigDataProvider,
+            "bigbed": genome.BigBedDataProvider
         }
     def get_data_provider( self, trans, name=None, source='data', raw=False, original_dataset=None ):
@@ -41,15 +44,15 @@
         if raw:
             # Working with raw data.
             if isinstance( original_dataset.datatype, Gff ):
-                data_provider_class = RawGFFDataProvider
+                data_provider_class = genome.RawGFFDataProvider
             elif isinstance( original_dataset.datatype, Bed ):
-                data_provider_class = RawBedDataProvider
+                data_provider_class = genome.RawBedDataProvider
             elif isinstance( original_dataset.datatype, Vcf ):
-                data_provider_class = RawVcfDataProvider
+                data_provider_class = genome.RawVcfDataProvider
             elif isinstance( original_dataset.datatype, Tabular ):
-                data_provider_class = ColumnDataProvider
+                data_provider_class = genome.ColumnDataProvider
             elif isinstance( original_dataset.datatype, ( Nexus, Newick, Phyloxml ) ):
-                data_provider_class = PhylovizDataProvider
+                data_provider_class = genome.PhylovizDataProvider

             data_provider = data_provider_class( original_dataset=original_dataset )
@@ -87,10 +90,20 @@
         # Get data provider mapping and data provider.
         _ , data_provider_mapping = original_dataset.datatype.get_track_type()
         if 'data_standalone' in data_provider_mapping:
-            data_provider_name = data_provider_mapping[ 'data_standalone' ]
+            data_provider = self.get_data_provider( trans,
+                                                    name=data_provider_mapping[ 'data_standalone' ],
+                                                    original_dataset=original_dataset )
         else:
-            data_provider_name = data_provider_mapping[ source ]
-
-        data_provider = self.get_data_provider( trans, name=data_provider_name, original_dataset=original_dataset )
+            source_list = data_provider_mapping[ source ]
+            if isinstance( source_list, str ):
+                source_list = [ source_list ]
+
+            # Find a valid data provider in the source list.
+            for source in source_list:
+                try:
+                    data_provider = self.get_data_provider( trans, name=source, original_dataset=original_dataset )
+                    break
+                except NoConverterException:
+                    pass

         return data_provider
\ No newline at end of file
diff -r 6591b6ebfb4ea56118941deef03855412c2c00fd -r 0b3db8ce29d68bf7fe8e2a37699f58c2d9fdbddb lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -190,37 +190,6 @@

 # -- Mixins for working with Galaxy objects. -- #

-# Message strings returned to browser
-messages = Bunch(
-    PENDING = "pending",
-    NO_DATA = "no data",
-    NO_CHROMOSOME = "no chromosome",
-    NO_CONVERTER = "no converter",
-    NO_TOOL = "no tool",
-    DATA = "data",
-    ERROR = "error",
-    OK = "ok"
-)
-
-def get_highest_priority_msg( message_list ):
-    """
-    Returns highest priority message from a list of messages.
-    """
-    return_message = None
-
-    # For now, priority is: job error (dict), no converter, pending.
-    for message in message_list:
-        if message is not None:
-            if isinstance(message, dict):
-                return_message = message
-                break
-            elif message == messages.NO_CONVERTER:
-                return_message = message
-            elif return_message == None and message == messages.PENDING:
-                return_message = message
-    return return_message
-
-
 class SharableItemSecurityMixin:
     """ Mixin for handling security for sharable items. """
     def security_check( self, trans, item, check_ownership=False, check_accessible=False ):
@@ -313,11 +282,11 @@
         Returns a message if dataset is not ready to be used in visualization.
         """
         if not dataset:
-            return messages.NO_DATA
+            return dataset.conversion_messages.NO_DATA
         if dataset.state == trans.app.model.Job.states.ERROR:
-            return messages.ERROR
+            return dataset.conversion_messages.ERROR
         if dataset.state != trans.app.model.Job.states.OK:
-            return messages.PENDING
+            return dataset.conversion_messages.PENDING
         return None
@@ -624,7 +593,7 @@

         # If there are no messages (messages indicate data is not ready/available), get data.
         messages_list = [ data_source_dict[ 'message' ] for data_source_dict in data_sources.values() ]
-        message = get_highest_priority_msg( messages_list )
+        message = self._get_highest_priority_msg( messages_list )
         if message:
             rval = message
         else:
@@ -646,6 +615,27 @@

         return rval

+    # FIXME: this method probably belongs down in the model.Dataset class.
+    def _get_highest_priority_msg( self, message_list ):
+        """
+        Returns highest priority message from a list of messages.
+        """
+        return_message = None
+
+        # For now, priority is: job error (dict), no converter, pending.
+        for message in message_list:
+            if message is not None:
+                if isinstance(message, dict):
+                    return_message = message
+                    break
+                elif message == "no converter":
+                    return_message = message
+                elif return_message == None and message == "pending":
+                    return_message = message
+        return return_message
+
+
 class UsesStoredWorkflowMixin( SharableItemSecurityMixin ):
     """ Mixin for controllers that use StoredWorkflow objects. """
     def get_stored_workflow( self, trans, id, check_ownership=True, check_accessible=False ):
diff -r 6591b6ebfb4ea56118941deef03855412c2c00fd -r 0b3db8ce29d68bf7fe8e2a37699f58c2d9fdbddb lib/galaxy/webapps/galaxy/api/datasets.py
--- a/lib/galaxy/webapps/galaxy/api/datasets.py
+++ b/lib/galaxy/webapps/galaxy/api/datasets.py
@@ -4,7 +4,7 @@
 import logging, os, string, shutil, urllib, re, socket
 from galaxy import util, datatypes, jobs, web, util
 from galaxy.visualization.data_providers.genome import FeatureLocationIndexDataProvider
-from galaxy.web.base.controller import BaseAPIController, UsesVisualizationMixin, get_highest_priority_msg, messages
+from galaxy.web.base.controller import BaseAPIController, UsesVisualizationMixin
 from galaxy.web.framework.helpers import is_true

 log = logging.getLogger( __name__ )
@@ -64,7 +64,7 @@

         msg = self.check_dataset_state( trans, dataset )
         if not msg:
-            msg = messages.DATA
+            msg = dataset.conversion_messages.DATA

         return msg

@@ -80,7 +80,7 @@
         # Get datasources and check for messages (which indicate errors). Retry if flag is set.
         data_sources = dataset.get_datasources( trans )
         messages_list = [ data_source_dict[ 'message' ] for data_source_dict in data_sources.values() ]
-        msg = get_highest_priority_msg( messages_list )
+        msg = self._get_highest_priority_msg( messages_list )
         if msg:
             if retry:
                 # Clear datasources and then try again.
@@ -92,12 +92,12 @@
         # If there is a chrom, check for data on the chrom.
         if chrom:
             data_provider_registry = trans.app.data_provider_registry
-            data_provider = trans.app.data_provider_registry.get_data_provider( trans, original_dataset= dataset, source='index' )
+            data_provider = trans.app.data_provider_registry.get_data_provider( trans, original_dataset=dataset, source='index' )
             if not data_provider.has_data( chrom ):
-                return messages.NO_DATA
+                return dataset.conversion_messages.NO_DATA

         # Have data if we get here
-        return { "status": messages.DATA, "valid_chroms": None }
+        return { "status": dataset.conversion_messages.DATA, "valid_chroms": None }

     def _search_features( self, trans, dataset, query ):
         """
@@ -121,17 +121,17 @@

         # Parameter check.
         if not chrom:
-            return messages.NO_DATA
+            return dataset.conversion_messages.NO_DATA

         # Dataset check.
         msg = self.check_dataset_state( trans, dataset )
         if msg:
             return msg

-        # Get datasources and check for messages.
+        # Get datasources and check for essages.
         data_sources = dataset.get_datasources( trans )
         messages_list = [ data_source_dict[ 'message' ] for data_source_dict in data_sources.values() ]
-        return_message = get_highest_priority_msg( messages_list )
+        return_message = self._get_highest_priority_msg( messages_list )
         if return_message:
             return return_message
diff -r 6591b6ebfb4ea56118941deef03855412c2c00fd -r 0b3db8ce29d68bf7fe8e2a37699f58c2d9fdbddb lib/galaxy/webapps/galaxy/api/tools.py
--- a/lib/galaxy/webapps/galaxy/api/tools.py
+++ b/lib/galaxy/webapps/galaxy/api/tools.py
@@ -1,5 +1,5 @@
 from galaxy import web, util
-from galaxy.web.base.controller import BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesVisualizationMixin, messages, get_highest_priority_msg
+from galaxy.web.base.controller import BaseAPIController, UsesHistoryDatasetAssociationMixin, UsesVisualizationMixin
 from galaxy.visualization.genome.visual_analytics import get_dataset_job
 from galaxy.visualization.genomes import GenomeRegion
 from galaxy.util.json import to_json_string, from_json_string
@@ -57,7 +57,7 @@
         tool_id = payload[ 'tool_id' ]
         tool = trans.app.toolbox.get_tool( tool_id )
         if not tool:
-            return { "message": { "type": "error", "text" : messages.NO_TOOL } }
+            return { "message": { "type": "error", "text" : trans.app.model.Dataset.conversion_messages.NO_TOOL } }

         # Set running history from payload parameters.
         # History not set correctly as part of this API call for
@@ -123,7 +123,7 @@
         #
         tool = trans.app.toolbox.get_tool( tool_id )
         if not tool:
-            return messages.NO_TOOL
+            return trans.app.model.Dataset.conversion_messages.NO_TOOL

         # HACK: add run button so that tool.handle_input will run tool.
         kwargs['runtool_btn'] = 'Execute'
@@ -199,7 +199,7 @@
         original_job = get_dataset_job( original_dataset )
         tool = trans.app.toolbox.get_tool( original_job.tool_id )
         if not tool:
-            return messages.NO_TOOL
+            return trans.app.model.Dataset.conversion_messages.NO_TOOL
         tool_params = dict( [ ( p.name, p.value ) for p in original_job.parameters ] )

         # TODO: rather than set new inputs using dict of json'ed value, unpack parameters and set using set_param_value below.
@@ -226,7 +226,7 @@
                 messages_list.append( msg )

         # Return any messages generated during conversions.
-        return_message = get_highest_priority_msg( messages_list )
+        return_message = self._get_highest_priority_msg( messages_list )
         if return_message:
             return to_json_string( return_message )

@@ -290,7 +290,7 @@
                 break

         if not success:
-            return messages.ERROR
+            return trans.app.model.Dataset.conversion_messages.ERROR

         #
         # Set input datasets for tool. If running on regions, extract and use subset
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.
galaxy-commits@lists.galaxyproject.org