commit/galaxy-central: jgoecks: Trackster visual analytics: enable datasets that cannot be indexed--and hence cannot be subsetted--to be used as tool inputs.
1 new changeset in galaxy-central:

http://bitbucket.org/galaxy/galaxy-central/changeset/dc8701726b13/
changeset: r5293:dc8701726b13
user:      jgoecks
date:      2011-03-31 23:22:10
summary:   Trackster visual analytics: enable datasets that cannot be indexed--and hence cannot be subsetted--to be used as tool inputs.
affected #: 1 file (621 bytes)

--- a/lib/galaxy/web/controllers/tracks.py Thu Mar 31 15:07:02 2011 -0400
+++ b/lib/galaxy/web/controllers/tracks.py Thu Mar 31 17:22:10 2011 -0400
@@ -670,6 +670,8 @@
         if not tool:
             return messages.NO_TOOL
         tool_params = dict( [ ( p.name, p.value ) for p in original_job.parameters ] )
+        # TODO: need to handle updates to conditional parameters; conditional
+        # params are stored in dicts (and dicts within dicts).
         tool_params.update( dict( [ ( key, value ) for key, value in kwargs.items() if key in tool.inputs ] ) )
         tool_params = tool.params_from_strings( tool_params, self.app )
 
@@ -685,13 +687,16 @@
         messages_list = []
         for jida in original_job.input_datasets:
             input_dataset = jida.dataset
-            track_type, data_sources = input_dataset.datatype.get_track_type()
-            # Convert to datasource that provides 'data' because we need to
-            # extract the original data.
-            data_source = data_sources[ 'data' ]
-            msg = self._convert_dataset( trans, input_dataset, data_source )
-            if msg is not None:
-                messages_list.append( msg )
+            # TODO: put together more robust way to determine if a dataset can be indexed.
+            if hasattr( input_dataset, 'get_track_type' ):
+                # Can index dataset.
+                track_type, data_sources = input_dataset.datatype.get_track_type()
+                # Convert to datasource that provides 'data' because we need to
+                # extract the original data.
+                data_source = data_sources[ 'data' ]
+                msg = self._convert_dataset( trans, input_dataset, data_source )
+                if msg is not None:
+                    messages_list.append( msg )
 
         # Return any messages generated during conversions.
         return_message = _get_highest_priority_msg( messages_list )
@@ -707,38 +712,42 @@
         messages_list = []
         for jida in original_job.input_datasets:
             input_dataset = jida.dataset
-            track_type, data_sources = input_dataset.datatype.get_track_type()
-            data_source = data_sources[ 'data' ]
-            converted_dataset = input_dataset.get_converted_dataset( trans, data_source )
+            if hasattr( input_dataset, 'get_track_type' ):
+                #
+                # Dataset can be indexed and hence a subset can be extracted.
+                #
+                track_type, data_sources = input_dataset.datatype.get_track_type()
+                data_source = data_sources[ 'data' ]
+                converted_dataset = input_dataset.get_converted_dataset( trans, data_source )
 
-            #
-            # Create new HDA for input dataset's subset.
-            #
-            subset_dataset = trans.app.model.HistoryDatasetAssociation( extension=input_dataset.ext, \
-                                                                        dbkey=input_dataset.dbkey, \
-                                                                        create_dataset=True, \
-                                                                        sa_session=trans.sa_session,
-                                                                        name="Subset [%s:%i-%i] of data %i" % \
-                                                                            ( chrom, low, high, input_dataset.hid ),
-                                                                        visible=False )
-            job_history.add_dataset( subset_dataset )
-            trans.sa_session.add( subset_dataset )
-            trans.app.security_agent.set_all_dataset_permissions( subset_dataset.dataset, hda_permissions )
+                #
+                # Create new HDA for input dataset's subset.
+                #
+                new_dataset = trans.app.model.HistoryDatasetAssociation( extension=input_dataset.ext, \
+                                                                         dbkey=input_dataset.dbkey, \
+                                                                         create_dataset=True, \
+                                                                         sa_session=trans.sa_session,
+                                                                         name="Subset [%s:%i-%i] of data %i" % \
+                                                                             ( chrom, low, high, input_dataset.hid ),
+                                                                         visible=False )
+                job_history.add_dataset( new_dataset )
+                trans.sa_session.add( new_dataset )
+                trans.app.security_agent.set_all_dataset_permissions( new_dataset.dataset, hda_permissions )
 
-            # Write data subset to new HDA.
-            data_provider_class = get_data_provider( original_dataset=input_dataset )
-            data_provider = data_provider_class( original_dataset=input_dataset,
-                                                 converted_dataset=converted_dataset )
-            data_provider.write_data_to_file( chrom, low, high, subset_dataset.file_name )
+                # Write subset of data to new dataset
+                data_provider_class = get_data_provider( original_dataset=input_dataset )
+                data_provider = data_provider_class( original_dataset=input_dataset,
+                                                     converted_dataset=converted_dataset )
+                data_provider.write_data_to_file( chrom, low, high, new_dataset.file_name )
 
-            # TODO: size not working.
-            subset_dataset.set_size()
-            subset_dataset.info = "Data subset for trackster"
-            subset_dataset.set_dataset_state( trans.app.model.Dataset.states.OK )
-            trans.sa_session.flush()
+                # TODO: size not working.
+                new_dataset.set_size()
+                new_dataset.info = "Data subset for trackster"
+                new_dataset.set_dataset_state( trans.app.model.Dataset.states.OK )
+                trans.sa_session.flush()
 
-            # Add dataset to tool's parameters.
-            tool_params[ jida.name ] = subset_dataset
+                # Add dataset to tool's parameters.
+                tool_params[ jida.name ] = new_dataset
 
         #
         # Start tool and handle outputs.

Repository URL: https://bitbucket.org/galaxy/galaxy-central/
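For context, both hunks hinge on the same duck-typing capability probe: only datatypes that support track display expose get_track_type(), so hasattr() decides whether a dataset can be indexed (and therefore subsetted to the visible chrom:low-high region) or must be handed to the tool whole. Below is a minimal sketch of that pattern; PlainData, TrackableData, and resolve_tool_input are hypothetical stand-ins for illustration, not Galaxy's actual classes.

    # Minimal sketch of the changeset's guard pattern (hypothetical names, not Galaxy code).
    class PlainData:
        """Stand-in for a dataset whose datatype cannot be indexed."""
        def __init__( self, name ):
            self.name = name

    class TrackableData( PlainData ):
        """Stand-in for an indexable dataset; only it defines get_track_type()."""
        def get_track_type( self ):
            # Mirrors the ( track_type, data_sources ) pair used in the diff.
            return 'LineTrack', { 'data': 'bigwig' }

    def resolve_tool_input( dataset, chrom, low, high ):
        # Same probe as the changeset: a dataset that cannot be indexed cannot
        # be subsetted, so it is bound to the tool parameter unchanged.
        if hasattr( dataset, 'get_track_type' ):
            track_type, data_sources = dataset.get_track_type()
            # ... the real code converts the dataset, then writes the
            # chrom:low-high region into a new hidden HDA ...
            return "Subset [%s:%i-%i] of %s" % ( chrom, low, high, dataset.name )
        return dataset.name

    print( resolve_tool_input( TrackableData( 'wiggle track' ), 'chr1', 0, 1000 ) )
    print( resolve_tool_input( PlainData( 'tabular file' ), 'chr1', 0, 1000 ) )

One caveat visible in the diff itself: the probe checks hasattr( input_dataset, 'get_track_type' ) while the subsequent call goes through input_dataset.datatype, which is presumably why the TODO asks for a more robust way to determine indexability.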