details:   http://www.bx.psu.edu/hg/galaxy/rev/0984c3800775
changeset: 3022:0984c3800775
user:      Greg Von Kuster <greg@bx.psu.edu>
date:      Thu Nov 12 15:25:48 2009 -0500
description:
Eliminate the _monkeypatch_session_method from assignmapper by cleaning up the remaining object flushes. The _monkeypatch_query_method remains due to a single object query in ~/datatypes/metadata.py in the FileParameter.wrap() method.

The sqlalchemy session is now also passed into the __init__ methods of both HistoryDatasetAssociation and LibraryDatasetDatasetAssociation when the create_dataset param is True, so that methods in the DatasetInstance class can correctly add the created dataset to the sqlalchemy session and flush it.

diffstat:

 lib/galaxy/datatypes/metadata.py           |  25 +++++++-----
 lib/galaxy/model/__init__.py               |  65 +++++++++++++++++++++-----------
 lib/galaxy/model/mapping_tests.py          |   5 +-
 lib/galaxy/model/orm/ext/assignmapper.py   |  18 +--------
 lib/galaxy/tools/__init__.py               |  15 ++++++-
 lib/galaxy/tools/actions/__init__.py       |   2 +-
 lib/galaxy/tools/actions/upload_common.py  |   8 ++-
 lib/galaxy/tools/parameters/basic.py       |   2 +-
 lib/galaxy/web/controllers/async.py        |   2 +-
 lib/galaxy/web/controllers/requests.py     |   3 +-
 lib/galaxy/web/controllers/root.py         |   7 +++-
 tools/data_source/microbial_import_code.py |   2 +-
 tools/maf/maf_to_bed_code.py               |   2 +-

 13 files changed, 94 insertions(+), 62 deletions(-)

diffs (505 lines):

diff -r 8bc85721cbce -r 0984c3800775 lib/galaxy/datatypes/metadata.py
--- a/lib/galaxy/datatypes/metadata.py	Thu Nov 12 13:20:19 2009 -0500
+++ b/lib/galaxy/datatypes/metadata.py	Thu Nov 12 15:25:48 2009 -0500
@@ -4,6 +4,7 @@
 from galaxy.util.odict import odict
 from galaxy.web import form_builder
 import galaxy.model
+from sqlalchemy.orm import object_session
 import pkg_resources

 pkg_resources.require("simplejson")
@@ -298,7 +299,6 @@
         if not isinstance( value, list ):
             return [value]
         return value
-
 class DBKeyParameter( SelectParameter ):
     def get_html_field( self, value=None, context={}, other_values={}, values=None, **kwd):
         try:
@@ -387,26 +387,28 @@
         return "<div>No display available for Metadata Files</div>"
     def wrap( self, value ):
+        if value is None:
+            return None
         if isinstance( value, galaxy.model.MetadataFile ) or isinstance( value, MetadataTempFile ):
             return value
         if DATABASE_CONNECTION_AVAILABLE:
             try:
-                # FIXME: GVK ( 11/11/09 ) had to add the monkey patch back into assignmapper for the get
-                # method for this since Metadata has no hook into mapping.context ( the salalchemy session ).
+                # FIXME: this query requires a monkey patch in assignmapper.py since
+                # MetadataParameters do not have a handle to the sqlalchemy session
                 return galaxy.model.MetadataFile.get( value )
             except:
                 #value was not a valid id
                 return None
-        elif value is not None:
+        else:
             mf = galaxy.model.MetadataFile()
             mf.id = value #we assume this is a valid id, since we cannot check it
             return mf
-        return None
-    def make_copy( self, value, target_context = None, source_context = None ):
+    def make_copy( self, value, target_context, source_context ):
         value = self.wrap( value )
         if value:
             new_value = galaxy.model.MetadataFile( dataset = target_context.parent, name = self.spec.name )
-            new_value.flush()
+            object_session( target_context.parent ).add( new_value )
+            object_session( target_context.parent ).flush()
             shutil.copy( value.file_name, new_value.file_name )
             return self.unwrap( new_value )
         return None
@@ -441,7 +443,8 @@
     def new_file( self, dataset = None, **kwds ):
         if DATABASE_CONNECTION_AVAILABLE:
             mf = galaxy.model.MetadataFile( name = self.spec.name, dataset = dataset, **kwds )
-            mf.flush() #flush to assign id
+            object_session( dataset ).add( mf )
+            object_session( dataset ).flush() #flush to assign id
             return mf
         else:
             #we need to make a tmp file that is accessable to the head node,
@@ -557,7 +560,8 @@
             #file to store kwds passed to set_meta()
             metadata_files.filename_kwds = relpath( tempfile.NamedTemporaryFile( dir = tmp_dir, prefix = "metadata_kwds_%s_" % key ).name )
             simplejson.dump( kwds, open( metadata_files.filename_kwds, 'wb+' ), ensure_ascii=True )
-            metadata_files.flush()
+            sa_session.add( metadata_files )
+            sa_session.flush()
             metadata_files_list.append( metadata_files )
         #return command required to build
         return "%s %s %s %s %s %s" % ( os.path.join( exec_dir, 'set_metadata.sh' ), dataset_files_path, tmp_dir, config_root, datatypes_config, " ".join( map( __metadata_files_list_to_cmd_line, metadata_files_list ) ) )
@@ -586,4 +590,5 @@
     def set_job_runner_external_pid( self, pid, sa_session ):
         for metadata_files in sa_session.query( galaxy.model.Job ).get( self.job_id ).external_output_metadata:
             metadata_files.job_runner_external_pid = pid
-            metadata_files.flush()
+            sa_session.add( metadata_files )
+            sa_session.flush()
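The recurring edit above, repeated throughout the changeset, swaps the monkey-patched obj.flush() for an explicit add/flush on the session that owns the object, obtained via sqlalchemy.orm.object_session. A minimal, self-contained sketch of the pattern (the Widget model and in-memory engine are hypothetical, not part of Galaxy):

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import object_session, sessionmaker

    Base = declarative_base()

    class Widget( Base ):
        __tablename__ = 'widget'
        id = Column( Integer, primary_key=True )
        name = Column( String( 40 ) )

    engine = create_engine( 'sqlite:///:memory:' )
    Base.metadata.create_all( engine )
    sa_session = sessionmaker( bind=engine )()

    w = Widget( name='w1' )
    sa_session.add( w )
    sa_session.flush()                # w.id is assigned here

    # Later, with only the instance in hand, recover its owning session
    # instead of relying on a monkey-patched w.flush():
    object_session( w ).add( w )      # re-adding an attached instance is a no-op
    object_session( w ).flush()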
diff -r 8bc85721cbce -r 0984c3800775 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py	Thu Nov 12 13:20:19 2009 -0500
+++ b/lib/galaxy/model/__init__.py	Thu Nov 12 15:25:48 2009 -0500
@@ -17,6 +17,7 @@
 from galaxy.web.form_builder import *
 import logging
 log = logging.getLogger( __name__ )
+from sqlalchemy.orm import object_session

 datatypes_registry = galaxy.datatypes.registry.Registry() #Default Value Required for unit tests

@@ -205,7 +206,8 @@
     def add_dataset( self, dataset, parent_id=None, genome_build=None, set_hid = True ):
         if isinstance( dataset, Dataset ):
             dataset = HistoryDatasetAssociation( dataset = dataset, copied_from = dataset )
-            dataset.flush()
+            object_session( self ).add( dataset )
+            object_session( self ).flush()
         elif not isinstance( dataset, HistoryDatasetAssociation ):
             raise TypeError, "You can only add Dataset and HistoryDatasetAssociation instances to a history ( you tried to add %s )." % str( dataset )
         if parent_id:
@@ -229,7 +231,8 @@
         if not target_user:
             target_user = self.user
         new_history = History( name=name, user=target_user )
-        new_history.flush()
+        object_session( self ).add( new_history )
+        object_session( self ).flush()
         if activatable:
             hdas = self.activatable_datasets
         else:
@@ -237,9 +240,11 @@
         for hda in hdas:
             new_hda = hda.copy( copy_children=True, target_history=new_history )
             new_history.add_dataset( new_hda, set_hid = False )
-            new_hda.flush()
+            object_session( self ).add( new_hda )
+            object_session( self ).flush()
         new_history.hid_counter = self.hid_counter
-        new_history.flush()
+        object_session( self ).add( new_history )
+        object_session( self ).flush()
         return new_history
     @property
     def activatable_datasets( self ):
@@ -439,7 +444,7 @@
     permitted_actions = Dataset.permitted_actions
     def __init__( self, id=None, hid=None, name=None, info=None, blurb=None, peek=None, extension=None,
                   dbkey=None, metadata=None, history=None, dataset=None, deleted=False, designation=None,
-                  parent_id=None, validation_errors=None, visible=True, create_dataset=False ):
+                  parent_id=None, validation_errors=None, visible=True, create_dataset=False, sa_session=None ):
         self.name = name or "Unnamed dataset"
         self.id = id
         self.info = info
@@ -454,8 +459,10 @@
         self.visible = visible
         # Relationships
         if not dataset and create_dataset:
+            # Had to pass the sqlalchemy session in order to create a new dataset
             dataset = Dataset( state=Dataset.states.NEW )
-            dataset.flush()
+            sa_session.add( dataset )
+            sa_session.flush()
         self.dataset = dataset
         self.parent_id = parent_id
         self.validation_errors = validation_errors
@@ -466,7 +473,8 @@
         return self.dataset.state
     def set_dataset_state ( self, state ):
         self.dataset.state = state
-        self.dataset.flush() #flush here, because hda.flush() won't flush the Dataset object
+        object_session( self ).add( self.dataset )
+        object_session( self ).flush() #flush here, because hda.flush() won't flush the Dataset object
     state = property( get_dataset_state, set_dataset_state )
     def get_file_name( self ):
         return self.dataset.get_file_name()
@@ -616,8 +624,11 @@
                   history = None,
                   copied_from_history_dataset_association = None,
                   copied_from_library_dataset_dataset_association = None,
+                  sa_session = None,
                   **kwd ):
-        DatasetInstance.__init__( self, **kwd )
+        # FIXME: sa_session must be passed to DatasetInstance if the create_dataset
+        # parameter is True so that the new object can be flushed. Is there a better way?
+        DatasetInstance.__init__( self, sa_session=sa_session, **kwd )
         self.hid = hid
         # Relationships
         self.history = history
@@ -637,7 +648,8 @@
                                           parent_id=parent_id,
                                           copied_from_history_dataset_association=self,
                                           history = target_history )
-        hda.flush()
+        object_session( self ).add( hda )
+        object_session( self ).flush()
         hda.set_size()
         # Need to set after flushed, as MetadataFiles require dataset.id
         hda.metadata = self.metadata
@@ -647,7 +659,7 @@
         if not self.datatype.copy_safe_peek:
             # In some instances peek relies on dataset_id, i.e. gmaj.zip for viewing MAFs
             hda.set_peek()
-        hda.flush()
+        object_session( self ).flush()
         return hda
     def to_library_dataset_dataset_association( self, target_folder, replace_dataset=None, parent_id=None, user=None ):
         if replace_dataset:
@@ -657,7 +669,8 @@
             # If replace_dataset is None, the Library level permissions will be taken from the folder and applied to the new
             # LibraryDataset, and the current user's DefaultUserPermissions will be applied to the associated Dataset.
             library_dataset = LibraryDataset( folder=target_folder, name=self.name, info=self.info )
-            library_dataset.flush()
+            object_session( self ).add( library_dataset )
+            object_session( self ).flush()
         if not user:
             user = self.history.user
         ldda = LibraryDatasetDatasetAssociation( name=self.name,
@@ -673,15 +686,18 @@
                                                  parent_id=parent_id,
                                                  copied_from_history_dataset_association=self,
                                                  user=user )
-        ldda.flush()
+        object_session( self ).add( ldda )
+        object_session( self ).flush()
         # Permissions must be the same on the LibraryDatasetDatasetAssociation and the associated LibraryDataset
         # Must set metadata after ldda flushed, as MetadataFiles require ldda.id
         ldda.metadata = self.metadata
         if not replace_dataset:
             target_folder.add_library_dataset( library_dataset, genome_build=ldda.dbkey )
-            target_folder.flush()
+            object_session( self ).add( target_folder )
+            object_session( self ).flush()
         library_dataset.library_dataset_dataset_association_id = ldda.id
-        library_dataset.flush()
+        object_session( self ).add( library_dataset )
+        object_session( self ).flush()
         for child in self.children:
             child_copy = child.to_library_dataset_dataset_association( target_folder=target_folder,
                                                                        replace_dataset=replace_dataset,
@@ -690,7 +706,7 @@
         if not self.datatype.copy_safe_peek:
             # In some instances peek relies on dataset_id, i.e. gmaj.zip for viewing MAFs
             ldda.set_peek()
-        ldda.flush()
+        object_session( self ).flush()
         return ldda
     def clear_associated_files( self, metadata_safe = False, purge = False ):
         # metadata_safe = True means to only clear when assoc.metadata_safe == False
@@ -814,8 +830,8 @@
     def set_library_dataset_dataset_association( self, ldda ):
         self.library_dataset_dataset_association = ldda
         ldda.library_dataset = self
-        ldda.flush()
-        self.flush()
+        object_session( self ).add_all( ( ldda, self ) )
+        object_session( self ).flush()
     def get_info( self ):
         if self.library_dataset_dataset_association:
             return self.library_dataset_dataset_association.info
@@ -853,8 +869,11 @@
                   copied_from_library_dataset_dataset_association=None,
                   library_dataset=None,
                   user=None,
+                  sa_session=None,
                   **kwd ):
-        DatasetInstance.__init__( self, **kwd )
+        # FIXME: sa_session must be passed to DatasetInstance if the create_dataset
+        # parameter in kwd is True so that the new object can be flushed. Is there a better way?
+        DatasetInstance.__init__( self, sa_session=sa_session, **kwd )
         self.copied_from_history_dataset_association = copied_from_history_dataset_association
         self.copied_from_library_dataset_dataset_association = copied_from_library_dataset_dataset_association
         self.library_dataset = library_dataset
@@ -872,7 +891,8 @@
                                           parent_id=parent_id,
                                           copied_from_library_dataset_dataset_association=self,
                                           history=target_history )
-        hda.flush()
+        object_session( self ).add( hda )
+        object_session( self ).flush()
         hda.metadata = self.metadata #need to set after flushed, as MetadataFiles require dataset.id
         if add_to_history and target_history:
             target_history.add_dataset( hda )
@@ -880,7 +900,7 @@
             child_copy = child.to_history_dataset_association( target_history = target_history, parent_id = hda.id, add_to_history = False )
         if not self.datatype.copy_safe_peek:
             hda.set_peek() #in some instances peek relies on dataset_id, i.e. gmaj.zip for viewing MAFs
-        hda.flush()
+        object_session( self ).flush()
         return hda
     def copy( self, copy_children = False, parent_id = None, target_folder = None ):
         ldda = LibraryDatasetDatasetAssociation( name=self.name,
@@ -895,7 +915,8 @@
                                                  parent_id=parent_id,
                                                  copied_from_library_dataset_dataset_association=self,
                                                  folder=target_folder )
-        ldda.flush()
+        object_session( self ).add( ldda )
+        object_session( self ).flush()
         # Need to set after flushed, as MetadataFiles require dataset.id
         ldda.metadata = self.metadata
         if copy_children:
@@ -904,7 +925,7 @@
         if not self.datatype.copy_safe_peek:
             # In some instances peek relies on dataset_id, i.e. gmaj.zip for viewing MAFs
             ldda.set_peek()
-        ldda.flush()
+        object_session( self ).flush()
         return ldda
     def clear_associated_files( self, metadata_safe = False, purge = False ):
         return
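The net effect on callers is a new constructor contract: create_dataset=True now requires sa_session, because __init__ itself adds and flushes the Dataset it builds. A sketch of the intended usage, assuming an initialized Galaxy model and session as exercised in mapping_tests.py below (an illustrative fragment, not a complete program):

    from galaxy import model

    h = model.History( name="test history" )
    model.session.add( h )
    # create_dataset=True makes __init__ construct a backing Dataset, so the
    # session must be supplied for that Dataset to be added and flushed:
    hda = model.HistoryDatasetAssociation( extension='interval',
                                           history=h,
                                           create_dataset=True,
                                           sa_session=model.session )
    model.session.add( hda )
    model.session.flush()    # both the HDA and its Dataset now have database ids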
diff -r 8bc85721cbce -r 0984c3800775 lib/galaxy/model/mapping_tests.py
--- a/lib/galaxy/model/mapping_tests.py	Thu Nov 12 13:20:19 2009 -0500
+++ b/lib/galaxy/model/mapping_tests.py	Thu Nov 12 15:25:48 2009 -0500
@@ -15,11 +15,12 @@
         #h1.queries.append( model.Query( "h1->q1" ) )
         #h1.queries.append( model.Query( "h1->q2" ) )
         h2 = model.History( name=( "H" * 1024 ) )
+        model.session.add_all( ( u, h1, h2 ) )
         #q1 = model.Query( "h2->q1" )
-        d1 = model.HistoryDatasetAssociation( extension="interval", metadata=dict(chromCol=1,startCol=2,endCol=3 ), history=h2, create_dataset=True )
+        d1 = model.HistoryDatasetAssociation( extension="interval", metadata=dict(chromCol=1,startCol=2,endCol=3 ), history=h2, create_dataset=True, sa_session=model.session )
         #h2.queries.append( q1 )
         #h2.queries.append( model.Query( "h2->q2" ) )
-        model.session.add_all( ( u, h1, h2, d1 ) )
+        model.session.add( ( d1 ) )
         model.session.flush()
         model.session.expunge_all()
         # Check
diff -r 8bc85721cbce -r 0984c3800775 lib/galaxy/model/orm/ext/assignmapper.py
--- a/lib/galaxy/model/orm/ext/assignmapper.py	Thu Nov 12 13:20:19 2009 -0500
+++ b/lib/galaxy/model/orm/ext/assignmapper.py	Thu Nov 12 15:25:48 2009 -0500
@@ -19,7 +19,6 @@
 from sqlalchemy.orm import mapper as sqla_mapper

 def _monkeypatch_query_method( name, session, class_ ):
-    # TODO: eliminate this method by fixing the single query in ~/datatypes/metadata.py ( line 396 )
     def do(self, *args, **kwargs):
         return getattr( class_.query, name)(*args, **kwargs)
     try:
@@ -28,20 +27,6 @@
         pass
     if not hasattr(class_, name):
         setattr(class_, name, classmethod(do))
-def _monkeypatch_session_method( name, session, class_ ):
-    # TODO: eliminate this method by fixing the session flushes in ~/model/__init__.py ( 20 of them )
-    # and ~/datatypes/metadata.py ( 4 of them ). The affected objects have no known hook into mapping.context
-    # ( i.e., sqlalchemy session ).
-    def do( self, *args, **kwargs ):
-        if self not in session.deleted:
-            session.add( self )
-        return session.flush()
-    try:
-        do.__name__ = name
-    except:
-        pass
-    if not hasattr( class_, name ):
-        setattr( class_, name, do )
 def session_mapper( scoped_session, class_, *args, **kwargs ):
     def mapper( cls, *arg, **kw ):
         validate = kw.pop( 'validate', False )
@@ -54,8 +39,9 @@
                     setattr( self, key, value )
             cls.__init__ = __init__
         cls.query = scoped_session.query_property()
+        # FIXME: eliminate the need for the following monkey patch by fixing the single
+        # query in ~/datatypes/metadata.py in the FileParameter.wrap() method
         _monkeypatch_query_method( 'get', scoped_session, cls )
-        _monkeypatch_session_method( 'flush', scoped_session, cls )
         return sqla_mapper( cls, *arg, **kw )
     return mapper( class_, *args, **kwargs )
 def assign_mapper( session, class_, *args, **kwargs ):
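For reference, the monkey patch that survives gives every mapped class a get classmethod backed by the scoped session's query property; that is what lets galaxy.model.MetadataFile.get( value ) work in FileParameter.wrap() without a session handle. A condensed sketch of the mechanism, simplified from assignmapper.py (the User model and engine here are hypothetical):

    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import scoped_session, sessionmaker

    Base = declarative_base()
    engine = create_engine( 'sqlite:///:memory:' )
    Session = scoped_session( sessionmaker( bind=engine ) )

    class User( Base ):
        __tablename__ = 'galaxy_user'
        id = Column( Integer, primary_key=True )
        email = Column( String( 255 ) )

    Base.metadata.create_all( engine )

    # What session_mapper does, reduced to its essentials:
    User.query = Session.query_property()
    def _get( cls, *args, **kwargs ):
        # dispatch to the Query bound to the scoped session
        return cls.query.get( *args, **kwargs )
    User.get = classmethod( _get )

    Session.add( User( email='test@example.org' ) )
    Session.flush()
    assert User.get( 1 ).email == 'test@example.org'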
diff -r 8bc85721cbce -r 0984c3800775 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py	Thu Nov 12 13:20:19 2009 -0500
+++ b/lib/galaxy/tools/__init__.py	Thu Nov 12 15:25:48 2009 -0500
@@ -1495,7 +1495,13 @@
             if visible == "visible": visible = True
             else: visible = False
             ext = fields.pop(0).lower()
-            child_dataset = self.app.model.HistoryDatasetAssociation( extension=ext, parent_id=outdata.id, designation=designation, visible=visible, dbkey=outdata.dbkey, create_dataset=True )
+            child_dataset = self.app.model.HistoryDatasetAssociation( extension=ext,
+                                                                      parent_id=outdata.id,
+                                                                      designation=designation,
+                                                                      visible=visible,
+                                                                      dbkey=outdata.dbkey,
+                                                                      create_dataset=True,
+                                                                      sa_session=self.sa_session )
             self.app.security_agent.copy_dataset_permissions( outdata.dataset, child_dataset.dataset )
             # Move data from temp location to dataset location
             shutil.move( filename, child_dataset.file_name )
@@ -1548,7 +1554,12 @@
             if fields:
                 dbkey = fields[ 0 ]
             # Create new primary dataset
-            primary_data = self.app.model.HistoryDatasetAssociation( extension=ext, designation=designation, visible=visible, dbkey=dbkey, create_dataset=True )
+            primary_data = self.app.model.HistoryDatasetAssociation( extension=ext,
+                                                                     designation=designation,
+                                                                     visible=visible,
+                                                                     dbkey=dbkey,
+                                                                     create_dataset=True,
+                                                                     sa_session=self.sa_session )
             self.app.security_agent.copy_dataset_permissions( outdata.dataset, primary_data.dataset )
             self.sa_session.add( primary_data )
             self.sa_session.flush()
diff -r 8bc85721cbce -r 0984c3800775 lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py	Thu Nov 12 13:20:19 2009 -0500
+++ b/lib/galaxy/tools/actions/__init__.py	Thu Nov 12 15:25:48 2009 -0500
@@ -198,7 +198,7 @@
                 if check is not None:
                     if str( getattr( check, when_elem.get( 'attribute' ) ) ) == when_elem.get( 'value', None ):
                         ext = when_elem.get( 'format', ext )
-            data = trans.app.model.HistoryDatasetAssociation( extension=ext, create_dataset=True )
+            data = trans.app.model.HistoryDatasetAssociation( extension=ext, create_dataset=True, sa_session=trans.sa_session )
             # Commit the dataset immediately so it gets database assigned unique id
             trans.sa_session.add( data )
             trans.sa_session.flush()
diff -r 8bc85721cbce -r 0984c3800775 lib/galaxy/tools/actions/upload_common.py
--- a/lib/galaxy/tools/actions/upload_common.py	Thu Nov 12 13:20:19 2009 -0500
+++ b/lib/galaxy/tools/actions/upload_common.py	Thu Nov 12 15:25:48 2009 -0500
@@ -112,7 +112,8 @@
                                        extension = uploaded_dataset.file_type,
                                        dbkey = uploaded_dataset.dbkey,
                                        history = trans.history,
-                                       create_dataset = True )
+                                       create_dataset = True,
+                                       sa_session = trans.sa_session )
     if state:
         hda.state = state
     else:
@@ -159,13 +160,14 @@
                                               dbkey = uploaded_dataset.dbkey,
                                               library_dataset = ld,
                                               user = trans.user,
-                                              create_dataset = True )
+                                              create_dataset = True,
+                                              sa_session = trans.sa_session )
+    trans.sa_session.add( ldda )
     if state:
         ldda.state = state
     else:
         ldda.state = ldda.states.QUEUED
     ldda.message = library_bunch.message
-    trans.sa_session.add( ldda )
     trans.sa_session.flush()
     # Permissions must be the same on the LibraryDatasetDatasetAssociation and the associated LibraryDataset
     trans.app.security_agent.copy_library_permissions( ld, ldda )
diff -r 8bc85721cbce -r 0984c3800775 lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py	Thu Nov 12 13:20:19 2009 -0500
+++ b/lib/galaxy/tools/parameters/basic.py	Thu Nov 12 15:25:48 2009 -0500
@@ -730,7 +730,7 @@
     >>> hist = History()
     >>> sa_session.add( hist )
     >>> sa_session.flush()
-    >>> hist.add_dataset( HistoryDatasetAssociation( id=1, extension='interval', create_dataset=True ) )
+    >>> hist.add_dataset( HistoryDatasetAssociation( id=1, extension='interval', create_dataset=True, sa_session=sa_session ) )
     >>> dtp = DataToolParameter( None, XML( '<param name="blah" type="data" format="interval"/>' ) )
     >>> print dtp.name
     blah
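One subtlety in the upload_common.py hunk above: trans.sa_session.add( ldda ) moves ahead of the state assignment. Under the new model code the state property setter flushes through object_session( self ), and object_session returns None for an instance that has not yet been added to a session. A small runnable sketch of that behavior (the Assoc model is a hypothetical stand-in for an ldda):

    from sqlalchemy import Column, Integer, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import object_session, sessionmaker

    Base = declarative_base()

    class Assoc( Base ):
        __tablename__ = 'assoc'
        id = Column( Integer, primary_key=True )

    engine = create_engine( 'sqlite:///:memory:' )
    Base.metadata.create_all( engine )
    sa_session = sessionmaker( bind=engine )()

    a = Assoc()
    assert object_session( a ) is None          # detached: a setter calling
                                                # object_session( self ).flush() would fail
    sa_session.add( a )
    assert object_session( a ) is sa_session    # attached: setters that flush are now safe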
diff -r 8bc85721cbce -r 0984c3800775 lib/galaxy/web/controllers/async.py
--- a/lib/galaxy/web/controllers/async.py	Thu Nov 12 13:20:19 2009 -0500
+++ b/lib/galaxy/web/controllers/async.py	Thu Nov 12 15:25:48 2009 -0500
@@ -103,7 +103,7 @@
         #data.state = jobs.JOB_OK
         #history.datasets.add_dataset( data )

-        data = trans.app.model.HistoryDatasetAssociation( create_dataset = True, extension = GALAXY_TYPE )
+        data = trans.app.model.HistoryDatasetAssociation( create_dataset=True, sa_session=trans.sa_session, extension=GALAXY_TYPE )
         trans.app.security_agent.set_all_dataset_permissions( data.dataset, trans.app.security_agent.history_get_default_permissions( trans.history ) )
         data.name = GALAXY_NAME
         data.dbkey = GALAXY_BUILD
diff -r 8bc85721cbce -r 0984c3800775 lib/galaxy/web/controllers/requests.py
--- a/lib/galaxy/web/controllers/requests.py	Thu Nov 12 13:20:19 2009 -0500
+++ b/lib/galaxy/web/controllers/requests.py	Thu Nov 12 15:25:48 2009 -0500
@@ -667,7 +667,8 @@
         request.library = library
         request.folder = folder
         request.state = trans.app.model.Request.states.UNSUBMITTED
-        request.flush()
+        trans.sa_session.add( request )
+        trans.sa_session.flush()
         return request
     @web.expose
     @web.require_login( "create/submit sequencing requests" )
diff -r 8bc85721cbce -r 0984c3800775 lib/galaxy/web/controllers/root.py
--- a/lib/galaxy/web/controllers/root.py	Thu Nov 12 13:20:19 2009 -0500
+++ b/lib/galaxy/web/controllers/root.py	Thu Nov 12 15:25:48 2009 -0500
@@ -501,7 +501,12 @@
         """Adds a POSTed file to a History"""
         try:
             history = trans.sa_session.query( trans.app.model.History ).get( history_id )
-            data = trans.app.model.HistoryDatasetAssociation( name = name, info = info, extension = ext, dbkey = dbkey, create_dataset = True )
+            data = trans.app.model.HistoryDatasetAssociation( name = name,
+                                                              info = info,
+                                                              extension = ext,
+                                                              dbkey = dbkey,
+                                                              create_dataset = True,
+                                                              sa_session = trans.sa_session )
             if copy_access_from:
                 copy_access_from = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( copy_access_from )
                 trans.app.security_agent.copy_dataset_permissions( copy_access_from.dataset, data.dataset )
diff -r 8bc85721cbce -r 0984c3800775 tools/data_source/microbial_import_code.py
--- a/tools/data_source/microbial_import_code.py	Thu Nov 12 13:20:19 2009 -0500
+++ b/tools/data_source/microbial_import_code.py	Thu Nov 12 15:25:48 2009 -0500
@@ -131,7 +131,7 @@
             dbkey = fields[3]
             filepath = fields[4]
             file_type = fields[5]
-            newdata = app.model.HistoryDatasetAssociation( create_dataset = True ) #This import should become a library
+            newdata = app.model.HistoryDatasetAssociation( create_dataset = True, sa_session = app.model.context ) #This import should become a library
             newdata.set_size()
             newdata.extension = file_type
             newdata.name = basic_name + " (" + microbe_info[kingdom][org]['chrs'][chr]['data'][description]['feature'] +" for "+microbe_info[kingdom][org]['name']+":"+chr + ")"
diff -r 8bc85721cbce -r 0984c3800775 tools/maf/maf_to_bed_code.py
--- a/tools/maf/maf_to_bed_code.py	Thu Nov 12 13:20:19 2009 -0500
+++ b/tools/maf/maf_to_bed_code.py	Thu Nov 12 15:25:48 2009 -0500
@@ -27,7 +27,7 @@
         fields = line.split("\t")
        dbkey = fields[1]
        filepath = fields[2]
-        newdata = app.model.HistoryDatasetAssociation( create_dataset = True )
+        newdata = app.model.HistoryDatasetAssociation( create_dataset = True, sa_session = app.model.context )
         newdata.set_size()
         newdata.extension = "bed"
         newdata.name = basic_name + " (" + dbkey + ")"
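Both tool code files run as Galaxy tool hooks and receive only app, never trans, which is why they reach the scoped session through app.model.context. A hedged sketch of how such a post-job hook might create a dataset under the new contract; the signature follows the exec_after_process convention used by these files, and the body is illustrative only:

    def exec_after_process( app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None ):
        # Tool hooks have no trans object; app.model.context is the scoped session.
        sa_session = app.model.context
        newdata = app.model.HistoryDatasetAssociation( create_dataset = True,
                                                       sa_session = sa_session )
        newdata.extension = "bed"
        sa_session.add( newdata )
        sa_session.flush()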