commit/galaxy-central: natefoo: Move a number of dataset-related methods from the root controller to the dataset controller and encode their ids. Also add user disk usage accounting.
1 new changeset in galaxy-central:

http://bitbucket.org/galaxy/galaxy-central/changeset/3d2fd67bc0ca/
changeset:   3d2fd67bc0ca
user:        natefoo
date:        2011-07-26 19:30:07
summary:     Move a number of dataset-related methods from the root controller to the dataset controller and encode their ids. Also add user disk usage accounting.
affected #:  13 files (38.1 KB)

--- a/lib/galaxy/jobs/__init__.py	Mon Jul 25 20:55:42 2011 -0400
+++ b/lib/galaxy/jobs/__init__.py	Tue Jul 26 13:30:07 2011 -0400
@@ -646,9 +646,14 @@
                                   tool=self.tool, stdout=stdout, stderr=stderr )
         job.command_line = self.command_line

+        bytes = 0
         # Once datasets are collected, set the total dataset size (includes extra files)
         for dataset_assoc in job.output_datasets + job.output_library_datasets:
             dataset_assoc.dataset.dataset.set_total_size()
+            bytes += dataset_assoc.dataset.dataset.get_total_size()
+
+        if job.user:
+            job.user.total_disk_usage += bytes

         # fix permissions
         for path in [ dp.real_path for dp in self.get_output_fnames() ]:
--- a/lib/galaxy/model/__init__.py	Mon Jul 25 20:55:42 2011 -0400
+++ b/lib/galaxy/model/__init__.py	Tue Jul 26 13:30:07 2011 -0400
@@ -70,6 +70,27 @@
             if role not in roles:
                 roles.append( role )
         return roles
+    def get_disk_usage( self, nice_size=False ):
+        rval = 0
+        if self.disk_usage is not None:
+            rval = self.disk_usage
+        if nice_size:
+            rval = galaxy.datatypes.data.nice_size( rval )
+        return rval
+    def set_disk_usage( self, bytes ):
+        self.disk_usage = bytes
+    total_disk_usage = property( get_disk_usage, set_disk_usage )
+    def calculate_disk_usage( self ):
+        dataset_ids = []
+        total = 0
+        # this can be a huge number and can run out of memory, so we avoid the mappers
+        db_session = object_session( self )
+        for history in db_session.query( History ).enable_eagerloads( False ).filter_by( user_id=self.id ).yield_per( 1000 ):
+            for hda in db_session.query( HistoryDatasetAssociation ).enable_eagerloads( False ).filter_by( history_id=history.id, purged=False ).yield_per( 1000 ):
+                if not hda.dataset.id in dataset_ids and not hda.dataset.purged and not hda.dataset.library_associations:
+                    dataset_ids.append( hda.dataset.id )
+                    total += hda.dataset.get_total_size()
+        return total

 class Job( object ):
     """
@@ -349,7 +370,7 @@
                 self.galaxy_sessions.append( GalaxySessionToHistoryAssociation( galaxy_session, self ) )
         else:
             self.galaxy_sessions.append( association )
-    def add_dataset( self, dataset, parent_id=None, genome_build=None, set_hid = True ):
+    def add_dataset( self, dataset, parent_id=None, genome_build=None, set_hid=True, quota=True ):
         if isinstance( dataset, Dataset ):
             dataset = HistoryDatasetAssociation(dataset=dataset)
             object_session( self ).add( dataset )
@@ -367,6 +388,8 @@
         else:
             if set_hid:
                 dataset.hid = self._next_hid()
+        if quota and self.user:
+            self.user.total_disk_usage += dataset.quota_amount( self.user )
         dataset.history = self
         if genome_build not in [None, '?']:
             self.genome_build = genome_build
@@ -378,6 +401,9 @@
             name = self.name
         if not target_user:
             target_user = self.user
+        quota = True
+        if target_user == self.user:
+            quota = False
         new_history = History( name=name, user=target_user )
         db_session = object_session( self )
         db_session.add( new_history )
@@ -393,8 +419,8 @@
             hdas = self.active_datasets
         for hda in hdas:
             # Copy HDA.
-            new_hda = hda.copy( copy_children=True, target_history=new_history )
-            new_history.add_dataset( new_hda, set_hid = False )
+            new_hda = hda.copy( copy_children=True )
+            new_history.add_dataset( new_hda, set_hid = False, quota=quota )
             db_session.add( new_hda )
             db_session.flush()
             # Copy annotation.
@@ -741,6 +767,10 @@
     def set_size( self ):
         """Returns the size of the data on disk"""
         return self.dataset.set_size()
+    def get_total_size( self ):
+        return self.dataset.get_total_size()
+    def set_total_size( self ):
+        return self.dataset.set_total_size()
     def has_data( self ):
         """Detects whether there is any data"""
         return self.dataset.has_data()
@@ -922,7 +952,7 @@
         self.history = history
         self.copied_from_history_dataset_association = copied_from_history_dataset_association
         self.copied_from_library_dataset_dataset_association = copied_from_library_dataset_dataset_association
-    def copy( self, copy_children = False, parent_id = None, target_history = None ):
+    def copy( self, copy_children = False, parent_id = None ):
         hda = HistoryDatasetAssociation( hid=self.hid,
                                          name=self.name,
                                          info=self.info,
@@ -934,8 +964,7 @@
                                          visible=self.visible,
                                          deleted=self.deleted,
                                          parent_id=parent_id,
-                                         copied_from_history_dataset_association=self,
-                                         history = target_history )
+                                         copied_from_history_dataset_association=self )
         object_session( self ).add( hda )
         object_session( self ).flush()
         hda.set_size()
@@ -1017,6 +1046,26 @@
         return hda_name
     def get_access_roles( self, trans ):
         return self.dataset.get_access_roles( trans )
+    def quota_amount( self, user ):
+        """
+        If the user has multiple instances of this dataset, it will not affect their disk usage statistic.
+        """
+        rval = 0
+        # Anon users are handled just by their single history size.
+        if not user:
+            return rval
+        # Gets an HDA and its children's disk usage, if the user does not already have an association of the same dataset
+        if not self.dataset.library_associations and not self.purged and not self.dataset.purged:
+            for hda in self.dataset.history_associations:
+                if hda.id == self.id:
+                    continue
+                if not hda.purged and hda.history and hda.history.user and hda.history.user == user:
+                    break
+            else:
+                rval += self.get_total_size()
+        for child in self.children:
+            rval += child.get_disk_usage( user )
+        return rval

 class HistoryDatasetAssociationDisplayAtAuthorization( object ):
     def __init__( self, hda=None, user=None, site=None ):
@@ -1467,6 +1516,13 @@
                 self.histories.append( GalaxySessionToHistoryAssociation( self, history ) )
         else:
             self.histories.append( association )
+    def get_disk_usage( self ):
+        if self.disk_usage is None:
+            return 0
+        return self.disk_usage
+    def set_disk_usage( self, bytes ):
+        self.disk_usage = bytes
+    total_disk_usage = property( get_disk_usage, set_disk_usage )

 class GalaxySessionToHistoryAssociation( object ):
     def __init__( self, galaxy_session, history ):
--- a/lib/galaxy/web/controllers/dataset.py	Mon Jul 25 20:55:42 2011 -0400
+++ b/lib/galaxy/web/controllers/dataset.py	Tue Jul 26 13:30:07 2011 -0400
@@ -9,6 +9,7 @@
 from galaxy.util import inflector
 from galaxy.model.item_attrs import *
 from galaxy.model import LibraryDatasetDatasetAssociation, HistoryDatasetAssociation
+from galaxy.web.framework.helpers import to_unicode

 import pkg_resources; pkg_resources.require( "Paste" )

@@ -383,6 +384,188 @@
             return trans.stream_template_mako( "/dataset/large_file.mako",
                                             truncated_data = open( data.file_name ).read(max_peek_size),
                                             data = data )
+
+    @web.expose
+    def edit(self, trans, dataset_id=None, filename=None, hid=None, **kwd):
+        """Allows user to modify parameters of an HDA."""
+        message = None
+        status = 'done'
+        refresh_frames = []
+        error = False
+        def __ok_to_edit_metadata( dataset_id ):
+            #prevent modifying metadata when dataset is queued or running as input/output
+            #This code could be more efficient, i.e. by using mappers, but to prevent slowing down loading a History panel, we'll leave the code here for now
+            for job_to_dataset_association in trans.sa_session.query( self.app.model.JobToInputDatasetAssociation ) \
+                                                              .filter_by( dataset_id=dataset_id ) \
+                                                              .all() \
+                                            + trans.sa_session.query( self.app.model.JobToOutputDatasetAssociation ) \
+                                                              .filter_by( dataset_id=dataset_id ) \
+                                                              .all():
+                if job_to_dataset_association.job.state not in [ job_to_dataset_association.job.states.OK, job_to_dataset_association.job.states.ERROR, job_to_dataset_association.job.states.DELETED ]:
+                    return False
+            return True
+        if hid is not None:
+            history = trans.get_history()
+            # TODO: hid handling
+            data = history.datasets[ int( hid ) - 1 ]
+            id = None
+        elif dataset_id is not None:
+            id = trans.app.security.decode_id( dataset_id )
+            data = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
+        else:
+            trans.log_event( "dataset_id and hid are both None, cannot load a dataset to edit" )
+            return trans.show_error_message( "You must provide a history dataset id to edit" )
+        if data is None:
+            trans.log_event( "Problem retrieving dataset (encoded: %s, decoded: %s) with history id %s." % ( str( dataset_id ), str( id ), str( hid ) ) )
+            return trans.show_error_message( "History dataset id is invalid" )
+        if dataset_id is not None and data.history.user is not None and data.history.user != trans.user:
+            trans.log_event( "User attempted to edit an HDA they do not own (encoded: %s, decoded: %s)" % ( dataset_id, id ) )
+            # Do not reveal the dataset's existence
+            return trans.show_error_message( "History dataset id is invalid" )
+        current_user_roles = trans.get_current_user_roles()
+        if data.history.user and not data.dataset.has_manage_permissions_roles( trans ):
+            # Permission setting related to DATASET_MANAGE_PERMISSIONS was broken for a period of time,
+            # so it is possible that some Datasets have no roles associated with the DATASET_MANAGE_PERMISSIONS
+            # permission. In this case, we'll reset this permission to the hda user's private role.
+            manage_permissions_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action )
+            permissions = { manage_permissions_action : [ trans.app.security_agent.get_private_user_role( data.history.user ) ] }
+            trans.app.security_agent.set_dataset_permission( data.dataset, permissions )
+        if trans.app.security_agent.can_access_dataset( current_user_roles, data.dataset ):
+            if data.state == trans.model.Dataset.states.UPLOAD:
+                return trans.show_error_message( "Please wait until this dataset finishes uploading before attempting to edit its metadata." )
+            params = util.Params( kwd, sanitize=False )
+            if params.change:
+                # The user clicked the Save button on the 'Change data type' form
+                if data.datatype.allow_datatype_change and trans.app.datatypes_registry.get_datatype_by_extension( params.datatype ).allow_datatype_change:
+                    #prevent modifying datatype when dataset is queued or running as input/output
+                    if not __ok_to_edit_metadata( data.id ):
+                        message = "This dataset is currently being used as input or output. You cannot change datatype until the jobs have completed or you have canceled them."
+                        error = True
+                    else:
+                        trans.app.datatypes_registry.change_datatype( data, params.datatype, set_meta = not trans.app.config.set_metadata_externally )
+                        trans.sa_session.flush()
+                        if trans.app.config.set_metadata_externally:
+                            trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming = { 'input1':data }, overwrite = False ) #overwrite is False as per existing behavior
+                        message = "Changed the type of dataset '%s' to %s" % ( to_unicode( data.name ), params.datatype )
+                        refresh_frames=['history']
+                else:
+                    message = "You are unable to change datatypes in this manner. Changing %s to %s is not allowed." % ( data.extension, params.datatype )
+                    error = True
+            elif params.save:
+                # The user clicked the Save button on the 'Edit Attributes' form
+                data.name = params.name
+                data.info = params.info
+                message = ''
+                if __ok_to_edit_metadata( data.id ):
+                    # The following for loop will save all metadata_spec items
+                    for name, spec in data.datatype.metadata_spec.items():
+                        if spec.get("readonly"):
+                            continue
+                        optional = params.get("is_"+name, None)
+                        other = params.get("or_"+name, None)
+                        if optional and optional == 'true':
+                            # optional element... == 'true' actually means it is NOT checked (and therefore omitted)
+                            setattr(data.metadata, name, None)
+                        else:
+                            if other:
+                                setattr( data.metadata, name, other )
+                            else:
+                                setattr( data.metadata, name, spec.unwrap( params.get (name, None) ) )
+                    data.datatype.after_setting_metadata( data )
+                    # Sanitize annotation before adding it.
+                    if params.annotation:
+                        annotation = sanitize_html( params.annotation, 'utf-8', 'text/html' )
+                        self.add_item_annotation( trans.sa_session, trans.get_user(), data, annotation )
+                    # If setting metadata previously failed and all required elements have now been set, clear the failed state.
+                    if data._state == trans.model.Dataset.states.FAILED_METADATA and not data.missing_meta():
+                        data._state = None
+                    trans.sa_session.flush()
+                    message = "Attributes updated%s" % message
+                    refresh_frames=['history']
+                else:
+                    trans.sa_session.flush()
+                    message = "Attributes updated, but metadata could not be changed because this dataset is currently being used as input or output. You must cancel or wait for these jobs to complete before changing metadata."
+                    status = "warning"
+                    refresh_frames=['history']
+            elif params.detect:
+                # The user clicked the Auto-detect button on the 'Edit Attributes' form
+                #prevent modifying metadata when dataset is queued or running as input/output
+                if not __ok_to_edit_metadata( data.id ):
+                    message = "This dataset is currently being used as input or output. You cannot change metadata until the jobs have completed or you have canceled them."
+                    error = True
+                else:
+                    for name, spec in data.metadata.spec.items():
+                        # We need to be careful about the attributes we are resetting
+                        if name not in [ 'name', 'info', 'dbkey', 'base_name' ]:
+                            if spec.get( 'default' ):
+                                setattr( data.metadata, name, spec.unwrap( spec.get( 'default' ) ) )
+                    if trans.app.config.set_metadata_externally:
+                        message = 'Attributes have been queued to be updated'
+                        trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming = { 'input1':data } )
+                    else:
+                        message = 'Attributes updated'
+                        data.set_meta()
+                        data.datatype.after_setting_metadata( data )
+                    trans.sa_session.flush()
+                    refresh_frames=['history']
+            elif params.convert_data:
+                target_type = kwd.get("target_type", None)
+                if target_type:
+                    message = data.datatype.convert_dataset(trans, data, target_type)
+                    refresh_frames=['history']
+            elif params.update_roles_button:
+                if not trans.user:
+                    return trans.show_error_message( "You must be logged in if you want to change permissions." )
+                if trans.app.security_agent.can_manage_dataset( current_user_roles, data.dataset ):
+                    access_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_ACCESS.action )
+                    manage_permissions_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action )
+                    # The user associated the DATASET_ACCESS permission on the dataset with 1 or more roles. We
+                    # need to ensure that they did not associate roles that would cause accessibility problems.
+                    permissions, in_roles, error, message = \
+                        trans.app.security_agent.derive_roles_from_access( trans, data.dataset.id, 'root', **kwd )
+                    if error:
+                        # Keep the original role associations for the DATASET_ACCESS permission on the dataset.
+                        permissions[ access_action ] = data.dataset.get_access_roles( trans )
+                        status = 'error'
+                    else:
+                        error = trans.app.security_agent.set_all_dataset_permissions( data.dataset, permissions )
+                        if error:
+                            message += error
+                            status = 'error'
+                        else:
+                            message = 'Your changes completed successfully.'
+                    trans.sa_session.refresh( data.dataset )
+                else:
+                    message = "You are not authorized to change this dataset's permissions"
+                    error = True
+            else:
+                if "dbkey" in data.datatype.metadata_spec and not data.metadata.dbkey:
+                    # Copy dbkey into metadata, for backwards compatability
+                    # This looks like it does nothing, but getting the dbkey
+                    # returns the metadata dbkey unless it is None, in which
+                    # case it resorts to the old dbkey. Setting the dbkey
+                    # sets it properly in the metadata
+                    #### This is likely no longer required, since the dbkey exists entirely within metadata (the old_dbkey field is gone): REMOVE ME?
+                    data.metadata.dbkey = data.dbkey
+            # let's not overwrite the imported datatypes module with the variable datatypes?
+            # the built-in 'id' is overwritten in lots of places as well
+            ldatatypes = [ dtype_name for dtype_name, dtype_value in trans.app.datatypes_registry.datatypes_by_extension.iteritems() if dtype_value.allow_datatype_change ]
+            ldatatypes.sort()
+            all_roles = trans.app.security_agent.get_legitimate_roles( trans, data.dataset, 'root' )
+            if error:
+                status = 'error'
+            return trans.fill_template( "/dataset/edit_attributes.mako",
+                                        data=data,
+                                        data_annotation=self.get_item_annotation_str( trans.sa_session, trans.user, data ),
+                                        datatypes=ldatatypes,
+                                        current_user_roles=current_user_roles,
+                                        all_roles=all_roles,
+                                        message=message,
+                                        status=status,
+                                        dataset_id=dataset_id,
+                                        refresh_frames=refresh_frames )
+        else:
+            return trans.show_error_message( "You do not have permission to edit this dataset's ( id: %s ) information." % str( dataset_id ) )

     @web.expose
     @web.require_login( "see all available datasets" )
@@ -654,111 +837,190 @@
             return trans.fill_template_mako( "dataset/display_application/display.mako", msg = msg, display_app = display_app, display_link = display_link, refresh = refresh )
         return trans.show_error_message( 'You do not have permission to view this dataset at an external display application.' )

-    def _undelete( self, trans, id ):
+    def _delete( self, trans, dataset_id ):
+        message = None
+        status = 'done'
+        id = None
         try:
-            id = int( id )
-        except ValueError, e:
-            return False
-        history = trans.get_history()
-        data = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
-        if data and data.undeletable:
+            id = trans.app.security.decode_id( dataset_id )
+            history = trans.get_history()
+            hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
+            assert hda, 'Invalid HDA: %s' % id
             # Walk up parent datasets to find the containing history
-            topmost_parent = data
+            topmost_parent = hda
+            while topmost_parent.parent:
+                topmost_parent = topmost_parent.parent
+            assert topmost_parent in trans.history.datasets, "Data does not belong to current history"
+            # Mark deleted and cleanup
+            hda.mark_deleted()
+            hda.clear_associated_files()
+            trans.log_event( "Dataset id %s marked as deleted" % str(id) )
+            if hda.parent_id is None and len( hda.creating_job_associations ) > 0:
+                # Mark associated job for deletion
+                job = hda.creating_job_associations[0].job
+                if job.state in [ self.app.model.Job.states.QUEUED, self.app.model.Job.states.RUNNING, self.app.model.Job.states.NEW ]:
+                    # Are *all* of the job's other output datasets deleted?
+                    if job.check_if_output_datasets_deleted():
+                        job.mark_deleted( self.app.config.get_bool( 'enable_job_running', True ),
+                                          self.app.config.get_bool( 'track_jobs_in_database', False ) )
+                        self.app.job_manager.job_stop_queue.put( job.id )
+            trans.sa_session.flush()
+        except Exception, e:
+            msg = 'HDA deletion failed (encoded: %s, decoded: %s)' % ( dataset_id, id )
+            log.exception( msg )
+            trans.log_event( msg )
+            message = 'Dataset deletion failed'
+            status = 'error'
+        return ( message, status )
+
+    def _undelete( self, trans, dataset_id ):
+        message = None
+        status = 'done'
+        id = None
+        try:
+            id = trans.app.security.decode_id( dataset_id )
+            history = trans.get_history()
+            hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
+            assert hda and hda.undeletable, 'Invalid HDA: %s' % id
+            # Walk up parent datasets to find the containing history
+            topmost_parent = hda
             while topmost_parent.parent:
                 topmost_parent = topmost_parent.parent
             assert topmost_parent in history.datasets, "Data does not belong to current history"
             # Mark undeleted
-            data.mark_undeleted()
+            hda.mark_undeleted()
             trans.sa_session.flush()
             trans.log_event( "Dataset id %s has been undeleted" % str(id) )
-            return True
-        return False
+        except Exception, e:
+            msg = 'HDA undeletion failed (encoded: %s, decoded: %s)' % ( dataset_id, id )
+            log.exception( msg )
+            trans.log_event( msg )
+            message = 'Dataset undeletion failed'
+            status = 'error'
+        return ( message, status )

-    def _unhide( self, trans, id ):
+    def _unhide( self, trans, dataset_id ):
         try:
-            id = int( id )
-        except ValueError, e:
+            id = trans.app.security.decode_id( dataset_id )
+        except:
             return False
         history = trans.get_history()
-        data = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
-        if data:
+        hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
+        if hda:
             # Walk up parent datasets to find the containing history
-            topmost_parent = data
+            topmost_parent = hda
             while topmost_parent.parent:
                 topmost_parent = topmost_parent.parent
             assert topmost_parent in history.datasets, "Data does not belong to current history"
             # Mark undeleted
-            data.mark_unhidden()
+            hda.mark_unhidden()
             trans.sa_session.flush()
             trans.log_event( "Dataset id %s has been unhidden" % str(id) )
             return True
         return False

-    def _purge( self, trans, id ):
+    def _purge( self, trans, dataset_id ):
+        message = None
+        status = 'done'
         try:
-            id = int( id )
-        except ValueError, e:
-            return False
-        hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
-        # Invalid HDA or not deleted
-        if not hda or not hda.history or not hda.deleted:
-            return False
-        # If the user is anonymous, make sure the HDA is owned by the current session.
-        if not hda.history.user and trans.galaxy_session.id not in [ s.id for s in hda.history.galaxy_sessions ]:
-            return False
-        # If the user is known, make sure the HDA is owned by the current user.
-        if hda.history.user and hda.history.user != trans.user:
-            return False
-        # HDA is purgeable
-        hda.purged = True
-        trans.sa_session.add( hda )
-        trans.log_event( "HDA id %s has been purged" % hda.id )
-        # Don't delete anything if there are active HDAs or any LDDAs, even if
-        # the LDDAs are deleted.  Let the cleanup scripts get it in the latter
-        # case.
-        if hda.dataset.user_can_purge:
-            try:
-                hda.dataset.full_delete()
-                trans.log_event( "Dataset id %s has been purged upon the the purge of HDA id %s" % ( hda.dataset.id, hda.id ) )
-                trans.sa_session.add( hda.dataset )
-            except:
-                log.exception( 'Unable to purge dataset (%s) on purge of hda (%s):' % ( hda.dataset.id, hda.id ) )
-        trans.sa_session.flush()
-        return True
+            id = trans.app.security.decode_id( dataset_id )
+            history = trans.get_history()
+            user = trans.get_user()
+            hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
+            # Invalid HDA
+            assert hda, 'Invalid history dataset ID'
+            # Walk up parent datasets to find the containing history
+            topmost_parent = hda
+            while topmost_parent.parent:
+                topmost_parent = topmost_parent.parent
+            assert topmost_parent in history.datasets, "Data does not belong to current history"
+            # If the user is anonymous, make sure the HDA is owned by the current session.
+            if not user:
+                assert trans.galaxy_session.id in [ s.id for s in hda.history.galaxy_sessions ], 'Invalid history dataset ID'
+            # If the user is known, make sure the HDA is owned by the current user.
+            else:
+                assert topmost_parent.history.user == trans.user, 'Invalid history dataset ID'
+            # HDA is not deleted
+            assert hda.deleted, 'History dataset is not marked as deleted'
+            # HDA is purgeable
+            # Decrease disk usage first
+            if user:
+                user.total_disk_usage -= hda.quota_amount( user )
+            # Mark purged
+            hda.purged = True
+            trans.sa_session.add( hda )
+            trans.log_event( "HDA id %s has been purged" % hda.id )
+            # Don't delete anything if there are active HDAs or any LDDAs, even if
+            # the LDDAs are deleted.  Let the cleanup scripts get it in the latter
+            # case.
+            if hda.dataset.user_can_purge:
+                try:
+                    hda.dataset.full_delete()
+                    trans.log_event( "Dataset id %s has been purged upon the the purge of HDA id %s" % ( hda.dataset.id, hda.id ) )
+                    trans.sa_session.add( hda.dataset )
+                except:
+                    log.exception( 'Unable to purge dataset (%s) on purge of HDA (%s):' % ( hda.dataset.id, hda.id ) )
            trans.sa_session.flush()
+        except Exception, e:
+            msg = 'HDA purge failed (encoded: %s, decoded: %s)' % ( dataset_id, id )
+            log.exception( msg )
+            trans.log_event( msg )
+            message = 'Dataset removal from disk failed'
+            status = 'error'
+        return ( message, status )

     @web.expose
-    def undelete( self, trans, id ):
-        if self._undelete( trans, id ):
-            return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted = True ) )
-        raise Exception( "Error undeleting" )
+    def delete( self, trans, dataset_id, filename, show_deleted_on_refresh = False ):
+        message, status = self._delete( trans, dataset_id )
+        return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted=show_deleted_on_refresh, message=message, status=status ) )

     @web.expose
-    def unhide( self, trans, id ):
-        if self._unhide( trans, id ):
+    def delete_async( self, trans, dataset_id, filename ):
+        message, status = self._delete( trans, dataset_id )
+        if status == 'done':
+            return "OK"
+        else:
+            raise Exception( message )
+
+    @web.expose
+    def undelete( self, trans, dataset_id, filename ):
+        message, status = self._undelete( trans, dataset_id )
+        return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted = True, message=message, status=status ) )
+
+    @web.expose
+    def undelete_async( self, trans, dataset_id, filename ):
+        message, status =self._undelete( trans, dataset_id )
+        if status == 'done':
+            return "OK"
+        else:
+            raise Exception( message )
+
+    @web.expose
+    def unhide( self, trans, dataset_id, filename ):
+        if self._unhide( trans, dataset_id ):
             return trans.response.send_redirect( web.url_for( controller='root', action='history', show_hidden = True ) )
         raise Exception( "Error unhiding" )

     @web.expose
-    def undelete_async( self, trans, id ):
-        if self._undelete( trans, id ):
-            return "OK"
-        raise Exception( "Error undeleting" )
-
-    @web.expose
-    def purge( self, trans, id ):
-        if not trans.app.config.allow_user_dataset_purge:
-            raise Exception( "Removal of datasets by users is not allowed in this Galaxy instance. Please contact your Galaxy administrator." )
-        if self._purge( trans, id ):
-            return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted = True ) )
-        raise Exception( "Error removing disk file" )
+    def purge( self, trans, dataset_id, filename, show_deleted_on_refresh = False ):
+        if trans.app.config.allow_user_dataset_purge:
+            message, status = self._purge( trans, dataset_id )
+        else:
+            message = "Removal of datasets by users is not allowed in this Galaxy instance. Please contact your Galaxy administrator."
+            status = 'error'
+        return trans.response.send_redirect( web.url_for( controller='root', action='history', show_deleted=show_deleted_on_refresh, message=message, status=status ) )

     @web.expose
-    def purge_async( self, trans, id ):
-        if not trans.app.config.allow_user_dataset_purge:
-            raise Exception( "Removal of datasets by users is not allowed in this Galaxy instance. Please contact your Galaxy administrator." )
-        if self._purge( trans, id ):
+    def purge_async( self, trans, dataset_id, filename ):
+        if trans.app.config.allow_user_dataset_purge:
+            message, status = self._purge( trans, dataset_id )
+        else:
+            message = "Removal of datasets by users is not allowed in this Galaxy instance. Please contact your Galaxy administrator."
+            status = 'error'
+        if status == 'done':
             return "OK"
-        raise Exception( "Error removing disk file" )
+        else:
+            raise Exception( message )

     @web.expose
     def show_params( self, trans, dataset_id=None, from_noframe=None, **kwd ):
--- a/lib/galaxy/web/controllers/root.py	Mon Jul 25 20:55:42 2011 -0400
+++ b/lib/galaxy/web/controllers/root.py	Tue Jul 26 13:30:07 2011 -0400
@@ -8,7 +8,6 @@
 from galaxy.util.sanitize_html import sanitize_html
 from galaxy.model.orm import *
 from galaxy.model.item_attrs import UsesAnnotations
-from galaxy.web.framework.helpers import to_unicode

 log = logging.getLogger( __name__ )

@@ -99,11 +98,14 @@
         return trans.fill_template_mako( "/my_data.mako" )

     @web.expose
-    def history( self, trans, as_xml=False, show_deleted=False, show_hidden=False, hda_id=None ):
+    def history( self, trans, as_xml=False, show_deleted=False, show_hidden=False, hda_id=None, **kwd ):
         """
         Display the current history, creating a new history if necessary.
         NOTE: No longer accepts "id" or "template" options for security reasons.
         """
+        params = util.Params( kwd )
+        message = params.get( 'message', None )
+        status = params.get( 'status', 'done' )
         if trans.app.config.require_login and not trans.user:
             return trans.fill_template( '/no_access.mako', message = 'Please log in to access Galaxy histories.' )
         history = trans.get_history( create=True )
@@ -123,7 +125,9 @@
                                              datasets = datasets,
                                              hda_id = hda_id,
                                              show_deleted = show_deleted,
-                                             show_hidden=show_hidden )
+                                             show_hidden=show_hidden,
+                                             message=message,
+                                             status=status )

     @web.expose
     def dataset_state ( self, trans, id=None, stamp=None ):
@@ -160,9 +164,13 @@
         # Create new HTML for any that have changed
         rval = {}
         if ids is not None and states is not None:
-            ids = map( int, ids.split( "," ) )
+            ids = ids.split( "," )
             states = states.split( "," )
-            for id, state in zip( ids, states ):
+            for encoded_id, state in zip( ids, states ):
+                try:
+                    id = int( trans.app.security.decode_id( encoded_id ) )
+                except:
+                    id = int( encoded_id )
                 data = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
                 if data.state != state:
                     job_hda = data
@@ -175,7 +183,7 @@
                         force_history_refresh = tool.force_history_refresh
                     if not job_hda.visible:
                         force_history_refresh = True
-                    rval[id] = {
+                    rval[encoded_id] = {
                         "state": data.state,
                         "html": unicode( trans.fill_template( "root/history_item.mako", data=data, hid=data.hid ), 'utf-8' ),
                         "force_history_refresh": force_history_refresh
@@ -288,237 +296,6 @@
         else:
             yield "No data with id=%d" % id

-    @web.expose
-    def edit(self, trans, id=None, hid=None, **kwd):
-        """Allows user to modify parameters of an HDA."""
-        message = ''
-        error = False
-        def __ok_to_edit_metadata( dataset_id ):
-            #prevent modifying metadata when dataset is queued or running as input/output
-            #This code could be more efficient, i.e. by using mappers, but to prevent slowing down loading a History panel, we'll leave the code here for now
-            for job_to_dataset_association in trans.sa_session.query( self.app.model.JobToInputDatasetAssociation ) \
-                                                              .filter_by( dataset_id=dataset_id ) \
-                                                              .all() \
-                                            + trans.sa_session.query( self.app.model.JobToOutputDatasetAssociation ) \
-                                                              .filter_by( dataset_id=dataset_id ) \
-                                                              .all():
-                if job_to_dataset_association.job.state not in [ job_to_dataset_association.job.states.OK, job_to_dataset_association.job.states.ERROR, job_to_dataset_association.job.states.DELETED ]:
-                    return False
-            return True
-        if hid is not None:
-            history = trans.get_history()
-            # TODO: hid handling
-            data = history.datasets[ int( hid ) - 1 ]
-        elif id is not None:
-            data = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
-        else:
-            trans.log_event( "Problem loading dataset id %s with history id %s." % ( str( id ), str( hid ) ) )
-            return trans.show_error_message( "Problem loading dataset." )
-        if data is None:
-            trans.log_event( "Problem retrieving dataset id %s with history id." % ( str( id ), str( hid ) ) )
-            return trans.show_error_message( "Problem retrieving dataset." )
-        if id is not None and data.history.user is not None and data.history.user != trans.user:
-            return trans.show_error_message( "This instance of a dataset (%s) in a history does not belong to you." % ( data.id ) )
-        current_user_roles = trans.get_current_user_roles()
-        if data.history.user and not data.dataset.has_manage_permissions_roles( trans ):
-            # Permission setting related to DATASET_MANAGE_PERMISSIONS was broken for a period of time,
-            # so it is possible that some Datasets have no roles associated with the DATASET_MANAGE_PERMISSIONS
-            # permission.  In this case, we'll reset this permission to the hda user's private role.
-            manage_permissions_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action )
-            permissions = { manage_permissions_action : [ trans.app.security_agent.get_private_user_role( data.history.user ) ] }
-            trans.app.security_agent.set_dataset_permission( data.dataset, permissions )
-        if trans.app.security_agent.can_access_dataset( current_user_roles, data.dataset ):
-            if data.state == trans.model.Dataset.states.UPLOAD:
-                return trans.show_error_message( "Please wait until this dataset finishes uploading before attempting to edit its metadata." )
-            params = util.Params( kwd, sanitize=False )
-            if params.change:
-                # The user clicked the Save button on the 'Change data type' form
-                if data.datatype.allow_datatype_change and trans.app.datatypes_registry.get_datatype_by_extension( params.datatype ).allow_datatype_change:
-                    #prevent modifying datatype when dataset is queued or running as input/output
-                    if not __ok_to_edit_metadata( data.id ):
-                        return trans.show_error_message( "This dataset is currently being used as input or output.  You cannot change datatype until the jobs have completed or you have canceled them." )
-                    trans.app.datatypes_registry.change_datatype( data, params.datatype, set_meta = not trans.app.config.set_metadata_externally )
-                    trans.sa_session.flush()
-                    if trans.app.config.set_metadata_externally:
-                        trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming = { 'input1':data }, overwrite = False ) #overwrite is False as per existing behavior
-                    return trans.show_ok_message( "Changed the type of dataset '%s' to %s" % ( to_unicode( data.name ), params.datatype ), refresh_frames=['history'] )
-                else:
-                    return trans.show_error_message( "You are unable to change datatypes in this manner. Changing %s to %s is not allowed." % ( data.extension, params.datatype ) )
-            elif params.save:
-                # The user clicked the Save button on the 'Edit Attributes' form
-                data.name = params.name
-                data.info = params.info
-                message = ''
-                if __ok_to_edit_metadata( data.id ):
-                    # The following for loop will save all metadata_spec items
-                    for name, spec in data.datatype.metadata_spec.items():
-                        if spec.get("readonly"):
-                            continue
-                        optional = params.get("is_"+name, None)
-                        other = params.get("or_"+name, None)
-                        if optional and optional == 'true':
-                            # optional element... == 'true' actually means it is NOT checked (and therefore omitted)
-                            setattr(data.metadata, name, None)
-                        else:
-                            if other:
-                                setattr( data.metadata, name, other )
-                            else:
-                                setattr( data.metadata, name, spec.unwrap( params.get (name, None) ) )
-                    data.datatype.after_setting_metadata( data )
-                    # Sanitize annotation before adding it.
-                    if params.annotation:
-                        annotation = sanitize_html( params.annotation, 'utf-8', 'text/html' )
-                        self.add_item_annotation( trans.sa_session, trans.get_user(), data, annotation )
-                    # If setting metadata previously failed and all required elements have now been set, clear the failed state.
-                    if data._state == trans.model.Dataset.states.FAILED_METADATA and not data.missing_meta():
-                        data._state = None
-                    trans.sa_session.flush()
-                    return trans.show_ok_message( "Attributes updated%s" % message, refresh_frames=['history'] )
-                else:
-                    trans.sa_session.flush()
-                    return trans.show_warn_message( "Attributes updated, but metadata could not be changed because this dataset is currently being used as input or output. You must cancel or wait for these jobs to complete before changing metadata.", refresh_frames=['history'] )
-            elif params.detect:
-                # The user clicked the Auto-detect button on the 'Edit Attributes' form
-                #prevent modifying metadata when dataset is queued or running as input/output
-                if not __ok_to_edit_metadata( data.id ):
-                    return trans.show_error_message( "This dataset is currently being used as input or output.  You cannot change metadata until the jobs have completed or you have canceled them." )
-                for name, spec in data.metadata.spec.items():
-                    # We need to be careful about the attributes we are resetting
-                    if name not in [ 'name', 'info', 'dbkey', 'base_name' ]:
-                        if spec.get( 'default' ):
-                            setattr( data.metadata, name, spec.unwrap( spec.get( 'default' ) ) )
-                if trans.app.config.set_metadata_externally:
-                    message = 'Attributes have been queued to be updated'
-                    trans.app.datatypes_registry.set_external_metadata_tool.tool_action.execute( trans.app.datatypes_registry.set_external_metadata_tool, trans, incoming = { 'input1':data } )
-                else:
-                    message = 'Attributes updated'
-                    data.set_meta()
-                    data.datatype.after_setting_metadata( data )
-                trans.sa_session.flush()
-                return trans.show_ok_message( message, refresh_frames=['history'] )
-            elif params.convert_data:
-                target_type = kwd.get("target_type", None)
-                if target_type:
-                    message = data.datatype.convert_dataset(trans, data, target_type)
-                    return trans.show_ok_message( message, refresh_frames=['history'] )
-            elif params.update_roles_button:
-                if not trans.user:
-                    return trans.show_error_message( "You must be logged in if you want to change permissions." )
-                if trans.app.security_agent.can_manage_dataset( current_user_roles, data.dataset ):
-                    access_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_ACCESS.action )
-                    manage_permissions_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action )
-                    # The user associated the DATASET_ACCESS permission on the dataset with 1 or more roles.  We
-                    # need to ensure that they did not associate roles that would cause accessibility problems.
-                    permissions, in_roles, error, message = \
-                        trans.app.security_agent.derive_roles_from_access( trans, data.dataset.id, 'root', **kwd )
-                    if error:
-                        # Keep the original role associations for the DATASET_ACCESS permission on the dataset.
-                        permissions[ access_action ] = data.dataset.get_access_roles( trans )
-                        status = 'error'
-                    else:
-                        error = trans.app.security_agent.set_all_dataset_permissions( data.dataset, permissions )
-                        if error:
-                            message += error
-                            status = 'error'
-                        else:
-                            message = 'Your changes completed successfully.'
-                    trans.sa_session.refresh( data.dataset )
-                else:
-                    return trans.show_error_message( "You are not authorized to change this dataset's permissions" )
-            if "dbkey" in data.datatype.metadata_spec and not data.metadata.dbkey:
-                # Copy dbkey into metadata, for backwards compatability
-                # This looks like it does nothing, but getting the dbkey
-                # returns the metadata dbkey unless it is None, in which
-                # case it resorts to the old dbkey.  Setting the dbkey
-                # sets it properly in the metadata
-                #### This is likely no longer required, since the dbkey exists entirely within metadata (the old_dbkey field is gone): REMOVE ME?
-                data.metadata.dbkey = data.dbkey
-            # let's not overwrite the imported datatypes module with the variable datatypes?
-            # the built-in 'id' is overwritten in lots of places as well
-            ldatatypes = [ dtype_name for dtype_name, dtype_value in trans.app.datatypes_registry.datatypes_by_extension.iteritems() if dtype_value.allow_datatype_change ]
-            ldatatypes.sort()
-            all_roles = trans.app.security_agent.get_legitimate_roles( trans, data.dataset, 'root' )
-            if error:
-                status = 'error'
-            else:
-                status = 'done'
-            return trans.fill_template( "/dataset/edit_attributes.mako",
-                                        data=data,
-                                        data_annotation=self.get_item_annotation_str( trans.sa_session, trans.user, data ),
-                                        datatypes=ldatatypes,
-                                        current_user_roles=current_user_roles,
-                                        all_roles=all_roles,
-                                        message=message,
-                                        status=status )
-        else:
-            return trans.show_error_message( "You do not have permission to edit this dataset's ( id: %s ) information." % str( id ) )
-
-    def __delete_dataset( self, trans, id ):
-        data = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id )
-        if data:
-            # Walk up parent datasets to find the containing history
-            topmost_parent = data
-            while topmost_parent.parent:
-                topmost_parent = topmost_parent.parent
-            assert topmost_parent in trans.history.datasets, "Data does not belong to current history"
-            # Mark deleted and cleanup
-            data.mark_deleted()
-            data.clear_associated_files()
-            trans.log_event( "Dataset id %s marked as deleted" % str(id) )
-            if data.parent_id is None and len( data.creating_job_associations ) > 0:
-                # Mark associated job for deletion
-                job = data.creating_job_associations[0].job
-                if job.state in [ self.app.model.Job.states.QUEUED, self.app.model.Job.states.RUNNING, self.app.model.Job.states.NEW ]:
-                    # Are *all* of the job's other output datasets deleted?
-                    if job.check_if_output_datasets_deleted():
-                        job.mark_deleted( self.app.config.get_bool( 'enable_job_running', True ),
-                                          self.app.config.get_bool( 'track_jobs_in_database', False ) )
-                        self.app.job_manager.job_stop_queue.put( job.id )
-            trans.sa_session.flush()
-
-    @web.expose
-    def delete( self, trans, id = None, show_deleted_on_refresh = False, **kwd):
-        if id:
-            if isinstance( id, list ):
-                dataset_ids = id
-            else:
-                dataset_ids = [ id ]
-            history = trans.get_history()
-            for id in dataset_ids:
-                try:
-                    id = int( id )
-                except:
-                    continue
-                self.__delete_dataset( trans, id )
-        return self.history( trans, show_deleted = show_deleted_on_refresh )
-
-    @web.expose
-    def delete_async( self, trans, id = None, **kwd):
-        if id:
-            try:
-                id = int( id )
-            except:
-                return "Dataset id '%s' is invalid" %str( id )
-            self.__delete_dataset( trans, id )
-        return "OK"
-
-    @web.expose
-    def purge( self, trans, id = None, show_deleted_on_refresh = False, **kwd ):
-        if not trans.app.config.allow_user_dataset_purge:
-            return trans.show_error_message( "Removal of datasets by users is not allowed in this Galaxy instance. Please contact your Galaxy administrator." )
-        hda = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( int( id ) )
-        if bool( hda.dataset.active_history_associations or hda.dataset.library_associations ):
-            return trans.show_error_message( "Unable to purge: LDDA(s) or active HDA(s) exist" )
-        elif hda.dataset.purged:
-            return trans.show_error_message( "Unable to purge: dataset is already purged" )
-        os.unlink( hda.dataset.file_name )
-        if os.path.exists( hda.extra_files_path ):
-            shutil.rmtree( hda.extra_files_path )
-        hda.dataset.purged = True
-        trans.sa_session.add( hda.dataset )
-        trans.sa_session.flush()
-        return self.history( trans, show_deleted = show_deleted_on_refresh )
-
     ## ---- History management -----------------------------------------------

     @web.expose
--- a/lib/galaxy/web/framework/__init__.py	Mon Jul 25 20:55:42 2011 -0400
+++ b/lib/galaxy/web/framework/__init__.py	Tue Jul 26 13:30:07 2011 -0400
@@ -471,6 +471,7 @@
               - associate new session with user
               - if old session had a history and it was not associated with a user, associate it with the new session,
                 otherwise associate the current session's history with the user
+              - add the disk usage of the current session to the user's total disk usage
         """
         # Set the previous session
         prev_galaxy_session = self.galaxy_session
@@ -494,6 +495,10 @@
             # If the previous galaxy session had a history, associate it with the new
             # session, but only if it didn't belong to a different user.
             history = prev_galaxy_session.current_history
+            if prev_galaxy_session.user is None:
+                # Increase the user's disk usage by the amount of the previous history's datasets if they didn't already own it.
+                for hda in history.datasets:
+                    user.total_disk_usage += hda.quota_amount( user )
         elif self.galaxy_session.current_history:
             history = self.galaxy_session.current_history
         if not history and \
--- a/templates/dataset/edit_attributes.mako	Mon Jul 25 20:55:42 2011 -0400
+++ b/templates/dataset/edit_attributes.mako	Tue Jul 26 13:30:07 2011 -0400
@@ -1,5 +1,6 @@
 <%inherit file="/base.mako"/>
 <%namespace file="/message.mako" import="render_msg" />
+<%namespace file="/message.mako" name="message_ns" import="javascripts" />

 <%def name="title()">${_('Edit Dataset Attributes')}</%def>

@@ -10,6 +11,7 @@
 <%def name="javascripts()">
     ${parent.javascripts()}
     ${h.js( "galaxy.base", "jquery.autocomplete", "autocomplete_tagging" )}
+    ${message_ns.javascripts()}
 </%def>

 <%def name="datatype( dataset, datatypes )">
@@ -31,8 +33,7 @@
 <div class="toolForm">
     <div class="toolFormTitle">${_('Edit Attributes')}</div>
     <div class="toolFormBody">
-        <form name="edit_attributes" action="${h.url_for( controller='root', action='edit' )}" method="post">
-            <input type="hidden" name="id" value="${data.id}"/>
+        <form name="edit_attributes" action="${h.url_for( controller='dataset', action='edit', dataset_id=dataset_id )}" method="post">
            <div class="form-row">
                <label>
                    Name:
@@ -80,8 +81,7 @@
                <input type="submit" name="save" value="${_('Save')}"/>
            </div>
        </form>
-        <form name="auto_detect" action="${h.url_for( controller='root', action='edit' )}" method="post">
-            <input type="hidden" name="id" value="${data.id}"/>
+        <form name="auto_detect" action="${h.url_for( controller='dataset', action='edit', dataset_id=dataset_id )}" method="post">
            <div class="form-row">
                <div style="float: left; width: 250px; margin-right: 10px;">
                    <input type="submit" name="detect" value="${_('Auto-detect')}"/>
@@ -104,8 +104,7 @@
 <div class="toolForm">
    <div class="toolFormTitle">${_('Convert to new format')}</div>
    <div class="toolFormBody">
-        <form name="convert_data" action="${h.url_for( controller='root', action='edit' )}" method="post">
-            <input type="hidden" name="id" value="${data.id}"/>
+        <form name="convert_data" action="${h.url_for( controller='dataset', action='edit', dataset_id=dataset_id )}" method="post">
            <div class="form-row">
                <div style="float: left; width: 250px; margin-right: 10px;">
                    <select name="target_type">
@@ -132,8 +131,7 @@
    <div class="toolFormTitle">${_('Change data type')}</div>
    <div class="toolFormBody">
        %if data.datatype.allow_datatype_change:
-            <form name="change_datatype" action="${h.url_for( controller='root', action='edit' )}" method="post">
-                <input type="hidden" name="id" value="${data.id}"/>
+            <form name="change_datatype" action="${h.url_for( controller='dataset', action='edit', dataset_id=dataset_id )}" method="post">
                <div class="form-row">
                    <label>
                        ${_('New Type')}:
@@ -161,7 +159,7 @@

 %if trans.app.security_agent.can_manage_dataset( current_user_roles, data.dataset ):
    <%namespace file="/dataset/security_common.mako" import="render_permission_form" />
-    ${render_permission_form( data.dataset, data.get_display_name(), h.url_for( controller='root', action='edit', id=data.id ), all_roles )}
+    ${render_permission_form( data.dataset, data.get_display_name(), h.url_for( controller='dataset', action='edit', dataset_id=dataset_id ), all_roles )}
 %elif trans.user:
    <div class="toolForm">
        <div class="toolFormTitle">View Permissions</div>
--- a/templates/root/history.mako	Mon Jul 25 20:55:42 2011 -0400
+++ b/templates/root/history.mako	Tue Jul 26 13:30:07 2011 -0400
@@ -1,3 +1,5 @@
+<%namespace file="/message.mako" import="render_msg" />
+
 <% _=n_ %>

 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
@@ -105,6 +107,11 @@
     });
 };

+// Update the message for async operations
+function render_message(message, status) {
+    $("div#message-container").html( "<div class=\"" + status + "message\">" + message + "</div><br/>" );
+}
+
 $(function() {
     var historywrapper = $("div.historyItemWrapper");
     init_history_items(historywrapper);
@@ -115,8 +122,8 @@
         $(this).click( function() {
             $( '#historyItem-' + data_id + "> div.historyItemTitleBar" ).addClass( "spinner" );
             $.ajax({
-                url: "${h.url_for( action='delete_async', id='XXX' )}".replace( 'XXX', data_id ),
-                error: function() { alert( "Delete failed" ); },
+                url: "${h.url_for( controller='dataset', action='delete_async', dataset_id='XXX' )}".replace( 'XXX', data_id ),
+                error: function() { render_message( "Dataset deletion failed", "error" ); },
                 success: function(msg) {
                     if (msg === "OK") {
                         %if show_deleted:
@@ -133,7 +140,7 @@
                         %endif
                         $(".tipsy").remove();
                     } else {
-                        alert( "Delete failed" );
+                        render_message( "Dataset deletion failed", "error" );
                     }
                 }
             });
@@ -147,8 +154,8 @@
         $(this).click( function() {
             $( '#historyItem-' + data_id + " > div.historyItemTitleBar" ).addClass( "spinner" );
             $.ajax({
-                url: "${h.url_for( controller='dataset', action='undelete_async', id='XXX' )}".replace( 'XXX', data_id ),
-                error: function() { alert( "Undelete failed" ) },
+                url: "${h.url_for( controller='dataset', action='undelete_async', dataset_id='XXX' )}".replace( 'XXX', data_id ),
+                error: function() { render_message( "Dataset undeletion failed", "error" ); },
                 success: function() {
                     var to_update = {};
                     to_update[data_id] = "none";
@@ -165,8 +172,8 @@
         $(this).click( function() {
             $( '#historyItem-' + data_id + " > div.historyItemTitleBar" ).addClass( "spinner" );
             $.ajax({
-                url: "${h.url_for( controller='dataset', action='purge_async', id='XXX' )}".replace( 'XXX', data_id ),
-                error: function() { alert( "Removal from disk failed" ) },
+                url: "${h.url_for( controller='dataset', action='purge_async', dataset_id='XXX' )}".replace( 'XXX', data_id ),
+                error: function() { render_message( "Dataset removal from disk failed", "error" ) },
                 success: function() {
                     var to_update = {};
                     to_update[data_id] = "none";
@@ -258,7 +265,7 @@

     // Updater
     updater(
-        ${ h.to_json_string( dict([(data.id, data.state) for data in reversed( datasets ) if data.visible and data.state not in TERMINAL_STATES]) ) }
+        ${ h.to_json_string( dict([(trans.app.security.encode_id(data.id), data.state) for data in reversed( datasets ) if data.visible and data.state not in TERMINAL_STATES]) ) }
    );

    // Navigate to a dataset.
@@ -311,11 +318,11 @@
                 if ( val.force_history_refresh ){
                     force_history_refresh = true;
                 }
-                delete tracked_datasets[ parseInt(id) ];
+                delete tracked_datasets[id];
                // When a dataset becomes terminal, check for changes in history disk size
                check_history_size = true;
             } else {
-                tracked_datasets[ parseInt(id) ] = val.state;
+                tracked_datasets[id] = val.state;
             }
         });
         if ( force_history_refresh ) {
@@ -458,6 +465,12 @@
     </div>
 %endif

+<div id="message-container">
+    %if message:
+        ${render_msg( message, status )}
+    %endif
+</div>
+
 %if not datasets:

    <div class="infomessagesmall" id="emptyHistoryMessage">
@@ -467,7 +480,7 @@
    ## Render requested datasets, ordered from newest to oldest
    %for data in reversed( datasets ):
        %if data.visible or show_hidden:
-            <div class="historyItemContainer" id="historyItemContainer-${data.id}">
+            <div class="historyItemContainer" id="historyItemContainer-${trans.app.security.encode_id(data.id)}">
                ${render_dataset( data, data.hid, show_deleted_on_refresh = show_deleted, for_editing = True )}
            </div>
        %endif
--- a/templates/root/history_common.mako	Mon Jul 25 20:55:42 2011 -0400
+++ b/templates/root/history_common.mako	Tue Jul 26 13:30:07 2011 -0400
@@ -39,9 +39,9 @@
       can_edit = not ( data.deleted or data.purged )
    %>
    %if not trans.user_is_admin() and not trans.app.security_agent.can_access_dataset( current_user_roles, data.dataset ):
-        <div class="historyItemWrapper historyItem historyItem-${data_state} historyItem-noPermission" id="historyItem-${data.id}">
+        <div class="historyItemWrapper historyItem historyItem-${data_state} historyItem-noPermission" id="historyItem-${dataset_id}">
    %else:
-        <div class="historyItemWrapper historyItem historyItem-${data_state}" id="historyItem-${data.id}">
+        <div class="historyItemWrapper historyItem historyItem-${data_state}" id="historyItem-${dataset_id}">
    %endif

    %if data.deleted or data.purged or data.dataset.purged:
@@ -51,9 +51,9 @@
            %else:
                This dataset has been deleted.
                %if for_editing:
-                    Click <a href="${h.url_for( controller='dataset', action='undelete', id=data.id )}" class="historyItemUndelete" id="historyItemUndeleter-${data.id}" target="galaxy_history">here</a> to undelete
+                    Click <a href="${h.url_for( controller='dataset', action='undelete', dataset_id=dataset_id )}" class="historyItemUndelete" id="historyItemUndeleter-${dataset_id}" target="galaxy_history">here</a> to undelete
                    %if trans.app.config.allow_user_dataset_purge:
-                        or <a href="${h.url_for( controller='dataset', action='purge', id=data.id )}" class="historyItemPurge" id="historyItemPurger-${data.id}" target="galaxy_history">here</a> to immediately remove it from disk.
+                        or <a href="${h.url_for( controller='dataset', action='purge', dataset_id=dataset_id )}" class="historyItemPurge" id="historyItemPurger-${dataset_id}" target="galaxy_history">here</a> to immediately remove it from disk.
                    %else:
                        it.
                    %endif
@@ -64,7 +64,7 @@

    %if data.visible is False:
        <div class="warningmessagesmall">
-            <strong>This dataset has been hidden. Click <a href="${h.url_for( controller='dataset', action='unhide', id=data.id )}" class="historyItemUnhide" id="historyItemUnhider-${data.id}" target="galaxy_history">here</a> to unhide.</strong>
+            <strong>This dataset has been hidden. Click <a href="${h.url_for( controller='dataset', action='unhide', dataset_id=dataset_id )}" class="historyItemUnhide" id="historyItemUnhider-${dataset_id}" target="galaxy_history">here</a> to unhide.</strong>
        </div>
    %endif

@@ -110,13 +110,13 @@
                %elif data.purged:
                    <span title="Cannot edit attributes of datasets removed from disk" class="icon-button edit_disabled tooltip"></span>
                %else:
-                    <a class="icon-button edit tooltip" title="Edit attributes" href="${h.url_for( controller='root', action='edit', id=data.id )}" target="galaxy_main"></a>
+                    <a class="icon-button edit tooltip" title="Edit attributes" href="${h.url_for( controller='dataset', action='edit', dataset_id=dataset_id )}" target="galaxy_main"></a>
                %endif
            %endif
        %endif
        %if for_editing:
            %if can_edit:
-                <a class="icon-button delete tooltip" title="Delete" href="${h.url_for( action='delete', id=data.id, show_deleted_on_refresh=show_deleted_on_refresh )}" id="historyItemDeleter-${data.id}"></a>
+                <a class="icon-button delete tooltip" title="Delete" href="${h.url_for( controller='dataset', action='delete', dataset_id=dataset_id, show_deleted_on_refresh=show_deleted_on_refresh )}" id="historyItemDeleter-${dataset_id}"></a>
            %else:
                <span title="Dataset is already deleted" class="icon-button delete_disabled tooltip"></span>
            %endif
@@ -184,7 +184,7 @@
                <div class="warningmessagesmall" style="margin: 4px 0 4px 0">
                    An error occurred setting the metadata for this dataset.
                    %if can_edit:
-                        You may be able to <a href="${h.url_for( controller='root', action='edit', id=data.id )}" target="galaxy_main">set it manually or retry auto-detection</a>.
+                        You may be able to <a href="${h.url_for( controller='dataset', action='edit', dataset_id=dataset_id )}" target="galaxy_main">set it manually or retry auto-detection</a>.
                    %endif
                </div>
            %endif
@@ -193,7 +193,7 @@
                format: <span class="${data.ext}">${data.ext}</span>,
                database:
                %if data.dbkey == '?' and can_edit:
-                    <a href="${h.url_for( controller='root', action='edit', id=data.id )}" target="galaxy_main">${_(data.dbkey)}</a>
+                    <a href="${h.url_for( controller='dataset', action='edit', dataset_id=dataset_id )}" target="galaxy_main">${_(data.dbkey)}</a>
                %else:
                    <span class="${data.dbkey}">${_(data.dbkey)}</span>
                %endif
--- a/templates/user/index.mako	Mon Jul 25 20:55:42 2011 -0400
+++ b/templates/user/index.mako	Tue Jul 26 13:30:07 2011 -0400
@@ -22,6 +22,7 @@
             <li><a href="${h.url_for( controller='user', action='manage_user_info', cntrller=cntrller, webapp='community' )}">${_('Manage your information')}</a></li>
         %endif
     </ul>
+    <p>You are currently using <strong>${trans.user.get_disk_usage( nice_size=True )}</strong> of disk space in this Galaxy instance.</p>
 %else:
     %if not message:
         <p>${n_('You are currently not logged in.')}</p>
--- a/templates/webapps/community/repository/common.mako	Mon Jul 25 20:55:42 2011 -0400
+++ b/templates/webapps/community/repository/common.mako	Tue Jul 26 13:30:07 2011 -0400
@@ -50,39 +50,4 @@
             onActivate: function(dtnode) {
                 var cell = $("#file_contents");
                 var selected_value;
-                if (dtnode.data.key == 'root') {
-                    selected_value = "${repository.repo_path}/";
-                } else {
-                    selected_value = dtnode.data.key;
-                };
-                if (selected_value.charAt(selected_value.length-1) != '/') {
-                    // Make ajax call
-                    $.ajax( {
-                        type: "POST",
-                        url: "${h.url_for( controller='repository', action='get_file_contents' )}",
-                        dataType: "json",
-                        data: { file_path: selected_value },
-                        success : function ( data ) {
-                            cell.html( '<label>'+data+'</label>' )
-                        }
-                    });
-                } else {
-                    cell.html( '' );
-                };
-            },
-        });
-    });
-    </script>
-</%def>
-
-<%def name="render_clone_str( repository )">
-    <%
-        protocol, base = trans.request.base.split( '://' )
-        if trans.user:
-            username = '%s@' % trans.user.username
-        else:
-            username = ''
-        clone_str = '%s://%s%s/repos/%s/%s' % ( protocol, username, base, repository.user.username, repository.name )
-    %>
-    hg clone <a href="${clone_str}">${clone_str}</a>
-</%def>
\ No newline at end of file
+    
\ No newline at end of file
--- a/test/base/twilltestcase.py	Mon Jul 25 20:55:42 2011 -0400
+++ b/test/base/twilltestcase.py	Tue Jul 26 13:30:07 2011 -0400
@@ -474,7 +474,7 @@
         elem = data_list[-1]
         hid = int( elem.get('hid') )
         self.assertTrue( hid )
-        self.visit_page( "edit?hid=%s" % hid )
+        self.visit_page( "dataset/edit?hid=%s" % hid )
         for subpatt in patt.split():
             tc.find(subpatt)
     def delete_history_item( self, hda_id, strings_displayed=[] ):
@@ -483,7 +483,7 @@
             hda_id = int( hda_id )
         except:
             raise AssertionError, "Invalid hda_id '%s' - must be int" % hda_id
-        self.visit_url( "%s/root/delete?show_deleted_on_refresh=False&id=%s" % ( self.url, hda_id ) )
+        self.visit_url( "%s/datasets/%s/delete?show_deleted_on_refresh=False" % ( self.url, self.security.encode_id( hda_id ) ) )
         for check_str in strings_displayed:
             self.check_page_for_string( check_str )
     def undelete_history_item( self, hda_id, strings_displayed=[] ):
@@ -492,7 +492,7 @@
             hda_id = int( hda_id )
         except:
             raise AssertionError, "Invalid hda_id '%s' - must be int" % hda_id
-        self.visit_url( "%s/dataset/undelete?id=%s" % ( self.url, hda_id ) )
+        self.visit_url( "%s/datasets/%s/undelete" % ( self.url, self.security.encode_id( hda_id ) ) )
         for check_str in strings_displayed:
             self.check_page_for_string( check_str )
     def display_history_item( self, hda_id, strings_displayed=[] ):
@@ -511,7 +511,7 @@
                            strings_displayed=[], strings_not_displayed=[] ):
         """Edit history_dataset_association attribute information"""
         self.home()
-        self.visit_url( "%s/root/edit?id=%s" % ( self.url, hda_id ) )
+        self.visit_url( "%s/datasets/%s/edit" % ( self.url, self.security.encode_id( hda_id ) ) )
         submit_required = False
         self.check_page_for_string( 'Edit Attributes' )
         if new_name:
@@ -545,9 +545,9 @@
     def auto_detect_metadata( self, hda_id ):
         """Auto-detect history_dataset_association metadata"""
         self.home()
-        self.visit_url( "%s/root/edit?id=%s" % ( self.url, hda_id ) )
+        self.visit_url( "%s/datasets/%s/edit" % ( self.url, self.security.encode_id( hda_id ) ) )
         self.check_page_for_string( 'This will inspect the dataset and attempt' )
-        tc.fv( 'auto_detect', 'id', hda_id )
+        tc.fv( 'auto_detect', 'detect', 'Auto-detect' )
         tc.submit( 'detect' )
         try:
             self.check_page_for_string( 'Attributes have been queued to be updated' )
@@ -559,7 +559,7 @@
     def convert_format( self, hda_id, target_type ):
         """Convert format of history_dataset_association"""
         self.home()
-        self.visit_url( "%s/root/edit?id=%s" % ( self.url, hda_id ) )
+        self.visit_url( "%s/datasets/%s/edit" % ( self.url, self.security.encode_id( hda_id ) ) )
         self.check_page_for_string( 'This will inspect the dataset and attempt' )
         tc.fv( 'convert_data', 'target_type', target_type )
         tc.submit( 'convert_data' )
@@ -569,7 +569,7 @@
     def change_datatype( self, hda_id, datatype ):
         """Change format of history_dataset_association"""
         self.home()
-        self.visit_url( "%s/root/edit?id=%s" % ( self.url, hda_id ) )
+        self.visit_url( "%s/datasets/%s/edit" % ( self.url, self.security.encode_id( hda_id ) ) )
         self.check_page_for_string( 'This will change the datatype of the existing dataset but' )
         tc.fv( 'change_datatype', 'datatype', datatype )
         tc.submit( 'change' )
--- a/test/functional/test_history_functions.py	Mon Jul 25 20:55:42 2011 -0400
+++ b/test/functional/test_history_functions.py	Tue Jul 26 13:30:07 2011 -0400
@@ -664,7 +664,7 @@
                                 .order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ) \
                                 .first()
         self.home()
-        self.visit_url( "%s/root/delete?show_deleted_on_refresh=False&id=%s" % ( self.url, str( latest_hda.id ) ) )
+        self.delete_history_item( str( latest_hda.id ) )
         self.check_history_for_string( 'Your history is empty' )
         self.home()
         self.visit_url( "%s/history/?show_deleted=True" % self.url )

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
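
Nearly every controller change in this changeset follows the same pattern: routes now accept an encoded dataset_id, decode it with trans.app.security.decode_id() before touching the database, verify ownership, and report failures as a ( message, status ) pair instead of leaking raw exceptions to the client. The sketch below illustrates that pattern in isolation; encode_id/decode_id here are simplified, hypothetical stand-ins for Galaxy's real trans.app.security helpers, and load_hda is an assumed loader callback, not a Galaxy API.

    import hashlib

    SECRET = "id-secret-from-config"  # assumption: a per-instance secret

    def encode_id( obj_id ):
        # Toy encoding: hex id plus a truncated keyed digest, so a tampered
        # or guessed id fails validation instead of resolving to another
        # user's row. (Galaxy's real helper encrypts the id; this is only a
        # stand-in to show the round-trip.)
        digest = hashlib.sha1( ( "%s:%d" % ( SECRET, obj_id ) ).encode() ).hexdigest()[:8]
        return "%x.%s" % ( obj_id, digest )

    def decode_id( encoded ):
        raw, digest = encoded.split( ".", 1 )
        obj_id = int( raw, 16 )
        if encode_id( obj_id ) != encoded:
            raise ValueError( "malformed id: %s" % encoded )
        return obj_id

    def _delete( dataset_id, current_user, load_hda ):
        # Mirrors the controller shape above: decode first, assert ownership,
        # and translate any failure into a ( message, status ) pair.
        try:
            hda = load_hda( decode_id( dataset_id ) )
            assert hda is not None and hda.history.user == current_user, "Invalid history dataset ID"
            hda.deleted = True
            return ( None, 'done' )
        except Exception:
            return ( 'Dataset deletion failed', 'error' )

Because decode_id raises on anything it did not produce, integer ids from old bookmarks fall into the error path rather than silently addressing the wrong dataset, which is why dataset_state in root.py keeps an int() fallback during the transition.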
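
The disk accounting itself is incremental: finished jobs add their outputs' total size to User.total_disk_usage, History.add_dataset charges quota_amount() when an HDA is added, and _purge subtracts it again. The for/else loop in quota_amount() ensures a dataset shared by several of a user's HDAs is counted only once. A condensed sketch of that double-counting rule, using simplified stand-in classes (ownership is collapsed to a single user attribute; the real model walks hda.history.user):

    class Dataset( object ):
        def __init__( self, size ):
            self.total_size = size
            self.history_associations = []

    class HDA( object ):
        def __init__( self, dataset, user, purged=False ):
            self.dataset = dataset
            self.user = user
            self.purged = purged
            dataset.history_associations.append( self )
        def quota_amount( self, user ):
            # Charge the dataset's size only if no *other* unpurged HDA owned
            # by this user already references the same dataset (the for/else
            # loop from the model change above).
            for hda in self.dataset.history_associations:
                if hda is self:
                    continue
                if not hda.purged and hda.user == user:
                    return 0  # already counted via another association
            return self.dataset.total_size

    # Adding two copies of one dataset to a user's histories counts it once:
    user = object()
    usage = 0
    d = Dataset( 1024 )
    first = HDA( d, user )
    usage += first.quota_amount( user )   # 1024: no other association yet
    second = HDA( d, user )
    usage += second.quota_amount( user )  # 0: already charged via `first`
    assert usage == 1024

Purging reverses the same calculation, which is why _purge decrements the user's total before setting hda.purged = True: once the flag is set, quota_amount() would return 0 and the usage could never be reclaimed.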