1 new commit in galaxy-central: https://bitbucket.org/galaxy/galaxy-central/changeset/ff62ddc66b1a/ changeset: ff62ddc66b1a user: dannon date: 2012-04-11 17:52:52 summary: Incremental display for tabular datatypes. affected #: 9 files diff -r 60061d1a369635facf858766c7a992272b806750 -r ff62ddc66b1a454e9b3ba11297c0bd8e2a74dc8d lib/galaxy/datatypes/binary.py --- a/lib/galaxy/datatypes/binary.py +++ b/lib/galaxy/datatypes/binary.py @@ -34,6 +34,17 @@ """Returns the mime type of the datatype""" return 'application/octet-stream' + def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, size=None, offset=None, **kwd): + trans.response.set_content_type(dataset.get_mime()) + trans.log_event( "Display dataset id: %s" % str( dataset.id ) ) + trans.response.headers['Content-Length'] = int( os.stat( dataset.file_name ).st_size ) + to_ext = dataset.extension + valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' + fname = ''.join(c in valid_chars and c or '_' for c in dataset.name)[0:150] + trans.response.set_content_type( "application/octet-stream" ) #force octet-stream so Safari doesn't append mime extensions to filename + trans.response.headers["Content-Disposition"] = 'attachment; filename="Galaxy%s-[%s].%s"' % (dataset.hid, fname, to_ext) + return open( dataset.file_name ) + class Ab1( Binary ): """Class describing an ab1 binary sequence file""" file_ext = "ab1" diff -r 60061d1a369635facf858766c7a992272b806750 -r ff62ddc66b1a454e9b3ba11297c0bd8e2a74dc8d lib/galaxy/datatypes/data.py --- a/lib/galaxy/datatypes/data.py +++ b/lib/galaxy/datatypes/data.py @@ -39,7 +39,7 @@ 'test' >>> type( DataTest.metadata_spec.test.param ) <class 'galaxy.datatypes.metadata.MetadataParameter'> - + """ __metaclass__ = DataMeta # Add metadata elements @@ -60,7 +60,7 @@ primary_file_name = 'index' #A per datatype setting (inherited): max file size (in bytes) for setting optional metadata _max_optional_metadata_filesize = None - + def __init__(self, **kwd): """Initialize the datatype""" object.__init__(self, **kwd) @@ -118,7 +118,7 @@ to_check = dataset.metadata.items() for key, value in to_check: if key in skip or ( not check and dataset.metadata.spec[key].get( "optional" ) ): - continue #we skip check for optional and nonrequested values here + continue #we skip check for optional and nonrequested values here if not value: return True return False @@ -142,6 +142,7 @@ else: dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk' + def display_peek(self, dataset ): """Create HTML table, used for displaying peek""" out = ['<table cellspacing="0" cellpadding="3">'] @@ -163,6 +164,158 @@ except Exception, exc: out = "Can't create peek %s" % str( exc ) return out + + def _archive_composite_dataset( self, trans, data=None, **kwd ): + # save a composite object into a compressed archive for downloading + params = util.Params( kwd ) + valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' + outfname = data.name[0:150] + outfname = ''.join(c in valid_chars and c or '_' for c in outfname) + if (params.do_action == None): + params.do_action = 'zip' # default + msg = util.restore_text( params.get( 'msg', '' ) ) + messagetype = params.get( 'messagetype', 'done' ) + if not data: + msg = "You must select at least one dataset" + messagetype = 'error' + else: + error = False + try: + if (params.do_action == 'zip'): + # Can't use mkstemp - the file must not exist first + tmpd = tempfile.mkdtemp() + tmpf = os.path.join( tmpd, 
'library_download.' + params.do_action ) + if ziptype == '64': + archive = zipfile.ZipFile( tmpf, 'w', zipfile.ZIP_DEFLATED, True ) + else: + archive = zipfile.ZipFile( tmpf, 'w', zipfile.ZIP_DEFLATED ) + archive.add = lambda x, y: archive.write( x, y.encode('CP437') ) + elif params.do_action == 'tgz': + archive = util.streamball.StreamBall( 'w|gz' ) + elif params.do_action == 'tbz': + archive = util.streamball.StreamBall( 'w|bz2' ) + except (OSError, zipfile.BadZipFile): + error = True + log.exception( "Unable to create archive for download" ) + msg = "Unable to create archive for %s for download, please report this error" % outfname + messagetype = 'error' + if not error: + current_user_roles = trans.get_current_user_roles() + ext = data.extension + path = data.file_name + fname = os.path.split(path)[-1] + efp = data.extra_files_path + htmlname = os.path.splitext(outfname)[0] + if not htmlname.endswith(ext): + htmlname = '%s_%s' % (htmlname,ext) + archname = '%s.html' % htmlname # fake the real nature of the html file + try: + archive.add(data.file_name,archname) + except IOError: + error = True + log.exception( "Unable to add composite parent %s to temporary library download archive" % data.file_name) + msg = "Unable to create archive for download, please report this error" + messagetype = 'error' + for root, dirs, files in os.walk(efp): + for fname in files: + fpath = os.path.join(root,fname) + rpath = os.path.relpath(fpath,efp) + try: + archive.add( fpath,rpath ) + except IOError: + error = True + log.exception( "Unable to add %s to temporary library download archive" % rpath) + msg = "Unable to create archive for download, please report this error" + messagetype = 'error' + continue + if not error: + if params.do_action == 'zip': + archive.close() + tmpfh = open( tmpf ) + # CANNOT clean up - unlink/rmdir was always failing because file handle retained to return - must rely on a cron job to clean up tmp + trans.response.set_content_type( "application/x-zip-compressed" ) + trans.response.headers[ "Content-Disposition" ] = 'attachment; filename="%s.zip"' % outfname + return tmpfh + else: + trans.response.set_content_type( "application/x-tar" ) + outext = 'tgz' + if params.do_action == 'tbz': + outext = 'tbz' + trans.response.headers[ "Content-Disposition" ] = 'attachment; filename="%s.%s"' % (outfname,outext) + archive.wsgi_status = trans.response.wsgi_status() + archive.wsgi_headeritems = trans.response.wsgi_headeritems() + return archive.stream + return trans.show_error_message( msg ) + + def _serve_raw(self, trans, dataset, to_ext): + trans.response.headers['Content-Length'] = int( os.stat( dataset.file_name ).st_size ) + valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' + fname = ''.join(c in valid_chars and c or '_' for c in dataset.name)[0:150] + trans.response.set_content_type( "application/octet-stream" ) #force octet-stream so Safari doesn't append mime extensions to filename + trans.response.headers["Content-Disposition"] = 'attachment; filename="Galaxy%s-[%s].%s"' % (dataset.hid, fname, to_ext) + return open( dataset.file_name ) + + def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, size=None, offset=None, **kwd): + """ Old display method, for transition """ + #Relocate all composite datatype display to a common location. 
+ composite_extensions = trans.app.datatypes_registry.get_composite_extensions( ) + composite_extensions.append('html') # for archiving composite datatypes + if isinstance( dataset, basestring ): + return dataset + if filename and filename != "index": + # For files in extra_files_path + file_path = trans.app.object_store.get_filename(dataset, extra_dir='dataset_%s_files' % dataset.id, alt_name=filename) + if os.path.exists( file_path ): + if os.path.isdir( file_path ): + return trans.show_error_message( "Directory listing is not allowed." ) #TODO: Reconsider allowing listing of directories? + mime, encoding = mimetypes.guess_type( file_path ) + if not mime: + try: + mime = trans.app.datatypes_registry.get_mimetype_by_extension( ".".split( file_path )[-1] ) + except: + mime = "text/plain" + trans.response.set_content_type( mime ) + return open( file_path ) + else: + return trans.show_error_message( "Could not find '%s' on the extra files path %s." % ( filename, file_path ) ) + trans.response.set_content_type(dataset.get_mime()) + trans.log_event( "Display dataset id: %s" % str( dataset.id ) ) + from galaxy import datatypes #DBTODO REMOVE THIS AT REFACTOR + if to_ext or isinstance(dataset.datatype, datatypes.binary.Binary): # Saving the file, or binary file + if dataset.extension in composite_extensions: + return self._archive_composite_dataset( trans, dataset, **kwd ) + else: + trans.response.headers['Content-Length'] = int( os.stat( dataset.file_name ).st_size ) + if not to_ext: + to_ext = dataset.extension + valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' + fname = ''.join(c in valid_chars and c or '_' for c in dataset.name)[0:150] + trans.response.set_content_type( "application/octet-stream" ) #force octet-stream so Safari doesn't append mime extensions to filename + trans.response.headers["Content-Disposition"] = 'attachment; filename="Galaxy%s-[%s].%s"' % (dataset.hid, fname, to_ext) + return open( dataset.file_name ) + if not os.path.exists( dataset.file_name ): + raise paste.httpexceptions.HTTPNotFound( "File Not Found (%s)." % dataset.file_name ) + max_peek_size = 1000000 # 1 MB + if isinstance(dataset.datatype, datatypes.images.Html): + max_peek_size = 10000000 # 10 MB for html + if not preview or isinstance(dataset.datatype, datatypes.images.Image) or os.stat( dataset.file_name ).st_size < max_peek_size: + if trans.app.config.sanitize_all_html and trans.response.get_content_type() == "text/html": + # Sanitize anytime we respond with plain text/html content. + return sanitize_html(open( dataset.file_name ).read()) + return open( dataset.file_name ) + else: + trans.response.set_content_type( "text/html" ) + return trans.stream_template_mako( "/dataset/large_file.mako", + truncated_data = open( dataset.file_name ).read(max_peek_size), + data = dataset ) + """Returns dataset contents for display. + This allows for customization of subtype displays""" + file_path = trans.app.object_store.get_filename(dataset, extra_dir='dataset_%s_files' % dataset.id, alt_name=filename) + if size: + return open(dataset.file_path).read(size) + else: + open(dataset.file_path) + def display_name(self, dataset): """Returns formatted html of dataset name""" try: @@ -183,11 +336,11 @@ info = info.replace( '\r', '<br/>' ) if info.find( '\n' ) >= 0: info = info.replace( '\n', '<br/>' ) - + # Convert to unicode to display non-ascii characters. 
if type( info ) is not unicode: info = unicode( info, 'utf-8') - + return info except: return "info unavailable" @@ -272,7 +425,7 @@ def convert_dataset(self, trans, original_dataset, target_type, return_output=False, visible=True, deps=None, set_output_history=True): """This function adds a job to the queue to convert a dataset to another type. Returns a message about success/failure.""" converter = trans.app.datatypes_registry.get_converter_by_target_type( original_dataset.ext, target_type ) - + if converter is None: raise Exception( "A converter does not exist for %s to %s." % ( original_dataset.ext, target_type ) ) #Generate parameter dictionary @@ -284,7 +437,7 @@ params[value.name] = deps[value.name] elif value.type == 'data': input_name = key - + params[input_name] = original_dataset #Run converter, job is dispatched through Queue converted_dataset = converter.execute( trans, incoming=params, set_output_hid=visible, set_output_history=set_output_history)[1] @@ -351,18 +504,18 @@ @property def has_resolution(self): return False - - - def merge( split_files, output_file): + + + def merge( split_files, output_file): """ TODO: Do we need to merge gzip files using gzjoin? cat seems to work, but might be brittle. Need to revisit this. """ if len(split_files) == 1: - cmd = 'mv -f %s %s' % ( split_files[0], output_file ) + cmd = 'mv -f %s %s' % ( split_files[0], output_file ) else: - cmd = 'cat %s > %s' % ( ' '.join(split_files), output_file ) + cmd = 'cat %s > %s' % ( ' '.join(split_files), output_file ) result = os.system(cmd) if result != 0: raise Exception('Result %s from %s' % (result, cmd)) @@ -377,7 +530,7 @@ def write_from_stream(self, dataset, stream): """Writes data from a stream""" - # write it twice for now + # write it twice for now fd, temp_name = tempfile.mkstemp() while 1: chunk = stream.read(1048576) @@ -468,11 +621,11 @@ """ if split_params is None: return - + if len(input_datasets) > 1: raise Exception("Text file splitting does not support multiple files") input_files = [ds.file_name for ds in input_datasets] - + lines_per_file = None chunk_size = None if split_params['split_mode'] == 'number_of_parts': @@ -501,7 +654,7 @@ chunk_size = int(split_params['split_size']) else: raise Exception('Unsupported split mode %s' % split_params['split_mode']) - + f = open(input_files[0], 'rt') try: chunk_idx = 0 @@ -562,7 +715,7 @@ def get_file_peek( file_name, is_multi_byte=False, WIDTH=256, LINE_COUNT=5, skipchars=[] ): """ Returns the first LINE_COUNT lines wrapped to WIDTH - + ## >>> fname = get_test_fname('4.bed') ## >>> get_file_peek(fname) ## 'chr22 30128507 31828507 uc003bnx.1_cds_2_0_chr22_29227_f 0 +\n' @@ -601,11 +754,12 @@ lines.append( line ) count += 1 temp.close() - if file_type in [ 'gzipped', 'binary' ]: - text = "%s file" % file_type + if file_type in [ 'gzipped', 'binary' ]: + text = "%s file" % file_type else: try: text = unicode( '\n'.join( lines ), 'utf-8' ) except UnicodeDecodeError: text = "binary/unknown file" return text + diff -r 60061d1a369635facf858766c7a992272b806750 -r ff62ddc66b1a454e9b3ba11297c0bd8e2a74dc8d lib/galaxy/datatypes/tabular.py --- a/lib/galaxy/datatypes/tabular.py +++ b/lib/galaxy/datatypes/tabular.py @@ -13,11 +13,13 @@ from galaxy.datatypes.metadata import MetadataElement import galaxy_utils.sequence.vcf from sniff import * +from galaxy.util.json import to_json_string log = logging.getLogger(__name__) class Tabular( data.Text ): """Tab delimited data""" + CHUNK_SIZE = 20000 """Add metadata elements""" MetadataElement( name="comment_lines", 
default=0, desc="Number of comment lines", readonly=False, optional=True, no_value=0 ) @@ -33,15 +35,15 @@ that contain numerical values in the dataset. A skip parameter is used because various tabular data types reuse this function, and their data type classes are responsible to determine how many invalid - comment lines should be skipped. Using None for skip will cause skip - to be zero, but the first line will be processed as a header. A - max_data_lines parameter is used because various tabular data types - reuse this function, and their data type classes are responsible to + comment lines should be skipped. Using None for skip will cause skip + to be zero, but the first line will be processed as a header. A + max_data_lines parameter is used because various tabular data types + reuse this function, and their data type classes are responsible to determine how many data lines should be processed to ensure that the - non-optional metadata parameters are properly set; if used, optional - metadata parameters will be set to None, unless the entire file has - already been read. Using None (default) for max_data_lines will - process all data lines. + non-optional metadata parameters are properly set; if used, optional + metadata parameters will be set to None, unless the entire file has + already been read. Using None (default) for max_data_lines will + process all data lines. Items of interest: 1. We treat 'overwrite' as always True (we always want to set tabular metadata when called). @@ -58,7 +60,7 @@ column_type_set_order = [ 'int', 'float', 'list', 'str' ] #Order to set column types in default_column_type = column_type_set_order[-1] # Default column type is lowest in list column_type_compare_order = list( column_type_set_order ) #Order to compare column types - column_type_compare_order.reverse() + column_type_compare_order.reverse() def type_overrules_type( column_type1, column_type2 ): if column_type1 is None or column_type1 == column_type2: return False @@ -75,13 +77,13 @@ try: int( column_text ) return True - except: + except: return False def is_float( column_text ): try: float( column_text ) return True - except: + except: if column_text.strip().lower() == 'na': return True #na is special cased to be a float return False @@ -126,7 +128,7 @@ if type_overrules_type( column_type, column_types[field_count] ): column_types[field_count] = column_type if i == 0 and requested_skip is None: - # This is our first line, people seem to like to upload files that have a header line, but do not + # This is our first line, people seem to like to upload files that have a header line, but do not # start with '#' (i.e. all column types would then most likely be detected as str). We will assume # that the first line is always a header (this was previous behavior - it was always skipped). 
When # the requested skip is None, we only use the data from the first line if we have no other data for @@ -148,7 +150,7 @@ break i += 1 dataset_fh.close() - + #we error on the larger number of columns #first we pad our column_types by using data from first line if len( first_line_column_types ) > len( column_types ): @@ -177,6 +179,7 @@ except Exception, exc: out = "Can't create peek %s" % str( exc ) return out + def make_html_peek_header( self, dataset, skipchars=[], column_names=[], column_number_format='%s', column_parameter_alias={}, **kwargs ): out = [] try: @@ -212,6 +215,7 @@ except Exception, exc: raise Exception, "Can't create peek header %s" % str( exc ) return "".join( out ) + def make_html_peek_rows( self, dataset, skipchars=[], **kwargs ): out = [] try: @@ -233,6 +237,28 @@ except Exception, exc: raise Exception, "Can't create peek rows %s" % str( exc ) return "".join( out ) + + def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, chunk=None): + #TODO Prevent failure when displaying extremely long > 50kb lines. + if to_ext: + return self._serve_raw(trans, dataset, to_ext) + if chunk: + ck_index = int(chunk) + f = open(dataset.file_name) + f.seek(ck_index * self.CHUNK_SIZE) + # If we aren't at the start of the file, seek to next newline. Do this better eventually. + if f.tell() != 0: + cursor = f.read(1) + while cursor and cursor != '\n': + cursor = f.read(1) + ck_data = f.read(self.CHUNK_SIZE) + cursor = f.read(1) + while cursor and ck_data[-1] != '\n': + ck_data += cursor + cursor = f.read(1) + return to_json_string({'ck_data': ck_data, 'ck_index': ck_index+1}) + return trans.fill_template( "/dataset/tabular_chunked.mako",dataset = dataset) + def set_peek( self, dataset, line_count=None, is_multi_byte=False): super(Tabular, self).set_peek( dataset, line_count=line_count, is_multi_byte=is_multi_byte) if dataset.metadata.comment_lines: @@ -281,7 +307,7 @@ def sniff( self, filename ): """ Determines whether the file is in SAM format - + A file in SAM format consists of lines of tab-separated data. The following header line may be the first line: @QNAME FLAG RNAME POS MAPQ CIGAR MRNM MPOS ISIZE SEQ QUAL @@ -290,12 +316,12 @@ Data in the OPT column is optional and can consist of tab-separated data For complete details see http://samtools.sourceforge.net/SAM1.pdf - + Rules for sniffing as True: There must be 11 or more columns of data on each line Columns 2 (FLAG), 4(POS), 5 (MAPQ), 8 (MPOS), and 9 (ISIZE) must be numbers (9 can be negative) We will only check that up to the first 5 alignments are correctly formatted. - + >>> fname = get_test_fname( 'sequence.maf' ) >>> Sam().sniff( fname ) False @@ -311,7 +337,7 @@ line = line.strip() if not line: break #EOF - if line: + if line: if line[0] != '@': linePieces = line.split('\t') if len(linePieces) < 11: @@ -373,10 +399,10 @@ if result != 0: raise Exception('Result %s from %s' % (result, cmd)) merge = staticmethod(merge) - + def get_track_type( self ): return "ReadTrack", {"data": "bam", "index": "summary_tree"} - + class Pileup( Tabular ): """Tab delimited data in pileup (6- or 10-column) format""" file_ext = "pileup" @@ -402,7 +428,7 @@ """ Checks for 'pileup-ness' - There are two main types of pileup: 6-column and 10-column. For both, + There are two main types of pileup: 6-column and 10-column. For both, the first three and last two columns are the same. We only check the first three to allow for some personalization of the format. 
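The heart of this changeset is the chunked reader added to Tabular.display_data above: the client asks for chunk N of a tabular dataset, and the server answers with roughly CHUNK_SIZE bytes of the file aligned to row boundaries plus the index to request next. Pulled out of the controller plumbing, the same steps look like the sketch below (a minimal standalone rendering; read_chunk and its path argument are illustrative names, not part of the commit):

CHUNK_SIZE = 20000  # mirrors Tabular.CHUNK_SIZE in the diff

def read_chunk(path, ck_index):
    # Hypothetical standalone version of what Tabular.display_data does inline.
    f = open(path)
    f.seek(ck_index * CHUNK_SIZE)
    # If we landed mid-row, discard bytes up to the next newline; that partial
    # row was already covered by the previous chunk.
    if f.tell() != 0:
        cursor = f.read(1)
        while cursor and cursor != '\n':
            cursor = f.read(1)
    ck_data = f.read(CHUNK_SIZE)
    # Extend the chunk to the end of its last row so no row arrives truncated.
    cursor = f.read(1)
    while cursor and ck_data and ck_data[-1] != '\n':
        ck_data += cursor
        cursor = f.read(1)
    f.close()
    return {'ck_data': ck_data, 'ck_index': ck_index + 1}

Because each request re-seeks from ck_index * CHUNK_SIZE rather than from wherever the previous chunk actually stopped, a row that straddles a chunk boundary can in principle be returned twice; the column-count check in the new template filters truncated rows but would not catch such duplicates.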
@@ -436,27 +462,27 @@ class ElandMulti( Tabular ): file_ext = 'elandmulti' - + def sniff( self, filename ): return False - + class Vcf( Tabular ): """ Variant Call Format for describing SNPs and other simple genome variations. """ - + file_ext = 'vcf' column_names = [ 'Chrom', 'Pos', 'ID', 'Ref', 'Alt', 'Qual', 'Filter', 'Info', 'Format', 'data' ] - + MetadataElement( name="columns", default=10, desc="Number of columns", readonly=True, visible=False ) MetadataElement( name="column_types", default=['str','int','str','str','str','int','str','list','str','str'], param=metadata.ColumnTypesParameter, desc="Column types", readonly=True, visible=False ) MetadataElement( name="viz_filter_cols", desc="Score column for visualization", default=[5], param=metadata.ColumnParameter, multiple=True ) - + def sniff( self, filename ): headers = get_headers( filename, '\n', count=1 ) return headers[0][0].startswith("##fileformat=VCF") def display_peek( self, dataset ): """Returns formated html of peek""" return Tabular.make_html_table( self, dataset, column_names=self.column_names ) - + def get_track_type( self ): return "VcfTrack", {"data": "tabix", "index": "summary_tree"} @@ -500,10 +526,10 @@ def sniff( self, filename ): """ Determines whether the file is in ELAND export format - + A file in ELAND export format consists of lines of tab-separated data. There is no header. - + Rules for sniffing as True: There must be 22 columns on each line LANE, TILEm X, Y, INDEX, READ_NO, SEQ, QUAL, POSITION, *STRAND, FILT must be correct @@ -522,7 +548,7 @@ line = line.strip() if not line: break #EOF - if line: + if line: linePieces = line.split('\t') if len(linePieces) != 22: return False @@ -568,7 +594,7 @@ #else: # # Otherwise, read the whole thing and set num data lines. for i, line in enumerate(dataset_fh): - if line: + if line: linePieces = line.split('\t') if len(linePieces) != 22: raise Exception('%s:%d:Corrupt line!' % (dataset.file_name,i)) @@ -586,5 +612,5 @@ dataset.metadata.tiles = ["%04d" % int(t) for t in tiles.keys()] dataset.metadata.barcodes = filter(lambda x: x != '0', barcodes.keys()) + ['NoIndex' for x in barcodes.keys() if x == '0'] dataset.metadata.reads = reads.keys() - - + + diff -r 60061d1a369635facf858766c7a992272b806750 -r ff62ddc66b1a454e9b3ba11297c0bd8e2a74dc8d lib/galaxy/web/controllers/dataset.py --- a/lib/galaxy/web/controllers/dataset.py +++ b/lib/galaxy/web/controllers/dataset.py @@ -1,17 +1,22 @@ -import logging, os, string, shutil, re, socket, mimetypes, urllib, tempfile, zipfile, glob, sys +import logging +import mimetypes +import os +import string +import sys +import tempfile +import urllib +import zipfile from galaxy.web.base.controller import * from galaxy.web.framework.helpers import time_ago, iff, grids -from galaxy import util, datatypes, jobs, web, model -from cgi import escape, FieldStorage +from galaxy import util, datatypes, web, model from galaxy.datatypes.display_applications.util import encode_dataset_user, decode_dataset_user from galaxy.util.sanitize_html import sanitize_html from galaxy.util import inflector from galaxy.model.item_attrs import * -from galaxy.model import LibraryDatasetDatasetAssociation, HistoryDatasetAssociation from galaxy.web.framework.helpers import to_unicode -import pkg_resources; +import pkg_resources; pkg_resources.require( "Paste" ) import paste.httpexceptions @@ -32,7 +37,7 @@ except RuntimeError: log.exception( "Compression error when testing zip compression. This option will be disabled for library downloads." 
) except (TypeError, zipfile.LargeZipFile): # ZIP64 is only in Python2.5+. Remove TypeError when 2.4 support is dropped - log.warning( 'Max zip file size is 2GB, ZIP64 not supported' ) + log.warning( 'Max zip file size is 2GB, ZIP64 not supported' ) comptypes.append( 'zip' ) try: os.unlink( tmpf ) @@ -53,7 +58,7 @@ ----------------------------------------------------------------------------- You should be able to view the history containing the related history item -${hid}: ${history_item_name} +${hid}: ${history_item_name} by logging in as a Galaxy admin user to the Galaxy instance referenced above and pointing your browser to the following link. @@ -90,7 +95,7 @@ class HistoryColumn( grids.GridColumn ): def get_value( self, trans, grid, hda): return hda.history.name - + class StatusColumn( grids.GridColumn ): def get_value( self, trans, grid, hda ): if hda.deleted: @@ -111,19 +116,19 @@ template='/dataset/grid.mako' default_sort_key = "-update_time" columns = [ - grids.TextColumn( "Name", key="name", + grids.TextColumn( "Name", key="name", # Link name to dataset's history. link=( lambda item: iff( item.history.deleted, None, dict( operation="switch", id=item.id ) ) ), filterable="advanced", attach_popup=True ), - HistoryColumn( "History", key="history", + HistoryColumn( "History", key="history", link=( lambda item: iff( item.history.deleted, None, dict( operation="switch_history", id=item.id ) ) ) ), grids.IndividualTagsColumn( "Tags", key="tags", model_tag_association_class=model.HistoryDatasetAssociationTagAssociation, filterable="advanced", grid_name="HistoryDatasetAssocationListGrid" ), StatusColumn( "Status", key="deleted", attach_popup=False ), grids.GridColumn( "Last Updated", key="update_time", format=time_ago ), ] - columns.append( - grids.MulticolFilterColumn( - "Search", - cols_to_filter=[ columns[0], columns[2] ], + columns.append( + grids.MulticolFilterColumn( + "Search", + cols_to_filter=[ columns[0], columns[2] ], key="free-text-search", visible=False, filterable="standard" ) ) operations = [ @@ -136,17 +141,17 @@ num_rows_per_page = 50 def build_initial_query( self, trans, **kwargs ): # Show user's datasets that are not deleted, not in deleted histories, and not hidden. - # To filter HDAs by user, need to join model class/HDA and History table so that it is - # possible to filter by user. However, for dictionary-based filtering to work, need a + # To filter HDAs by user, need to join model class/HDA and History table so that it is + # possible to filter by user. However, for dictionary-based filtering to work, need a # primary table for the query. 
return trans.sa_session.query( self.model_class ).select_from( self.model_class.table.join( model.History.table ) ) \ .filter( model.History.user == trans.user ) \ .filter( self.model_class.deleted==False ) \ .filter( model.History.deleted==False ) \ .filter( self.model_class.visible==True ) - + class DatasetInterface( BaseUIController, UsesAnnotations, UsesHistory, UsesHistoryDatasetAssociation, UsesItemRatings ): - + stored_list_grid = HistoryDatasetAssociationListGrid() @web.expose @@ -202,7 +207,7 @@ job_stdout=job.stdout, job_info=job.info, job_traceback=job.traceback, - email=email, + email=email, message=message ) frm = to_address # Check email a bit @@ -219,130 +224,45 @@ return trans.show_ok_message( "Your error report has been sent" ) except Exception, e: return trans.show_error_message( "An error occurred sending the report by email: %s" % str( e ) ) - + @web.expose def default(self, trans, dataset_id=None, **kwd): return 'This link may not be followed from within Galaxy.' - - @web.expose - def archive_composite_dataset( self, trans, data=None, **kwd ): - # save a composite object into a compressed archive for downloading - params = util.Params( kwd ) - valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' - outfname = data.name[0:150] - outfname = ''.join(c in valid_chars and c or '_' for c in outfname) - if (params.do_action == None): - params.do_action = 'zip' # default - msg = util.restore_text( params.get( 'msg', '' ) ) - messagetype = params.get( 'messagetype', 'done' ) - if not data: - msg = "You must select at least one dataset" - messagetype = 'error' - else: - error = False - try: - if (params.do_action == 'zip'): - # Can't use mkstemp - the file must not exist first - tmpd = tempfile.mkdtemp() - tmpf = os.path.join( tmpd, 'library_download.' 
+ params.do_action ) - if ziptype == '64': - archive = zipfile.ZipFile( tmpf, 'w', zipfile.ZIP_DEFLATED, True ) - else: - archive = zipfile.ZipFile( tmpf, 'w', zipfile.ZIP_DEFLATED ) - archive.add = lambda x, y: archive.write( x, y.encode('CP437') ) - elif params.do_action == 'tgz': - archive = util.streamball.StreamBall( 'w|gz' ) - elif params.do_action == 'tbz': - archive = util.streamball.StreamBall( 'w|bz2' ) - except (OSError, zipfile.BadZipFile): - error = True - log.exception( "Unable to create archive for download" ) - msg = "Unable to create archive for %s for download, please report this error" % outfname - messagetype = 'error' - if not error: - current_user_roles = trans.get_current_user_roles() - ext = data.extension - path = data.file_name - fname = os.path.split(path)[-1] - efp = data.extra_files_path - htmlname = os.path.splitext(outfname)[0] - if not htmlname.endswith(ext): - htmlname = '%s_%s' % (htmlname,ext) - archname = '%s.html' % htmlname # fake the real nature of the html file - try: - archive.add(data.file_name,archname) - except IOError: - error = True - log.exception( "Unable to add composite parent %s to temporary library download archive" % data.file_name) - msg = "Unable to create archive for download, please report this error" - messagetype = 'error' - for root, dirs, files in os.walk(efp): - for fname in files: - fpath = os.path.join(root,fname) - rpath = os.path.relpath(fpath,efp) - try: - archive.add( fpath,rpath ) - except IOError: - error = True - log.exception( "Unable to add %s to temporary library download archive" % rpath) - msg = "Unable to create archive for download, please report this error" - messagetype = 'error' - continue - if not error: - if params.do_action == 'zip': - archive.close() - tmpfh = open( tmpf ) - # CANNOT clean up - unlink/rmdir was always failing because file handle retained to return - must rely on a cron job to clean up tmp - trans.response.set_content_type( "application/x-zip-compressed" ) - trans.response.headers[ "Content-Disposition" ] = 'attachment; filename="%s.zip"' % outfname - return tmpfh - else: - trans.response.set_content_type( "application/x-tar" ) - outext = 'tgz' - if params.do_action == 'tbz': - outext = 'tbz' - trans.response.headers[ "Content-Disposition" ] = 'attachment; filename="%s.%s"' % (outfname,outext) - archive.wsgi_status = trans.response.wsgi_status() - archive.wsgi_headeritems = trans.response.wsgi_headeritems() - return archive.stream - return trans.show_error_message( msg ) - + @web.expose def get_metadata_file(self, trans, hda_id, metadata_name): """ Allows the downloading of metadata files associated with datasets (eg. 
bai index for bam files) """ data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( trans.security.decode_id( hda_id ) ) if not data or not trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), data.dataset ): return trans.show_error_message( "You are not allowed to access this dataset" ) - + valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' fname = ''.join(c in valid_chars and c or '_' for c in data.name)[0:150] - + file_ext = data.metadata.spec.get(metadata_name).get("file_ext", metadata_name) trans.response.headers["Content-Type"] = "application/octet-stream" trans.response.headers["Content-Disposition"] = 'attachment; filename="Galaxy%s-[%s].%s"' % (data.hid, fname, file_ext) return open(data.metadata.get(metadata_name).file_name) - - def _check_dataset(self, trans, dataset_id): + + def _check_dataset(self, trans, hda_id): # DEPRECATION: We still support unencoded ids for backward compatibility try: - data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( trans.security.decode_id( dataset_id ) ) + data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( trans.security.decode_id( hda_id) ) if data is None: - raise ValueError( 'Invalid reference dataset id: %s.' % dataset_id ) + raise ValueError( 'Invalid reference dataset id: %s.' % hda_id) except: try: - data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( int( dataset_id ) ) + data = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( int( hda_id ) ) except: data = None if not data: - raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( dataset_id ) ) + raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( hda_id ) ) if not trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), data.dataset ): return trans.show_error_message( "You are not allowed to access this dataset" ) - if data.state == trans.model.Dataset.states.UPLOAD: return trans.show_error_message( "Please wait until this dataset finishes uploading before attempting to view it." ) - return data - + @web.expose @web.json def transfer_status(self, trans, dataset_id, filename=None): @@ -352,7 +272,7 @@ if isinstance( data, basestring ): return data log.debug( "Checking transfer status for dataset %s..." 
% data.dataset.id ) - + # Pulling files in extra_files_path into cache is not handled via this # method but that's primarily because those files are typically linked to # through tool's output page anyhow so tying a JavaScript event that will @@ -361,63 +281,11 @@ return True else: return trans.app.object_store.file_ready(data.dataset) - + @web.expose - def display(self, trans, dataset_id=None, preview=False, filename=None, to_ext=None, **kwd): - """Catches the dataset id and displays file contents as directed""" - composite_extensions = trans.app.datatypes_registry.get_composite_extensions( ) - composite_extensions.append('html') # for archiving composite datatypes + def display(self, trans, dataset_id=None, preview=False, filename=None, to_ext=None, chunk=None, **kwd): data = self._check_dataset(trans, dataset_id) - if isinstance( data, basestring ): - return data - - if filename and filename != "index": - # For files in extra_files_path - file_path = trans.app.object_store.get_filename(data.dataset, extra_dir='dataset_%s_files' % data.dataset.id, alt_name=filename) - if os.path.exists( file_path ): - if os.path.isdir( file_path ): - return trans.show_error_message( "Directory listing is not allowed." ) #TODO: Reconsider allowing listing of directories? - mime, encoding = mimetypes.guess_type( file_path ) - if not mime: - try: - mime = trans.app.datatypes_registry.get_mimetype_by_extension( ".".split( file_path )[-1] ) - except: - mime = "text/plain" - trans.response.set_content_type( mime ) - return open( file_path ) - else: - return trans.show_error_message( "Could not find '%s' on the extra files path %s." % ( filename, file_path ) ) - - trans.response.set_content_type(data.get_mime()) - trans.log_event( "Display dataset id: %s" % str( dataset_id ) ) - - if to_ext or isinstance(data.datatype, datatypes.binary.Binary): # Saving the file, or binary file - if data.extension in composite_extensions: - return self.archive_composite_dataset( trans, data, **kwd ) - else: - trans.response.headers['Content-Length'] = int( os.stat( data.file_name ).st_size ) - if not to_ext: - to_ext = data.extension - valid_chars = '.,^_-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' - fname = ''.join(c in valid_chars and c or '_' for c in data.name)[0:150] - trans.response.set_content_type( "application/octet-stream" ) #force octet-stream so Safari doesn't append mime extensions to filename - trans.response.headers["Content-Disposition"] = 'attachment; filename="Galaxy%s-[%s].%s"' % (data.hid, fname, to_ext) - return open( data.file_name ) - if not os.path.exists( data.file_name ): - raise paste.httpexceptions.HTTPNotFound( "File Not Found (%s)." % data.file_name ) - max_peek_size = 1000000 # 1 MB - if isinstance(data.datatype, datatypes.images.Html): - max_peek_size = 10000000 # 10 MB for html - if not preview or isinstance(data.datatype, datatypes.images.Image) or os.stat( data.file_name ).st_size < max_peek_size: - if trans.app.config.sanitize_all_html and trans.response.get_content_type() == "text/html": - # Sanitize anytime we respond with plain text/html content. 
- return sanitize_html(open( data.file_name ).read()) - return open( data.file_name ) - else: - trans.response.set_content_type( "text/html" ) - return trans.stream_template_mako( "/dataset/large_file.mako", - truncated_data = open( data.file_name ).read(max_peek_size), - data = data ) + return data.datatype.display_data(trans, data, preview, filename, to_ext, chunk, **kwd) @web.expose def edit(self, trans, dataset_id=None, filename=None, hid=None, **kwd): @@ -443,7 +311,7 @@ # TODO: hid handling data = history.datasets[ int( hid ) - 1 ] id = None - elif dataset_id is not None: + elif dataset_id is not None: id = trans.app.security.decode_id( dataset_id ) data = trans.sa_session.query( self.app.model.HistoryDatasetAssociation ).get( id ) else: @@ -463,7 +331,7 @@ # permission. In this case, we'll reset this permission to the hda user's private role. manage_permissions_action = trans.app.security_agent.get_action( trans.app.security_agent.permitted_actions.DATASET_MANAGE_PERMISSIONS.action ) permissions = { manage_permissions_action : [ trans.app.security_agent.get_private_user_role( data.history.user ) ] } - trans.app.security_agent.set_dataset_permission( data.dataset, permissions ) + trans.app.security_agent.set_dataset_permission( data.dataset, permissions ) if trans.app.security_agent.can_access_dataset( current_user_roles, data.dataset ): if data.state == trans.model.Dataset.states.UPLOAD: return trans.show_error_message( "Please wait until this dataset finishes uploading before attempting to edit its metadata." ) @@ -600,7 +468,7 @@ refresh_frames=refresh_frames ) else: return trans.show_error_message( "You do not have permission to edit this dataset's ( id: %s ) information." % str( dataset_id ) ) - + @web.expose @web.require_login( "see all available datasets" ) def list( self, trans, **kwargs ): @@ -610,7 +478,7 @@ if 'operation' in kwargs: operation = kwargs['operation'].lower() hda_ids = util.listify( kwargs.get( 'id', [] ) ) - + # Display no message by default status, message = None, None @@ -630,15 +498,15 @@ if hdas: if operation == "switch" or operation == "switch_history": # Switch to a history that the HDA resides in. - + # Convert hda to histories. histories = [] for hda in hdas: histories.append( hda.history ) - + # Use history controller to switch the history. TODO: is this reasonable? status, message = trans.webapp.controllers['history']._list_switch( trans, histories ) - + # Current history changed, refresh history frame; if switching to a dataset, set hda seek. trans.template_context['refresh_frames'] = ['history'] if operation == "switch": @@ -648,35 +516,35 @@ # Copy a dataset to the current history. target_histories = [ trans.get_history() ] status, message = self._copy_datasets( trans, hda_ids, target_histories ) - + # Current history changed, refresh history frame. trans.template_context['refresh_frames'] = ['history'] # Render the list view return self.stored_list_grid( trans, status=status, message=message, **kwargs ) - + @web.expose def imp( self, trans, dataset_id=None, **kwd ): """ Import another user's dataset via a shared URL; dataset is added to user's current history. """ msg = "" - + # Set referer message. referer = trans.request.referer if referer is not "": referer_message = "<a href='%s'>return to the previous page</a>" % referer else: referer_message = "<a href='%s'>go to Galaxy's start page</a>" % url_for( '/' ) - + # Error checking. if not dataset_id: return trans.show_error_message( "You must specify a dataset to import. You can %s." 
% referer_message, use_panels=True ) - + # Do import. cur_history = trans.get_history( create=True ) status, message = self._copy_datasets( trans, [ dataset_id ], [ cur_history ], imported=True ) message = "Dataset imported. <br>You can <a href='%s'>start using the dataset</a> or %s." % ( url_for('/'), referer_message ) return trans.show_message( message, type=status, use_panels=True ) - + @web.expose @web.json @web.require_login( "use Galaxy datasets" ) @@ -685,7 +553,7 @@ dataset = self.get_dataset( trans, id, False, True ) return_dict = { "name" : dataset.name, "link" : url_for( action="display_by_username_and_slug", username=dataset.history.user.username, slug=trans.security.encode_id( dataset.id ) ) } return return_dict - + @web.expose def get_embed_html_async( self, trans, id ): """ Returns HTML for embedding a dataset in a page. """ @@ -698,7 +566,7 @@ def set_accessible_async( self, trans, id=None, accessible=False ): """ Does nothing because datasets do not have an importable/accessible attribute. This method could potentially set another attribute. """ return - + @web.expose @web.require_login( "rate items" ) @web.json @@ -713,7 +581,7 @@ dataset_rating = self.rate_item( rate_item, trans.get_user(), dataset, rating ) return self.get_ave_item_rating_data( trans.sa_session, dataset ) - + @web.expose def display_by_username_and_slug( self, trans, username, slug, filename=None, preview=True ): """ Display dataset by username and slug; because datasets do not yet have slugs, the slug is the dataset's id. """ @@ -722,10 +590,10 @@ # Filename used for composite types. if filename: return self.display( trans, dataset_id=slug, filename=filename) - + truncated, dataset_data = self.get_data( dataset, preview ) dataset.annotation = self.get_item_annotation_str( trans.sa_session, dataset.history.user, dataset ) - + # If data is binary or an image, stream without template; otherwise, use display template. # TODO: figure out a way to display images in display template. if isinstance(dataset.datatype, datatypes.binary.Binary) or isinstance(dataset.datatype, datatypes.images.Image) or isinstance(dataset.datatype, datatypes.images.Html): @@ -741,12 +609,12 @@ else: user_item_rating = 0 ave_item_rating, num_ratings = self.get_ave_item_rating_data( trans.sa_session, dataset ) - + return trans.fill_template_mako( "/dataset/display.mako", item=dataset, item_data=dataset_data, truncated=truncated, user_item_rating = user_item_rating, ave_item_rating=ave_item_rating, num_ratings=num_ratings ) else: raise web.httpexceptions.HTTPNotFound() - + @web.expose def get_item_content_async( self, trans, id ): """ Returns item content in HTML format. """ @@ -758,7 +626,7 @@ # Get annotation. 
dataset.annotation = self.get_item_annotation_str( trans.sa_session, trans.user, dataset ) return trans.stream_template_mako( "/dataset/item_content.mako", item=dataset, item_data=dataset_data, truncated=truncated ) - + @web.expose def annotate_async( self, trans, id, new_annotation=None, **kwargs ): dataset = self.get_dataset( trans, id, False, True ) @@ -770,7 +638,7 @@ self.add_item_annotation( trans.sa_session, trans.get_user(), dataset, new_annotation ) trans.sa_session.flush() return new_annotation - + @web.expose def get_annotation_async( self, trans, id ): dataset = self.get_dataset( trans, id, False, True ) @@ -841,7 +709,7 @@ if app_action in [ 'data', 'param' ]: assert action_param, "An action param must be provided for a data or param action" #data is used for things with filenames that could be passed off to a proxy - #in case some display app wants all files to be in the same 'directory', + #in case some display app wants all files to be in the same 'directory', #data can be forced to param, but not the other way (no filename for other direction) #get param name from url param name try: @@ -960,7 +828,7 @@ trans.log_event( "Dataset id %s has been unhidden" % str(id) ) return True return False - + def _purge( self, trans, dataset_id ): message = None status = 'done' @@ -1037,7 +905,7 @@ return "OK" else: raise Exception( message ) - + @web.expose def unhide( self, trans, dataset_id, filename ): if self._unhide( trans, dataset_id ): @@ -1070,7 +938,7 @@ """ Show the parameters used for an HDA """ - + def source_dataset_chain( dataset, lst ): try: cp_from_ldda = dataset.copied_from_library_dataset_dataset_association @@ -1084,13 +952,13 @@ except: pass return lst - + hda = trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( trans.security.decode_id( dataset_id ) ) if not hda: raise paste.httpexceptions.HTTPRequestRangeNotSatisfiable( "Invalid reference dataset id: %s." % str( dataset_id ) ) if not trans.app.security_agent.can_access_dataset( trans.get_current_user_roles(), hda.dataset ): return trans.show_error_message( "You are not allowed to access this dataset" ) - + # Get the associated job, if any. 
If this hda was copied from another, # we need to find the job that created the origial hda params_objects = None @@ -1102,7 +970,7 @@ job = None for assoc in job_hda.creating_job_associations: job = assoc.job - break + break if job: # Get the tool object try: @@ -1113,10 +981,10 @@ params_objects = job.get_param_values( trans.app ) except: pass - + inherit_chain = source_dataset_chain(hda, []) return trans.fill_template( "show_params.mako", inherit_chain=inherit_chain, history=trans.get_history(), hda=hda, tool=tool, params_objects=params_objects ) - + @web.expose def copy_datasets( self, trans, source_history=None, source_dataset_ids="", target_history_id=None, target_history_ids="", new_history_name="", do_copy=False, **kwd ): params = util.Params( kwd ) @@ -1175,7 +1043,7 @@ if history in target_histories: refresh_frames = ['history'] trans.sa_session.flush() - hist_names_str = ", ".join( ['<a href="%s" target="_top">%s</a>' % + hist_names_str = ", ".join( ['<a href="%s" target="_top">%s</a>' % ( url_for( controller="history", action="switch_to_history", \ hist_id=trans.security.encode_id( hist.id ) ), hist.name ) \ for hist in target_histories ] ) @@ -1186,7 +1054,7 @@ source_datasets = history.visible_datasets target_histories = [history] if user: - target_histories = user.active_histories + target_histories = user.active_histories return trans.fill_template( "/dataset/copy_view.mako", source_history = history, current_history = trans.get_history(), @@ -1204,7 +1072,7 @@ """ Helper method for copying datasets. """ user = trans.get_user() done_msg = error_msg = "" - + invalid_datasets = 0 if not dataset_ids or not target_histories: error_msg = "You must provide both source datasets and target histories." @@ -1229,7 +1097,7 @@ done_msg = "%i dataset%s copied to %i histor%s." 
% \
                                ( num_datasets_copied, iff( num_datasets_copied == 1, "", "s"), len( target_histories ), iff( len ( target_histories ) == 1, "y", "ies") )
             trans.sa_session.refresh( history )
-
+
         if error_msg != "":
             status = ERROR
             message = error_msg

diff -r 60061d1a369635facf858766c7a992272b806750 -r ff62ddc66b1a454e9b3ba11297c0bd8e2a74dc8d static/june_2007_style/base.less
--- a/static/june_2007_style/base.less
+++ b/static/june_2007_style/base.less
@@ -1700,5 +1700,22 @@
     position: relative;
 }
+// Dataset Display Styles
+
+#loading_indicator{
+    position:fixed;
+    right:10px;
+    top:10px;
+    height:32px;
+    width:32px;
+    background:url(largespinner.gif);
+}
+
+#content_table td{
+    text-align:right;
+    white-space:nowrap;
+    padding:2px 10px;
+}
+
 @import "base_sprites";

diff -r 60061d1a369635facf858766c7a992272b806750 -r ff62ddc66b1a454e9b3ba11297c0bd8e2a74dc8d static/june_2007_style/blue/base.css
--- a/static/june_2007_style/blue/base.css
+++ b/static/june_2007_style/blue/base.css
@@ -723,6 +723,8 @@
 div.toolSectionBody div.toolPanelLabel{padding-top:5px;padding-bottom:5px;margin-left:16px;margin-right:10px;display:list-item;list-style:none outside;}
 div.toolTitleNoSection{padding-bottom:5px;font-weight:bold;}
 #tool-search{padding-top:5px;padding-bottom:10px;position:relative;}
+#loading_indicator{position:fixed;right:10px;top:10px;height:32px;width:32px;background:url(largespinner.gif);}
+#content_table td{text-align:right;white-space:nowrap;padding:2px 10px;}
 .icon-button.display{background:url(history-buttons.png) no-repeat 0px 0px;}
 .icon-button.display:hover{background:url(history-buttons.png) no-repeat 0px -26px;}
 .icon-button.display_disabled{background:url(history-buttons.png) no-repeat 0px -52px;}

diff -r 60061d1a369635facf858766c7a992272b806750 -r ff62ddc66b1a454e9b3ba11297c0bd8e2a74dc8d static/june_2007_style/blue/largespinner.gif
Binary file static/june_2007_style/blue/largespinner.gif has changed

diff -r 60061d1a369635facf858766c7a992272b806750 -r ff62ddc66b1a454e9b3ba11297c0bd8e2a74dc8d templates/dataset/tabular_chunked.mako
--- /dev/null
+++ b/templates/dataset/tabular_chunked.mako
@@ -0,0 +1,65 @@
+<%inherit file="/base.mako"/>
+
+
+<%def name="title()">Dataset Display</%def>
+
+<%def name="javascripts()">
+    ${parent.javascripts()}
+    <script type="text/javascript">
+        var DATASET_URL = "${h.url_for( controller='/dataset', action='display', dataset_id=trans.security.encode_id( dataset.id ))}";
+        var DATASET_COLS = ${dataset.metadata.columns};
+        var current_chunk = 0;
+
+        function fillTable(){
+            if (current_chunk !== -1){
+                var table = $('#content_table');
+                $.getJSON(DATASET_URL, {chunk: current_chunk}, function (result) {
+                    if (result.ck_data !== ""){
+                        var lines = result.ck_data.split('\n');
+                        $.each(lines, function(){
+                            var line = this;
+                            var cells = line.split('\t');
+                            /* Check length of cells to ensure this is a complete row. */
+                            if (cells.length == DATASET_COLS){
+                                table.append('<tr><td>' + cells.join('</td><td>') + '</td></tr>');
+                            }
+                            else{
+                                table.append('<tr><td colspan="'+ DATASET_COLS+ '">' + line + '</td></tr>');
+                            }
+                        });
+                        current_chunk = result.ck_index;
+                    }
+                    else {
+                        current_chunk = -1;
+                    }
+                });
+            }
+        }
+
+        $(document).ready(function(){
+            fillTable();
+            $(window).scroll(function(){
+                console.log($(window).scrollTop());
+                console.log($(document).height());
+                console.log($(window).height());
+                // if ($(window).scrollTop() == $(document).height() - $(window).height()){
+                if ($(document).height() - $(window).scrollTop() <= $(window).height()){
+                    fillTable();
+                }
+            });
+            $('#loading_indicator').ajaxStart(function(){
+                $(this).show();
+            }).ajaxStop(function(){
+                $(this).hide();
+            });
+        });
+    </script>
+</%def>
+
+<%def name="stylesheets()">
+    ${parent.stylesheets()}
+</%def>
+
+<div id="loading_indicator" ></div>
+<table id="content_table" cellpadding="0">
+</table>

diff -r 60061d1a369635facf858766c7a992272b806750 -r ff62ddc66b1a454e9b3ba11297c0bd8e2a74dc8d templates/display_base.mako
--- a/templates/display_base.mako
+++ b/templates/display_base.mako
@@ -361,4 +361,4 @@
     </div></div>
-</%def>
\ No newline at end of file
+</%def>

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
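For reference, the JSON contract that templates/dataset/tabular_chunked.mako polls can also be exercised outside the browser. A minimal sketch in Python 2 stdlib (matching the code base); GALAXY_URL and DATASET_ID are placeholders, and a real Galaxy instance will normally also require a logged-in session or API credentials:

import json
import sys
import urllib
import urllib2

GALAXY_URL = "http://localhost:8080"   # assumption: local development instance
DATASET_ID = "0123456789abcdef"        # assumption: an encoded, accessible HDA id

chunk = 0
while True:
    query = urllib.urlencode({'dataset_id': DATASET_ID, 'chunk': chunk})
    payload = json.loads(urllib2.urlopen("%s/dataset/display?%s" % (GALAXY_URL, query)).read())
    if not payload['ck_data']:
        break                                # an empty chunk signals end of file
    sys.stdout.write(payload['ck_data'])     # newline-aligned, tab-separated rows
    chunk = payload['ck_index']              # the server returns the next index to request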