commit/galaxy-central: 11 new changesets
11 new changesets in galaxy-central: http://bitbucket.org/galaxy/galaxy-central/changeset/c959d32f2405/ changeset: c959d32f2405 user: jdu...@Bruno-Proto6.illumina.com date: 2011-05-25 00:15:03 summary: Added new "multi" parallelism option for tools affected #: 8 files (-1 bytes) --- a/lib/galaxy/datatypes/data.py Tue May 24 05:33:07 2011 -0400 +++ b/lib/galaxy/datatypes/data.py Tue May 24 15:15:03 2011 -0700 @@ -1,4 +1,4 @@ -import logging, os, sys, time, tempfile +import logging, os, sys, time, tempfile, gzip from galaxy import util from galaxy.util.odict import odict from galaxy.util.bunch import Bunch @@ -351,6 +351,32 @@ @property def has_resolution(self): return False + + + + def merge( split_files, output_file): + """ + Export files are usually compressed, but it doesn't have to be so. In the case that they are, use + zcat to cat the files and gzip -c to recompress the result, otherwise use cat + TODO: Move to a faster gzjoin-based technique + """ + #TODO: every time I try to import this from sniff, the parser dies + def is_gzip( filename ): + temp = open( filename, "U" ) + magic_check = temp.read( 2 ) + temp.close() + if magic_check != util.gzip_magic: + return False + return True + + if len(split_files) == 1: + os.system( 'mv -f %s %s' % ( split_files[0], output_file ) ) + return + if is_gzip(split_files[0]): + os.system( 'zcat %s | gzip -c > %s' % ( ' '.join(split_files), output_file ) ) + else: + os.system( 'cat %s > %s' % ( ' '.join(split_files), output_file ) ) + merge = staticmethod(merge) class Text( Data ): file_ext = 'txt' @@ -446,6 +472,80 @@ dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk' + def split( input_files, subdir_generator_function, split_params): + """ + Split the input files by line. + """ + if split_params is None: + return + + if len(input_files) > 1: + raise Exception("Text file splitting does not support multiple files") + + lines_per_file = None + chunk_size = None + if split_params['split_mode'] == 'number_of_parts': + lines_per_file = [] + # Computing the length is expensive! 
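The merge() method above keys everything off a two-byte magic-number test. As a self-contained sketch (GZIP_MAGIC stands in for util.gzip_magic, and subprocess.check_call for the os.system calls; both substitutions are illustrative only):

    import shutil
    import subprocess

    GZIP_MAGIC = b'\x1f\x8b'  # first two bytes of every gzip stream

    def is_gzip(filename):
        # Compare the first two bytes against the gzip magic number.
        with open(filename, 'rb') as handle:
            return handle.read(2) == GZIP_MAGIC

    def merge(split_files, output_file):
        if len(split_files) == 1:
            # A single part only needs to be moved into place.
            shutil.move(split_files[0], output_file)
        elif is_gzip(split_files[0]):
            # Decompress all parts, concatenate, recompress the result.
            cmd = 'zcat %s | gzip -c > %s' % (' '.join(split_files), output_file)
            subprocess.check_call(cmd, shell=True)
        else:
            cmd = 'cat %s > %s' % (' '.join(split_files), output_file)
            subprocess.check_call(cmd, shell=True)

The zcat/gzip round trip is arguably unnecessary: the gzip format permits concatenated members, so a plain cat of .gz parts already yields a valid stream, which is exactly the simplification changeset 6441f8214ebe makes below.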
+ def _file_len(fname): + i = 0 + f = open(fname) + for i, l in enumerate(f): + pass + f.close() + return i + 1 + length = _file_len(input_files[0]) + parts = int(split_params['split_size']) + if length < parts: + parts = length + len_each, remainder = divmod(length, parts) + while length > 0: + chunk = len_each + if remainder > 0: + chunk += 1 + lines_per_file.append(chunk) + remainder=- 1 + length -= chunk + elif split_params['split_mode'] == 'to_size': + chunk_size = int(split_params['split_size']) + else: + raise Exception('Unsupported split mode %s' % split_params['split_mode']) + + f = open(input_files[0], 'rt') + try: + chunk_idx = 0 + file_done = False + part_file = None + while not file_done: + if lines_per_file is None: + this_chunk_size = chunk_size + elif chunk_idx < len(lines_per_file): + this_chunk_size = lines_per_file[chunk_idx] + chunk_idx += 1 + lines_remaining = this_chunk_size + part_file = None + while lines_remaining > 0: + a_line = f.readline() + if a_line == '': + file_done = True + break + if part_file is None: + part_dir = subdir_generator_function() + part_path = os.path.join(part_dir, os.path.basename(input_files[0])) + part_file = open(part_path, 'w') + part_file.write(a_line) + lines_remaining -= 1 + if part_file is not None: + part_file.close() + except Exception, e: + log.error('Unable to split files: %s' % str(e)) + f.close() + if part_file is not None: + part_file.close() + raise + f.close() + split = staticmethod(split) + class Newick( Text ): pass --- a/lib/galaxy/datatypes/sequence.py Tue May 24 05:33:07 2011 -0400 +++ b/lib/galaxy/datatypes/sequence.py Tue May 24 15:15:03 2011 -0700 @@ -49,6 +49,95 @@ else: dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk' + + def split( input_files, subdir_generator_function, split_params): + """ + FASTQ files are split on cluster boundaries, in increments of 4 lines + """ + if split_params is None: + return + + def split_one( input_file, get_dir, clusters_per_file, default_clusters=None): + in_file = open(input_file, 'rt') + part_file = None + part = 0 + if clusters_per_file is None: + local_clusters_per_file = [default_clusters] + else: + local_clusters_per_file = [x for x in clusters_per_file] + + for i, line in enumerate(in_file): + cluster_number, line_in_cluster = divmod(i, 4) + current_part, remainder = divmod(cluster_number, local_clusters_per_file[part]) + + if (current_part != part or part_file is None): + if (part_file): + part_file.close() + part = current_part + part_dir = get_dir() + part_path = os.path.join(part_dir, os.path.basename(input_file)) + part_file = open(part_path, 'w') + if clusters_per_file is None: + local_clusters_per_file.append(default_clusters) + part_file.write(line) + if (part_file): + part_file.close() + in_file.close() + local_clusters_per_file[part] = remainder + 1 + return local_clusters_per_file + + directories = [] + def create_subdir(): + dir = subdir_generator_function() + directories.append(dir) + return dir + + clusters_per_file = None + if split_params['split_mode'] == 'number_of_parts': + # legacy splitting. 
To keep things simple, just scan the 0th file and count the clusters, + # then split it + clusters_per_file = [] + in_file = open(input_files[0], 'rt') + for i, line in enumerate(in_file): + pass + in_file.close() + length = (i+1)/4 + + if length <= 0: + raise Exception('Invalid sequence file %s' % input_files[0]) + parts = int(split_params['split_size']) + if length < parts: + parts = length + len_each, remainder = divmod(length, parts) + while length > 0: + chunk = len_each + if remainder > 0: + chunk += 1 + clusters_per_file.append(chunk) + remainder=- 1 + length -= chunk + split_one(input_files[0], create_subdir, clusters_per_file) + elif split_params['split_mode'] == 'to_size': + # split one file and see what the cluster sizes turn out to be + clusters_per_file = split_one(input_files[0], create_subdir, None, int(split_params['split_size'])) + else: + raise Exception('Unsupported split mode %s' % split_params['split_mode']) + + # split the rest, using the same number of clusters for each file + current_dir_idx = [0] # use a list to get around Python 2.x lame closure support + def get_subdir(): + if len(directories) <= current_dir_idx[0]: + raise Exception('FASTQ files do not have the same number of clusters - splitting failed') + result = directories[current_dir_idx[0]] + current_dir_idx[0] = current_dir_idx[0] + 1 + return result + + for i in range(1, len(input_files)): + current_dir_idx[0] = 0 + split_one(input_files[i], get_subdir, clusters_per_file) + split = staticmethod(split) + + class Alignment( data.Text ): """Class describing an alignment""" --- a/lib/galaxy/jobs/__init__.py Tue May 24 05:33:07 2011 -0400 +++ b/lib/galaxy/jobs/__init__.py Tue May 24 15:15:03 2011 -0700 @@ -291,6 +291,7 @@ self.working_directory = \ os.path.join( self.app.config.job_working_directory, str( self.job_id ) ) self.output_paths = None + self.output_dataset_paths = None self.tool_provided_job_metadata = None # Wrapper holding the info required to restore and clean up from files used for setting metadata externally self.external_output_metadata = metadata.JobExternalOutputMetadataWrapper( job ) @@ -657,23 +658,35 @@ def get_session_id( self ): return self.session_id + def get_input_dataset_fnames( self, ds ): + filenames = [] + filenames = [ ds.file_name ] + #we will need to stage in metadata file names also + #TODO: would be better to only stage in metadata files that are actually needed (found in command line, referenced in config files, etc.) + for key, value in ds.metadata.items(): + if isinstance( value, model.MetadataFile ): + filenames.append( value.file_name ) + return filenames + def get_input_fnames( self ): job = self.get_job() filenames = [] for da in job.input_datasets: #da is JobToInputDatasetAssociation object if da.dataset: - filenames.append( da.dataset.file_name ) - #we will need to stage in metadata file names also - #TODO: would be better to only stage in metadata files that are actually needed (found in command line, referenced in config files, etc.) 
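The FASTQ splitter above leans on one invariant: a cluster (read) is always exactly four lines, so divmod(i, 4) maps any line index to its cluster. A minimal sketch of that loop with hypothetical names, for a single uncompressed file:

    import os

    def split_fastq(input_file, out_dir, clusters_per_part):
        # Each FASTQ cluster is 4 lines: @title, sequence, '+', qualities.
        part, part_file = 0, None
        in_file = open(input_file, 'rt')
        for i, line in enumerate(in_file):
            cluster_number = i // 4
            current_part = cluster_number // clusters_per_part
            if part_file is None or current_part != part:
                if part_file:
                    part_file.close()
                part = current_part
                part_dir = os.path.join(out_dir, 'task_%d' % part)
                os.makedirs(part_dir)  # assumes the task dirs do not pre-exist
                part_file = open(os.path.join(part_dir, os.path.basename(input_file)), 'w')
            part_file.write(line)
        if part_file:
            part_file.close()
        in_file.close()

Because a part boundary can only fall on a multiple of four lines, two FASTQ files split with the same cluster counts stay aligned part-for-part, which is the property the paired-end case below depends on.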
- for key, value in da.dataset.metadata.items(): - if isinstance( value, model.MetadataFile ): - filenames.append( value.file_name ) + filenames.extend(self.get_input_dataset_fnames(da.dataset)) return filenames def get_output_fnames( self ): - if self.output_paths is not None: - return self.output_paths + if self.output_paths is None: + self.compute_outputs() + return self.output_paths + def get_output_datasets_and_fnames( self ): + if self.output_dataset_paths is None: + self.compute_outputs() + return self.output_dataset_paths + + def compute_outputs( self ) : class DatasetPath( object ): def __init__( self, dataset_id, real_path, false_path = None ): self.dataset_id = dataset_id @@ -688,19 +701,25 @@ job = self.get_job() # Job output datasets are combination of output datasets, library datasets, and jeha datasets. jeha = self.sa_session.query( model.JobExportHistoryArchive ).filter_by( job=job ).first() + jeha_false_path = None if self.app.config.outputs_to_working_directory: self.output_paths = [] + output_dataset_paths = {} for name, data in [ ( da.name, da.dataset.dataset ) for da in job.output_datasets + job.output_library_datasets ]: false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % data.id ) ) - self.output_paths.append( DatasetPath( data.id, data.file_name, false_path ) ) + dsp = DatasetPath( data.id, data.file_name, false_path ) + self.output_paths.append( dsp ) + self.output_dataset_paths[name] = data, dsp if jeha: - false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % jeha.dataset.id ) ) - self.output_paths.append( DatasetPath( jeha.dataset.id, jeha.dataset.file_name, false_path ) ) + jeha_false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % jeha.dataset.id ) ) else: - self.output_paths = [ DatasetPath( da.dataset.dataset.id, da.dataset.file_name ) for da in job.output_datasets + job.output_library_datasets ] - if jeha: - self.output_paths.append( DatasetPath( jeha.dataset.id, jeha.dataset.file_name ) ) - + results = [ (da.name, da.dataset, DatasetPath( da.dataset.dataset.id, da.dataset.file_name )) for da in job.output_datasets + job.output_library_datasets ] + self.output_paths = [t[2] for t in results] + self.output_dataset_paths = dict([(t[0], t[1:]) for t in results]) + + if jeha: + dsp = DatasetPath( jeha.dataset.id, jeha.dataset.file_name, jeha_false_path ) + self.output_paths.append( dsp ) return self.output_paths def get_output_file_id( self, file ): @@ -807,12 +826,7 @@ def __init__(self, task, queue): super(TaskWrapper, self).__init__(task.job, queue) self.task_id = task.id - self.parallelism = None - if task.part_file: - #do this better - self.working_directory = os.path.dirname(task.part_file) - else: - self.working_directory = None + self.working_directory = task.working_directory self.status = task.states.NEW def get_job( self ): --- a/lib/galaxy/jobs/runners/tasks.py Tue May 24 05:33:07 2011 -0400 +++ b/lib/galaxy/jobs/runners/tasks.py Tue May 24 15:15:03 2011 -0700 @@ -60,71 +60,58 @@ if command_line: try: # DBTODO read tool info and use the right kind of parallelism. - # For now, the only splitter is the 'basic' one, n-ways split on one input, one output. - # This is incredibly simplified. Parallelism ultimately needs to describe which inputs, how, etc. 
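Both splitters distribute work the same way: divmod yields a base chunk size plus a remainder, the first `remainder` parts each take one extra item, and the loop is meant to decrement the remainder one part at a time. The same arithmetic in closed form, as a sketch with hypothetical names:

    def even_chunk_sizes(total, parts):
        if total == 0:
            return []
        # Never make more parts than there are items.
        parts = min(parts, total)
        base, remainder = divmod(total, parts)
        # The first `remainder` chunks absorb one leftover item each.
        return [base + 1 if i < remainder else base for i in range(parts)]

    assert even_chunk_sizes(10, 4) == [3, 3, 2, 2]
    assert sum(even_chunk_sizes(10, 4)) == 10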
+ # For now, the only splitter is the 'basic' one job_wrapper.change_state( model.Job.states.RUNNING ) self.sa_session.flush() - parent_job = job_wrapper.get_job() # Split with the tool-defined method. - if job_wrapper.tool.parallelism == "basic": - from galaxy.jobs.splitters import basic - if len(job_wrapper.get_input_fnames()) > 1 or len(job_wrapper.get_output_fnames()) > 1: - log.error("The basic splitter is not capable of handling jobs with multiple inputs or outputs.") - job_wrapper.change_state( model.Job.states.ERROR ) - job_wrapper.fail("Job Splitting Failed, the basic splitter only handles tools with one input and one output") - # Requeue as a standard job? - return - input_file = job_wrapper.get_input_fnames()[0] - working_directory = job_wrapper.working_directory - # DBTODO execute an external task to do the splitting, this should happen at refactor. - # Regarding number of ways split, use "hints" in tool config? - # If the number of tasks is sufficiently high, we can use it to calculate job completion % and give a running status. - basic.split(input_file, working_directory, - 20, #Needs serious experimentation to find out what makes the most sense. - parent_job.input_datasets[0].dataset.ext) - # Tasks in this parts list are in alphabetical listdir order (15 before 5), but that should not matter. - parts = [os.path.join(os.path.abspath(job_wrapper.working_directory), p, os.path.basename(input_file)) - for p in os.listdir(job_wrapper.working_directory) - if p.startswith('task_')] - else: + try: + splitter = getattr(__import__('galaxy.jobs.splitters', globals(), locals(), [job_wrapper.tool.parallelism.method]), job_wrapper.tool.parallelism.method) + except: job_wrapper.change_state( model.Job.states.ERROR ) job_wrapper.fail("Job Splitting Failed, no match for '%s'" % job_wrapper.tool.parallelism) - # Assemble parts into task_wrappers + return + tasks = splitter.do_split(job_wrapper) # Not an option for now. Task objects don't *do* anything useful yet, but we'll want them tracked outside this thread to do anything. # if track_tasks_in_database: - tasks = [] task_wrappers = [] - for part in parts: - task = model.Task(parent_job, part) + for task in tasks: self.sa_session.add(task) - tasks.append(task) self.sa_session.flush() + # Must flush prior to the creation and queueing of task wrappers. for task in tasks: tw = TaskWrapper(task, job_wrapper.queue) task_wrappers.append(tw) self.app.job_manager.dispatcher.put(tw) tasks_incomplete = False + count_complete = 0 sleep_time = 1 + # sleep/loop until no more progress can be made. That is when + # all tasks are one of { OK, ERROR, DELETED } + completed_states = [ model.Task.states.OK, \ + model.Task.states.ERROR, \ + model.Task.states.DELETED ] + # TODO: Should we report an error (and not merge outputs) if one of the subtasks errored out? + # Should we prevent any that are pending from being started in that case? while tasks_incomplete is False: + count_complete = 0 tasks_incomplete = True for tw in task_wrappers: - if not tw.get_state() == model.Task.states.OK: + task_state = tw.get_state() + if not task_state in completed_states: tasks_incomplete = False - sleep( sleep_time ) - if sleep_time < 8: - sleep_time *= 2 - output_filename = job_wrapper.get_output_fnames()[0].real_path - basic.merge(working_directory, output_filename) - log.debug('execution finished: %s' % command_line) - for tw in task_wrappers: - # Prevent repetitive output, e.g. 
"Sequence File Aligned"x20 - # Eventually do a reduce for jobs that output "N reads mapped", combining all N for tasks. - if stdout.strip() != tw.get_task().stdout.strip(): - stdout += tw.get_task().stdout - if stderr.strip() != tw.get_task().stderr.strip(): - stderr += tw.get_task().stderr + else: + count_complete = count_complete + 1 + if tasks_incomplete is False: + # log.debug('Tasks complete: %s. Sleeping %s' % (count_complete, sleep_time)) + sleep( sleep_time ) + if sleep_time < 8: + sleep_time *= 2 + + log.debug('execution finished - beginning merge: %s' % command_line) + stdout, stderr = splitter.do_merge(job_wrapper, task_wrappers) + except Exception: job_wrapper.fail( "failure running job", exception=True ) log.exception("failure running job %d" % job_wrapper.job_id) --- a/lib/galaxy/jobs/splitters/basic.py Tue May 24 05:33:07 2011 -0400 +++ b/lib/galaxy/jobs/splitters/basic.py Tue May 24 15:15:03 2011 -0700 @@ -1,91 +1,23 @@ -import os, logging +import logging +import multi + log = logging.getLogger( __name__ ) -def _file_len(fname): - i = 0 - f = open(fname) - for i, l in enumerate(f): - pass - f.close() - return i + 1 +def set_basic_defaults(job_wrapper): + parent_job = job_wrapper.get_job() + job_wrapper.tool.parallelism.attributes['split_inputs'] = parent_job.input_datasets[0].name + job_wrapper.tool.parallelism.attributes['merge_outputs'] = job_wrapper.get_output_datasets_and_fnames().keys()[0] -def _fq_seq_count(fname): - count = 0 - f = open(fname) - for i, l in enumerate(f): - if l.startswith('@'): - count += 1 - f.close() - return count - -def split_fq(input_file, working_directory, parts): - # Temporary, switch this to use the fq reader in lib/galaxy_utils/sequence. - outputs = [] - length = _fq_seq_count(input_file) - if length < 1: - return outputs - if length < parts: - parts = length - len_each, remainder = divmod(length, parts) - f = open(input_file, 'rt') - for p in range(0, parts): - part_dir = os.path.join( os.path.abspath(working_directory), 'task_%s' % p) - if not os.path.exists( part_dir ): - os.mkdir( part_dir ) - part_path = os.path.join(part_dir, os.path.basename(input_file)) - part_file = open(part_path, 'w') - for l in range(0, len_each): - part_file.write(f.readline()) - part_file.write(f.readline()) - part_file.write(f.readline()) - part_file.write(f.readline()) - if remainder > 0: - part_file.write(f.readline()) - part_file.write(f.readline()) - part_file.write(f.readline()) - part_file.write(f.readline()) - remainder -= 1 - outputs.append(part_path) - part_file.close() - f.close() - return outputs - -def split_txt(input_file, working_directory, parts): - outputs = [] - length = _file_len(input_file) - if length < parts: - parts = length - len_each, remainder = divmod(length, parts) - f = open(input_file, 'rt') - for p in range(0, parts): - part_dir = os.path.join( os.path.abspath(working_directory), 'task_%s' % p) - if not os.path.exists( part_dir ): - os.mkdir( part_dir ) - part_path = os.path.join(part_dir, os.path.basename(input_file)) - part_file = open(part_path, 'w') - for l in range(0, len_each): - part_file.write(f.readline()) - if remainder > 0: - part_file.write(f.readline()) - remainder -= 1 - outputs.append(part_path) - part_file.close() - f.close() - return outputs +def do_split (job_wrapper): + if len(job_wrapper.get_input_fnames()) > 1 or len(job_wrapper.get_output_fnames()) > 1: + log.error("The basic splitter is not capable of handling jobs with multiple inputs or outputs.") + raise Exception, "Job Splitting Failed, the basic 
splitter only handles tools with one input and one output" + # add in the missing information for splitting the one input and merging the one output + set_basic_defaults(job_wrapper) + return multi.do_split(job_wrapper) -def split( input_file, working_directory, parts, file_type = None): - #Implement a better method for determining how to split. - if file_type.startswith('fastq'): - return split_fq(input_file, working_directory, parts) - else: - return split_txt(input_file, working_directory, parts) +def do_merge( job_wrapper, task_wrappers): + # add in the missing information for splitting the one input and merging the one output + set_basic_defaults(job_wrapper) + return multi.do_merge(job_wrapper, task_wrappers) -def merge( working_directory, output_file ): - output_file_name = os.path.basename(output_file) - task_dirs = [os.path.join(working_directory, x) for x in os.listdir(working_directory) if x.startswith('task_')] - task_dirs.sort(key = lambda x: int(x.split('task_')[-1])) - for task_dir in task_dirs: - try: - os.system( 'cat %s >> %s' % ( os.path.join(task_dir, output_file_name), output_file ) ) - except Exception, e: - log.error(str(e)) --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/lib/galaxy/jobs/splitters/multi.py Tue May 24 15:15:03 2011 -0700 @@ -0,0 +1,149 @@ +import os, logging, shutil +from galaxy import model + +log = logging.getLogger( __name__ ) + +def do_split (job_wrapper): + parent_job = job_wrapper.get_job() + working_directory = os.path.abspath(job_wrapper.working_directory) + + parallel_settings = job_wrapper.tool.parallelism.attributes + # Syntax: split_inputs="input1,input2" shared_inputs="genome" + # Designates inputs to be split or shared + split_inputs=parallel_settings.get("split_inputs") + if split_inputs is None: + split_inputs = [] + else: + split_inputs = [x.strip() for x in split_inputs.split(",")] + + shared_inputs=parallel_settings.get("shared_inputs") + if shared_inputs is None: + shared_inputs = [] + else: + shared_inputs = [x.strip() for x in shared_inputs.split(",")] + illegal_inputs = [x for x in shared_inputs if x in split_inputs] + if len(illegal_inputs) > 0: + raise Exception("Inputs have conflicting parallelism attributes: %s" % str( illegal_inputs )) + + subdir_index = [0] # use a list to get around Python 2.x lame closure support + task_dirs = [] + def get_new_working_directory_name(): + dir=os.path.join(working_directory, 'task_%d' % subdir_index[0]) + subdir_index[0] = subdir_index[0] + 1 + if not os.path.exists(dir): + os.makedirs(dir) + task_dirs.append(dir) + return dir + + # For things like paired end alignment, we need two inputs to be split. 
Since all inputs to all + # derived subtasks need to be correlated, allow only one input type to be split + type_to_input_map = {} + for input in parent_job.input_datasets: + if input.name in split_inputs: + type_to_input_map.setdefault(input.dataset.datatype, []).append(input.name) + elif input.name in shared_inputs: + pass # pass original file name + else: + log_error = "The input '%s' does not define a method for implementing parallelism" % str(input.name) + log.error(log_error) + raise Exception(log_error) + + if len(type_to_input_map) > 1: + log_error = "The multi splitter does not support splitting inputs of more than one type" + log.error(log_error) + raise Exception(log_error) + + # split the first one to build up the task directories + input_files = [] + for input in parent_job.input_datasets: + if input.name in split_inputs: + this_input_files = job_wrapper.get_input_dataset_fnames(input.dataset) + if len(this_input_files) > 1: + log_error = "The input '%s' is composed of multiple files - splitting is not allowed" % str(input.name) + log.error(log_error) + raise Exception(log_error) + input_files.extend(this_input_files) + + input_type = type_to_input_map.keys()[0] + # DBTODO execute an external task to do the splitting, this should happen at refactor. + # If the number of tasks is sufficiently high, we can use it to calculate job completion % and give a running status. + try: + input_type.split(input_files, get_new_working_directory_name, parallel_settings) + except AttributeError: + log_error = "The type '%s' does not define a method for splitting files" % str(input_type) + log.error(log_error) + raise + log.debug('do_split created %d parts' % len(task_dirs)) + # next, after we know how many divisions there are, add the shared inputs via soft links + for input in parent_job.input_datasets: + if input and input.name in shared_inputs: + names = job_wrapper.get_input_dataset_fnames(input.dataset) + for dir in task_dirs: + for file in names: + os.symlink(file, os.path.join(dir, os.path.basename(file))) + tasks = [] + for dir in task_dirs: + task = model.Task(parent_job, dir) + tasks.append(task) + return tasks + + +def do_merge( job_wrapper, task_wrappers): + parent_job = job_wrapper.get_job() + parallel_settings = job_wrapper.tool.parallelism.attributes + # Syntax: merge_outputs="export" pickone_outputs="genomesize" + # Designates outputs to be merged, or selected from as a representative + merge_outputs = parallel_settings.get("merge_outputs") + if merge_outputs is None: + merge_outputs = [] + else: + merge_outputs = [x.strip() for x in merge_outputs.split(",")] + pickone_outputs = parallel_settings.get("pickone_outputs") + if pickone_outputs is None: + pickone_outputs = [] + else: + pickone_outputs = [x.strip() for x in pickone_outputs.split(",")] + + illegal_outputs = [x for x in merge_outputs if x in pickone_outputs] + if len(illegal_outputs) > 0: + raise Exception("Outputs have conflicting parallelism attributes: %s" % str( illegal_outputs )) + + + working_directory = job_wrapper.working_directory + task_dirs = [os.path.join(working_directory, x) for x in os.listdir(working_directory) if x.startswith('task_')] + # TODO: Output datasets can be very complex. 
This doesn't handle metadata files + outputs = job_wrapper.get_output_datasets_and_fnames() + pickone_done = [] + task_dirs = [os.path.join(working_directory, x) for x in os.listdir(working_directory) if x.startswith('task_')] + for output in outputs: + output_file_name = str(outputs[output][1]) + base_output_name = os.path.basename(output_file_name) + if output in merge_outputs: + output_type = outputs[output][0].datatype + output_files = [os.path.join(dir,base_output_name) for dir in task_dirs] + log.debug('files %s ' % output_files) + output_type.merge(output_files, output_file_name) + log.debug('merge finished: %s' % output_file_name) + pass # TODO: merge all the files + elif output in pickone_outputs: + # just pick one of them + if output not in pickone_done: + task_file_name = os.path.join(task_dirs[0], base_output_name) + shutil.move( task_file_name, output_file_name ) + pickone_done.append(output) + else: + log_error = "The output '%s' does not define a method for implementing parallelism" % output + log.error(log_error) + raise Exception(log_error) + + stdout = '' + stderr='' + for tw in task_wrappers: + # Prevent repetitive output, e.g. "Sequence File Aligned"x20 + # Eventually do a reduce for jobs that output "N reads mapped", combining all N for tasks. + if stdout.strip() != tw.get_task().stdout.strip(): + stdout += tw.get_task().stdout + if stderr.strip() != tw.get_task().stderr.strip(): + stderr += tw.get_task().stderr + return (stdout, stderr) + --- a/lib/galaxy/model/__init__.py Tue May 24 05:33:07 2011 -0400 +++ b/lib/galaxy/model/__init__.py Tue May 24 15:15:03 2011 -0700 @@ -176,12 +176,23 @@ self.parameters = [] self.state = Task.states.NEW self.info = None + # TODO: Rename this to working_directory + # Does this necessitate a DB migration step? self.part_file = part_file self.task_runner_name = None self.task_runner_external_id = None self.job = job self.stdout = None self.stderr = None + + @property + def working_directory(self): + if self.part_file is not None: + if not os.path.isdir(self.part_file): + return os.path.dirname(self.part_file) + else: + return self.part_file + return None def set_state( self, state ): self.state = state --- a/lib/galaxy/tools/__init__.py Tue May 24 05:33:07 2011 -0400 +++ b/lib/galaxy/tools/__init__.py Tue May 24 15:15:03 2011 -0700 @@ -321,6 +321,21 @@ self.type = None self.version = None +class ToolParallelismInfo(object): + """ + Stores the information (if any) for running multiple instances of the tool in parallel + on the same set of inputs. + """ + def __init__(self, tag): + self.method = tag.get('method') + self.attributes = dict([item for item in tag.attrib.items() if item[0] != 'method' ]) + if len(self.attributes) == 0: + # legacy basic mode - provide compatible defaults + self.attributes['split_size'] = 20 + self.attributes['split_mode'] = 'number_of_parts' + + + class Tool: """ Represents a computational tool that can be executed through Galaxy. @@ -403,7 +418,7 @@ # Parallelism for tasks, read from tool config. parallelism = root.find("parallelism") if parallelism is not None and parallelism.get("method"): - self.parallelism = parallelism.get("method") + self.parallelism = ToolParallelismInfo(parallelism) else: self.parallelism = None if self.app.config.start_job_runners is None: http://bitbucket.org/galaxy/galaxy-central/changeset/6441f8214ebe/ changeset: 6441f8214ebe user: John Duddy date: 2011-05-25 19:45:53 summary: Remove zcat + gzip -c for merging gz files. 
Go with cat for now affected #: 1 file (-1 bytes) --- a/lib/galaxy/datatypes/data.py Tue May 24 15:15:03 2011 -0700 +++ b/lib/galaxy/datatypes/data.py Wed May 25 10:45:53 2011 -0700 @@ -1,4 +1,4 @@ -import logging, os, sys, time, tempfile, gzip +import logging, os, sys, time, tempfile from galaxy import util from galaxy.util.odict import odict from galaxy.util.bunch import Bunch @@ -356,24 +356,11 @@ def merge( split_files, output_file): """ - Export files are usually compressed, but it doesn't have to be so. In the case that they are, use - zcat to cat the files and gzip -c to recompress the result, otherwise use cat - TODO: Move to a faster gzjoin-based technique + TODO: Do we need to merge gzip files using gzjoin? cat seems to work, + but might be brittle. Need to revisit this. """ - #TODO: every time I try to import this from sniff, the parser dies - def is_gzip( filename ): - temp = open( filename, "U" ) - magic_check = temp.read( 2 ) - temp.close() - if magic_check != util.gzip_magic: - return False - return True - if len(split_files) == 1: os.system( 'mv -f %s %s' % ( split_files[0], output_file ) ) - return - if is_gzip(split_files[0]): - os.system( 'zcat %s | gzip -c > %s' % ( ' '.join(split_files), output_file ) ) else: os.system( 'cat %s > %s' % ( ' '.join(split_files), output_file ) ) merge = staticmethod(merge) http://bitbucket.org/galaxy/galaxy-central/changeset/4a5512953201/ changeset: 4a5512953201 user: John Duddy date: 2011-06-16 21:19:07 summary: Add ability to split compressed input Fix off by one error on number_of_parts splitting affected #: 1 file (-1 bytes) --- a/lib/galaxy/datatypes/sequence.py Tue May 24 15:15:03 2011 -0700 +++ b/lib/galaxy/datatypes/sequence.py Thu Jun 16 12:19:07 2011 -0700 @@ -2,6 +2,7 @@ Sequence classes """ +import gzip import data import logging import re @@ -58,17 +59,22 @@ return def split_one( input_file, get_dir, clusters_per_file, default_clusters=None): - in_file = open(input_file, 'rt') + compress = is_gzip(input_file) + if compress: +# TODO: Python 2.4, 2.5 don't have io.BufferedReader!!! +# add a buffered reader because gzip is really slow before python 2.7 + in_file = gzip.GzipFile(input_file, 'r') + else: + in_file = open(input_file, 'rt') part_file = None part = 0 if clusters_per_file is None: local_clusters_per_file = [default_clusters] else: local_clusters_per_file = [x for x in clusters_per_file] - for i, line in enumerate(in_file): cluster_number, line_in_cluster = divmod(i, 4) - current_part, remainder = divmod(cluster_number, local_clusters_per_file[part]) + current_part, remainder = divmod(cluster_number, local_clusters_per_file[part]+1) if (current_part != part or part_file is None): if (part_file): @@ -76,6 +82,7 @@ part = current_part part_dir = get_dir() part_path = os.path.join(part_dir, os.path.basename(input_file)) +# TODO: If the input was compressed, compress the output? 
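The pattern this changeset introduces - sniff the magic bytes, then hand back either a GzipFile or a plain file object - keeps the record logic identical for both cases, since GzipFile yields decompressed lines when iterated. A sketch, reusing the is_gzip helper from earlier:

    import gzip

    def open_maybe_gzip(filename):
        # GzipFile iterates over decompressed lines just like a plain file,
        # so the divmod-on-4 cluster logic needs no changes.
        if is_gzip(filename):
            return gzip.GzipFile(filename, 'r')  # slow before Python 2.7, per the TODO
        return open(filename, 'rt')

    # usage: for i, line in enumerate(open_maybe_gzip(path)): ...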
part_file = open(part_path, 'w') if clusters_per_file is None: local_clusters_per_file.append(default_clusters) http://bitbucket.org/galaxy/galaxy-central/changeset/ea221820ba19/ changeset: ea221820ba19 user: John Duddy date: 2011-06-16 23:00:45 summary: Merge affected #: 1 file (-1 bytes) --- a/lib/galaxy/datatypes/data.py Thu Jun 16 12:19:07 2011 -0700 +++ b/lib/galaxy/datatypes/data.py Thu Jun 16 14:00:45 2011 -0700 @@ -1,4 +1,4 @@ -import logging, os, sys, time, tempfile, gzip +import logging, os, sys, time, tempfile from galaxy import util from galaxy.util.odict import odict from galaxy.util.bunch import Bunch @@ -356,24 +356,11 @@ def merge( split_files, output_file): """ - Export files are usually compressed, but it doesn't have to be so. In the case that they are, use - zcat to cat the files and gzip -c to recompress the result, otherwise use cat - TODO: Move to a faster gzjoin-based technique + TODO: Do we need to merge gzip files using gzjoin? cat seems to work, + but might be brittle. Need to revisit this. """ - #TODO: every time I try to import this from sniff, the parser dies - def is_gzip( filename ): - temp = open( filename, "U" ) - magic_check = temp.read( 2 ) - temp.close() - if magic_check != util.gzip_magic: - return False - return True - if len(split_files) == 1: os.system( 'mv -f %s %s' % ( split_files[0], output_file ) ) - return - if is_gzip(split_files[0]): - os.system( 'zcat %s | gzip -c > %s' % ( ' '.join(split_files), output_file ) ) else: os.system( 'cat %s > %s' % ( ' '.join(split_files), output_file ) ) merge = staticmethod(merge) http://bitbucket.org/galaxy/galaxy-central/changeset/1dfae31e27bc/ changeset: 1dfae31e27bc user: John Duddy date: 2011-06-16 23:06:06 summary: Fix issues related to splitting multiple inputs and handling Files split evenly affected #: 1 file (-1 bytes) --- a/lib/galaxy/datatypes/sequence.py Thu Jun 16 14:00:45 2011 -0700 +++ b/lib/galaxy/datatypes/sequence.py Thu Jun 16 14:06:06 2011 -0700 @@ -58,7 +58,10 @@ if split_params is None: return - def split_one( input_file, get_dir, clusters_per_file, default_clusters=None): + def split_calculate_clusters( input_file, get_dir, default_clusters): + """ + Split the 0th file into even sized chunks, and return the number of clusters in each + """ compress = is_gzip(input_file) if compress: # TODO: Python 2.4, 2.5 don't have io.BufferedReader!!! @@ -68,13 +71,10 @@ in_file = open(input_file, 'rt') part_file = None part = 0 - if clusters_per_file is None: - local_clusters_per_file = [default_clusters] - else: - local_clusters_per_file = [x for x in clusters_per_file] + local_clusters_per_file = [] for i, line in enumerate(in_file): cluster_number, line_in_cluster = divmod(i, 4) - current_part, remainder = divmod(cluster_number, local_clusters_per_file[part]+1) + current_part, remainder = divmod(cluster_number, default_clusters) if (current_part != part or part_file is None): if (part_file): @@ -84,8 +84,7 @@ part_path = os.path.join(part_dir, os.path.basename(input_file)) # TODO: If the input was compressed, compress the output? part_file = open(part_path, 'w') - if clusters_per_file is None: - local_clusters_per_file.append(default_clusters) + local_clusters_per_file.append(default_clusters) part_file.write(line) if (part_file): part_file.close() @@ -93,6 +92,50 @@ local_clusters_per_file[part] = remainder + 1 return local_clusters_per_file + def split_to_size(input_file, get_dir, clusters_per_file): + """ + Split the files beyond the 0th to the same number of clusters as the 0th. 
+ This is used to split in a variety of ways, so these are both legal for + clusters_per_file: + [ 10000, 10000, 10000, 10000, 2 ] # to_size=10000, 40002 total + [ 10001, 10001, 10000, 10000 ] # number_of_parts = 4, 40002 total + + """ + compress = is_gzip(input_file) + if compress: +# TODO: Python 2.4, 2.5 don't have io.BufferedReader!!! +# add a buffered reader because gzip is really slow before python 2.7 + in_file = gzip.GzipFile(input_file, 'r') + else: + in_file = open(input_file, 'rt') + part_file = None + part = 0 + clusters_this_part = 0 + for i, line in enumerate(in_file): + cluster_number, line_in_cluster = divmod(i, 4) + if clusters_this_part == clusters_per_file[part]: + current_part = part + 1 + else: + current_part = part + + if (current_part != part or part_file is None): + if (part_file): + part_file.close() + part = current_part + clusters_this_part = 0 + part_dir = get_dir() + part_path = os.path.join(part_dir, os.path.basename(input_file)) +# TODO: If the input was compressed, compress the output? + part_file = open(part_path, 'w') + if clusters_per_file is None and part > 0: + local_clusters_per_file.append(default_clusters) + part_file.write(line) + if line_in_cluster == 3: + clusters_this_part += 1 + if (part_file): + part_file.close() + in_file.close() + directories = [] def create_subdir(): dir = subdir_generator_function() @@ -123,10 +166,11 @@ clusters_per_file.append(chunk) remainder=- 1 length -= chunk - split_one(input_files[0], create_subdir, clusters_per_file) + split_to_size(input_files[0], create_subdir, clusters_per_file) elif split_params['split_mode'] == 'to_size': # split one file and see what the cluster sizes turn out to be - clusters_per_file = split_one(input_files[0], create_subdir, None, int(split_params['split_size'])) + clusters_per_file = split_calculate_clusters(input_files[0], create_subdir, + int(split_params['split_size'])) else: raise Exception('Unsupported split mode %s' % split_params['split_mode']) @@ -141,7 +185,7 @@ for i in range(1, len(input_files)): current_dir_idx[0] = 0 - split_one(input_files[i], get_subdir, clusters_per_file) + split_to_size(input_files[i], get_subdir, clusters_per_file) split = staticmethod(split) http://bitbucket.org/galaxy/galaxy-central/changeset/49c9f87e4717/ changeset: 49c9f87e4717 user: John Duddy date: 2011-10-07 22:25:19 summary: Merge affected #: 711 files (-1 bytes) Diff too large to display. http://bitbucket.org/galaxy/galaxy-central/changeset/1827729ed37e/ changeset: 1827729ed37e user: John Duddy date: 2011-10-07 23:12:40 summary: Upgrade of splitting code to do splitting in the tasks versus in the Galaxy process affected #: 12 files (-1 bytes) --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/extract_dataset_parts.sh Fri Oct 07 14:12:40 2011 -0700 @@ -0,0 +1,8 @@ +#!/bin/sh + +cd `dirname $0` +for file in $1/split_info*.json +do + # echo processing $file + python ./scripts/extract_dataset_part.py $file +done --- a/lib/galaxy/datatypes/data.py Fri Oct 07 13:25:19 2011 -0700 +++ b/lib/galaxy/datatypes/data.py Fri Oct 07 14:12:40 2011 -0700 @@ -360,9 +360,12 @@ but might be brittle. Need to revisit this. 
""" if len(split_files) == 1: - os.system( 'mv -f %s %s' % ( split_files[0], output_file ) ) + cmd = 'mv -f %s %s' % ( split_files[0], output_file ) else: - os.system( 'cat %s > %s' % ( ' '.join(split_files), output_file ) ) + cmd = 'cat %s > %s' % ( ' '.join(split_files), output_file ) + result = os.system(cmd) + if result != 0: + raise Exception('Result %s from %s' % (result, cmd)) merge = staticmethod(merge) class Text( Data ): @@ -533,6 +536,13 @@ f.close() split = staticmethod(split) +class LineCount( Text ): + """ + Dataset contains a single line with a single integer that denotes the + line count for a related dataset. Used for custom builds. + """ + pass + class Newick( Text ): pass --- a/lib/galaxy/datatypes/sequence.py Fri Oct 07 13:25:19 2011 -0700 +++ b/lib/galaxy/datatypes/sequence.py Fri Oct 07 14:12:40 2011 -0700 @@ -7,6 +7,7 @@ import logging import re import string +import os from cgi import escape from galaxy.datatypes.metadata import MetadataElement from galaxy.datatypes import metadata @@ -14,8 +15,52 @@ from galaxy import util from sniff import * +import pkg_resources +pkg_resources.require("simplejson") +import simplejson + log = logging.getLogger(__name__) +class SequenceSplitLocations( data.Text ): + """ + Class storing information about a sequence file composed of multiple gzip files concatenated as + one OR an uncompressed file. In the GZIP case, each sub-file's location is stored in start and end. + The format of the file is JSON: + { "sections" : [ + { "start" : "x", "end" : "y", "clusters" : "z" }, + ... + ]} + """ + def set_peek( self, dataset, is_multi_byte=False ): + if not dataset.dataset.purged: + try: + parsed_data = simplejson.load(open(dataset.file_name)) + # dataset.peek = simplejson.dumps(data, sort_keys=True, indent=4) + dataset.peek = data.get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte ) + dataset.blurb = '%d sections' % len(parsed_data['sections']) + except Exception, e: + dataset.peek = 'Not FQTOC file' + dataset.blurb = 'Not FQTOC file' + else: + dataset.peek = 'file does not exist' + dataset.blurb = 'file purged from disk' + + file_ext = "fqtoc" + + def sniff( self, filename ): + if os.path.getsize(filename) < 50000: + try: + data = simplejson.load(open(filename)) + sections = data['sections'] + for section in sections: + if 'start' not in section or 'end' not in section or 'clusters' not in section: + return False + return True + except: + pass + return False + + class Sequence( data.Text ): """Class describing a sequence""" @@ -50,143 +95,237 @@ else: dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk' + + def get_sequences_per_file(total_clusters, split_params): + if split_params['split_mode'] == 'number_of_parts': + # legacy basic mode - split into a specified number of parts + parts = int(split_params['split_size']) + sequences_per_file = [total_clusters/parts for i in range(parts)] + for i in range(total_clusters % parts): + sequences_per_file[i] += 1 + elif split_params['split_mode'] == 'to_size': + # loop through the sections and calculate the number of clusters + chunk_size = long(split_params['split_size']) + + chunks = total_clusters / chunk_size + rem = total_clusters % chunk_size + sequences_per_file = [chunk_size for i in range(total_clusters / chunk_size)] + # TODO: Should we invest the time in a better way to handle small remainders? 
+ if rem > 0: + sequences_per_file.append(rem) + else: + raise Exception('Unsupported split mode %s' % split_params['split_mode']) + return sequences_per_file + get_sequences_per_file = staticmethod(get_sequences_per_file) + + def do_slow_split( cls, input_datasets, subdir_generator_function, split_params): + # count the clusters so we can split + # TODO: if metadata is present, take the number of lines / 4 + if input_datasets[0].metadata is not None and input_datasets[0].metadata.sequences is not None: + total_clusters = input_datasets[0].metadata.sequences + else: + input_file = input_datasets[0].file_name + compress = is_gzip(input_file) + if compress: + # gzip is really slow before python 2.7! + in_file = gzip.GzipFile(input_file, 'r') + else: + # TODO + # if a file is not compressed, seek locations can be calculated and stored + # ideally, this would be done in metadata + # TODO + # Add BufferedReader if python 2.7? + in_file = open(input_file, 'rt') + total_clusters = long(0) + for i, line in enumerate(in_file): + total_clusters += 1 + in_file.close() + total_clusters /= 4 + + sequences_per_file = cls.get_sequences_per_file(total_clusters, split_params) + return cls.write_split_files(input_datasets, None, subdir_generator_function, sequences_per_file) + do_slow_split = classmethod(do_slow_split) + + def do_fast_split( cls, input_datasets, toc_file_datasets, subdir_generator_function, split_params): + data = simplejson.load(open(toc_file_datasets[0].file_name)) + sections = data['sections'] + total_clusters = long(0) + for section in sections: + total_clusters += long(section['clusters']) + sequences_per_file = cls.get_sequences_per_file(total_clusters, split_params) + return cls.write_split_files(input_datasets, toc_file_datasets, subdir_generator_function, sequences_per_file) + do_fast_split = classmethod(do_fast_split) + + def write_split_files(cls, input_datasets, toc_file_datasets, subdir_generator_function, sequences_per_file): + directories = [] + def get_subdir(idx): + if idx < len(directories): + return directories[idx] + dir = subdir_generator_function() + directories.append(dir) + return dir + + # we know how many splits and how many clusters in each. What remains is to write out instructions for the + # splitting of all the input files. 
To decouple the format of those instructions from this code, the exact format of + # those instructions is delegated to scripts + start_sequence=0 + for part_no in range(len(sequences_per_file)): + dir = get_subdir(part_no) + for ds_no in range(len(input_datasets)): + ds = input_datasets[ds_no] + base_name = os.path.basename(ds.file_name) + part_path = os.path.join(dir, base_name) + split_data = dict(class_name='%s.%s' % (cls.__module__, cls.__name__), + output_name=part_path, + input_name=ds.file_name, + args=dict(start_sequence=start_sequence, num_sequences=sequences_per_file[part_no])) + if toc_file_datasets is not None: + toc = toc_file_datasets[ds_no] + split_data['args']['toc_file'] = toc.file_name + f = open(os.path.join(dir, 'split_info_%s.json' % base_name), 'w') + simplejson.dump(split_data, f) + f.close() + start_sequence += sequences_per_file[part_no] + return directories + write_split_files = classmethod(write_split_files) - def split( input_files, subdir_generator_function, split_params): + def split( cls, input_datasets, subdir_generator_function, split_params): """ FASTQ files are split on cluster boundaries, in increments of 4 lines """ if split_params is None: - return + return None + + # first, see if there are any associated FQTOC files that will give us the split locations + # if so, we don't need to read the files to do the splitting + toc_file_datasets = [] + for ds in input_datasets: + tmp_ds = ds + fqtoc_file = None + while fqtoc_file is None and tmp_ds is not None: + fqtoc_file = tmp_ds.get_converted_files_by_type('fqtoc') + tmp_ds = tmp_ds.copied_from_library_dataset_dataset_association + + if fqtoc_file is not None: + toc_file_datasets.append(fqtoc_file) + + if len(toc_file_datasets) == len(input_datasets): + return cls.do_fast_split(input_datasets, toc_file_datasets, subdir_generator_function, split_params) + return cls.do_slow_split(input_datasets, subdir_generator_function, split_params) + split = classmethod(split) + + def process_split_file(data): + """ + This is called in the context of an external process launched by a Task (possibly not on the Galaxy machine) + to create the input files for the Task. The parameters: + data - a dict containing the contents of the split file + """ + args = data['args'] + input_name = data['input_name'] + output_name = data['output_name'] + start_sequence = long(args['start_sequence']) + sequence_count = long(args['num_sequences']) - def split_calculate_clusters( input_file, get_dir, default_clusters): - """ - Split the 0th file into even sized chunks, and return the number of clusters in each - """ - compress = is_gzip(input_file) - if compress: -# TODO: Python 2.4, 2.5 don't have io.BufferedReader!!! 
-# add a buffered reader because gzip is really slow before python 2.7 - in_file = gzip.GzipFile(input_file, 'r') - else: - in_file = open(input_file, 'rt') - part_file = None - part = 0 - local_clusters_per_file = [] - for i, line in enumerate(in_file): - cluster_number, line_in_cluster = divmod(i, 4) - current_part, remainder = divmod(cluster_number, default_clusters) + if 'toc_file' in args: + toc_file = simplejson.load(open(args['toc_file'], 'r')) + commands = Sequence.get_split_commands_with_toc(input_name, output_name, toc_file, start_sequence, sequence_count) + else: + commands = Sequence.get_split_commands_sequential(is_gzip(input_name), input_name, output_name, start_sequence, sequence_count) + for cmd in commands: + if 0 != os.system(cmd): + raise Exception("Executing '%s' failed" % cmd) + return True + process_split_file = staticmethod(process_split_file) + + def get_split_commands_with_toc(input_name, output_name, toc_file, start_sequence, sequence_count): + """ + Uses a Table of Contents dict, parsed from an FQTOC file, to come up with a set of + shell commands that will extract the parts necessary + >>> three_sections=[dict(start=0, end=74, clusters=10), dict(start=74, end=148, clusters=10), dict(start=148, end=148+76, clusters=10)] + >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=0, sequence_count=10) + ['dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null >> ./output.gz'] + >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=1, sequence_count=5) + ['(dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +5 2> /dev/null) | head -20 | gzip -c >> ./output.gz'] + >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=0, sequence_count=20) + ['dd bs=1 skip=0 count=148 if=./input.gz 2> /dev/null >> ./output.gz'] + >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=5, sequence_count=10) + ['(dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +21 2> /dev/null) | head -20 | gzip -c >> ./output.gz', '(dd bs=1 skip=74 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +1 2> /dev/null) | head -20 | gzip -c >> ./output.gz'] + >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=10, sequence_count=10) + ['dd bs=1 skip=74 count=74 if=./input.gz 2> /dev/null >> ./output.gz'] + >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=5, sequence_count=20) + ['(dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +21 2> /dev/null) | head -20 | gzip -c >> ./output.gz', 'dd bs=1 skip=74 count=74 if=./input.gz 2> /dev/null >> ./output.gz', '(dd bs=1 skip=148 count=76 if=./input.gz 2> /dev/null )| zcat | ( tail -n +1 2> /dev/null) | head -20 | gzip -c >> ./output.gz'] + """ + sections = toc_file['sections'] + result = [] + + current_sequence = long(0) + i=0 + # skip to the section that contains my starting sequence + while i < len(sections) and start_sequence >= current_sequence + long(sections[i]['clusters']): + current_sequence += long(sections[i]['clusters']) + i += 1 + if i == len(sections): # bad input data! 
+ raise Exception('No FQTOC section contains starting sequence %s' % start_sequence) + + # These two variables act as an accumulator for consecutive entire blocks that + # can be copied verbatim (without decompressing) + start_chunk = long(-1) + end_chunk = long(-1) + copy_chunk_cmd = 'dd bs=1 skip=%s count=%s if=%s 2> /dev/null >> %s' + + while sequence_count > 0 and i < len(sections): + # we need to extract partial data. So, find the byte offsets of the chunks that contain the data we need + # use a combination of dd (to pull just the right sections out) tail (to skip lines) and head (to get the + # right number of lines + sequences = long(sections[i]['clusters']) + skip_sequences = start_sequence-current_sequence + sequences_to_extract = min(sequence_count, sequences-skip_sequences) + start_copy = long(sections[i]['start']) + end_copy = long(sections[i]['end']) + if sequences_to_extract < sequences: + if start_chunk > -1: + result.append(copy_chunk_cmd % (start_chunk, end_chunk-start_chunk, input_name, output_name)) + start_chunk = -1 + # extract, unzip, trim, recompress + result.append('(dd bs=1 skip=%s count=%s if=%s 2> /dev/null )| zcat | ( tail -n +%s 2> /dev/null) | head -%s | gzip -c >> %s' % + (start_copy, end_copy-start_copy, input_name, skip_sequences*4+1, sequences_to_extract*4, output_name)) + else: # whole section - add it to the start_chunk/end_chunk accumulator + if start_chunk == -1: + start_chunk = start_copy + end_chunk = end_copy + sequence_count -= sequences_to_extract + start_sequence += sequences_to_extract + current_sequence += sequences + i += 1 + if start_chunk > -1: + result.append(copy_chunk_cmd % (start_chunk, end_chunk-start_chunk, input_name, output_name)) - if (current_part != part or part_file is None): - if (part_file): - part_file.close() - part = current_part - part_dir = get_dir() - part_path = os.path.join(part_dir, os.path.basename(input_file)) -# TODO: If the input was compressed, compress the output? - part_file = open(part_path, 'w') - local_clusters_per_file.append(default_clusters) - part_file.write(line) - if (part_file): - part_file.close() - in_file.close() - local_clusters_per_file[part] = remainder + 1 - return local_clusters_per_file - - def split_to_size(input_file, get_dir, clusters_per_file): - """ - Split the files beyond the 0th to the same number of clusters as the 0th. - This is used to split in a variety of ways, so these are both legal for - clusters_per_file: - [ 10000, 10000, 10000, 10000, 2 ] # to_size=10000, 40002 total - [ 10001, 10001, 10000, 10000 ] # number_of_parts = 4, 40002 total + if sequence_count > 0: + raise Exception('%s sequences not found in file' % sequence_count) + + return result + get_split_commands_with_toc = staticmethod(get_split_commands_with_toc) - """ - compress = is_gzip(input_file) - if compress: -# TODO: Python 2.4, 2.5 don't have io.BufferedReader!!! 
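Two command templates do all the work in get_split_commands_with_toc. Whole TOC sections are copied byte-for-byte with dd - no decompression, and since each section is a complete gzip member, appending it keeps the output a valid multi-member gzip - while partial sections are decompressed, trimmed by line, and recompressed. As a sketch:

    def whole_section_cmd(start, end, input_name, output_name):
        # Byte-exact copy of complete gzip member(s); nothing is decompressed.
        return 'dd bs=1 skip=%d count=%d if=%s 2> /dev/null >> %s' % (
            start, end - start, input_name, output_name)

    def partial_section_cmd(start, end, skip_records, take_records,
                            input_name, output_name):
        # Decompress one section, skip skip_records*4 lines, keep
        # take_records*4 lines, then recompress onto the output.
        return ('(dd bs=1 skip=%d count=%d if=%s 2> /dev/null )| zcat '
                '| ( tail -n +%d 2> /dev/null) | head -%d | gzip -c >> %s' % (
                    start, end - start, input_name,
                    skip_records * 4 + 1, take_records * 4, output_name))

The start_chunk/end_chunk accumulator exists so that runs of consecutive whole sections collapse into a single dd call rather than one per section.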
-# add a buffered reader because gzip is really slow before python 2.7 - in_file = gzip.GzipFile(input_file, 'r') - else: - in_file = open(input_file, 'rt') - part_file = None - part = 0 - clusters_this_part = 0 - for i, line in enumerate(in_file): - cluster_number, line_in_cluster = divmod(i, 4) - if clusters_this_part == clusters_per_file[part]: - current_part = part + 1 - else: - current_part = part - - if (current_part != part or part_file is None): - if (part_file): - part_file.close() - part = current_part - clusters_this_part = 0 - part_dir = get_dir() - part_path = os.path.join(part_dir, os.path.basename(input_file)) -# TODO: If the input was compressed, compress the output? - part_file = open(part_path, 'w') - if clusters_per_file is None and part > 0: - local_clusters_per_file.append(default_clusters) - part_file.write(line) - if line_in_cluster == 3: - clusters_this_part += 1 - if (part_file): - part_file.close() - in_file.close() - directories = [] - def create_subdir(): - dir = subdir_generator_function() - directories.append(dir) - return dir + def get_split_commands_sequential(is_compressed, input_name, output_name, start_sequence, sequence_count): + """ + Does a brain-dead sequential scan & extract of certain sequences + >>> Sequence.get_split_commands_sequential(True, './input.gz', './output.gz', start_sequence=0, sequence_count=10) + ['zcat "./input.gz" | ( tail -n +1 2> /dev/null) | head -40 | gzip -c > "./output.gz"'] + >>> Sequence.get_split_commands_sequential(False, './input.fastq', './output.fastq', start_sequence=10, sequence_count=10) + ['tail -n +41 "./input.fastq" 2> /dev/null | head -40 > "./output.fastq"'] + """ + start_line = start_sequence * 4 + line_count = sequence_count * 4 + # TODO: verify that tail can handle 64-bit numbers + if is_compressed: + cmd = 'zcat "%s" | ( tail -n +%s 2> /dev/null) | head -%s | gzip -c' % (input_name, start_line+1, line_count) + else: + cmd = 'tail -n +%s "%s" 2> /dev/null | head -%s' % (start_line+1, input_name, line_count) + cmd += ' > "%s"' % output_name - clusters_per_file = None - if split_params['split_mode'] == 'number_of_parts': - # legacy splitting. 
To keep things simple, just scan the 0th file and count the clusters, - # then split it - clusters_per_file = [] - in_file = open(input_files[0], 'rt') - for i, line in enumerate(in_file): - pass - in_file.close() - length = (i+1)/4 - - if length <= 0: - raise Exception('Invalid sequence file %s' % input_files[0]) - parts = int(split_params['split_size']) - if length < parts: - parts = length - len_each, remainder = divmod(length, parts) - while length > 0: - chunk = len_each - if remainder > 0: - chunk += 1 - clusters_per_file.append(chunk) - remainder=- 1 - length -= chunk - split_to_size(input_files[0], create_subdir, clusters_per_file) - elif split_params['split_mode'] == 'to_size': - # split one file and see what the cluster sizes turn out to be - clusters_per_file = split_calculate_clusters(input_files[0], create_subdir, - int(split_params['split_size'])) - else: - raise Exception('Unsupported split mode %s' % split_params['split_mode']) - - # split the rest, using the same number of clusters for each file - current_dir_idx = [0] # use a list to get around Python 2.x lame closure support - def get_subdir(): - if len(directories) <= current_dir_idx[0]: - raise Exception('FASTQ files do not have the same number of clusters - splitting failed') - result = directories[current_dir_idx[0]] - current_dir_idx[0] = current_dir_idx[0] + 1 - return result - - for i in range(1, len(input_files)): - current_dir_idx[0] = 0 - split_to_size(input_files[i], get_subdir, clusters_per_file) - split = staticmethod(split) + return [cmd] + get_split_commands_sequential = staticmethod(get_split_commands_sequential) @@ -690,3 +829,7 @@ return False except: return False + +if __name__ == '__main__': + import doctest, sys + doctest.testmod(sys.modules[__name__]) \ No newline at end of file --- a/lib/galaxy/jobs/__init__.py Fri Oct 07 13:25:19 2011 -0700 +++ b/lib/galaxy/jobs/__init__.py Fri Oct 07 14:12:40 2011 -0700 @@ -32,9 +32,9 @@ class JobManager( object ): """ Highest level interface to job management. - + TODO: Currently the app accesses "job_queue" and "job_stop_queue" directly. - This should be decoupled. + This should be decoupled. """ def __init__( self, app ): self.app = app @@ -71,7 +71,7 @@ class JobQueue( object ): """ - Job manager, waits for jobs to be runnable and then dispatches to + Job manager, waits for jobs to be runnable and then dispatches to a JobRunner. """ STOP_SIGNAL = object() @@ -95,7 +95,7 @@ self.running = True self.dispatcher = dispatcher self.monitor_thread = threading.Thread( target=self.__monitor ) - self.monitor_thread.start() + self.monitor_thread.start() log.info( "job manager started" ) if app.config.get_bool( 'enable_job_recovery', True ): self.__check_jobs_at_startup() @@ -132,7 +132,7 @@ def __monitor( self ): """ - Continually iterate the waiting jobs, checking is each is ready to + Continually iterate the waiting jobs, checking is each is ready to run and dispatching if so. """ # HACK: Delay until after forking, we need a way to do post fork notification!!! 
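The off-by-one trap in the sequential fallback is the tail convention: tail -n +K prints from line K onward (1-based), while record N begins after N*4 lines. Checking the arithmetic against the doctest values above:

    start_sequence, sequence_count = 10, 10
    start_line = start_sequence * 4   # 40 lines precede record 10
    line_count = sequence_count * 4   # 40 lines to extract
    cmd = 'tail -n +%d "%s" 2> /dev/null | head -%d > "%s"' % (
        start_line + 1, './input.fastq', line_count, './output.fastq')
    assert cmd == 'tail -n +41 "./input.fastq" 2> /dev/null | head -40 > "./output.fastq"'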
@@ -180,12 +180,12 @@ jobs_to_check.append( self.sa_session.query( model.Job ).get( job_id ) ) except Empty: pass - # Iterate over new and waiting jobs and look for any that are + # Iterate over new and waiting jobs and look for any that are # ready to run new_waiting_jobs = [] for job in jobs_to_check: try: - # Check the job's dependencies, requeue if they're not done + # Check the job's dependencies, requeue if they're not done job_state = self.__check_if_ready_to_run( job ) if job_state == JOB_WAIT: if not self.track_jobs_in_database: @@ -216,7 +216,7 @@ self.waiting_jobs = new_waiting_jobs # Done with the session self.sa_session.remove() - + def __check_if_ready_to_run( self, job ): """ Check if a job is ready to run by verifying that each of its input @@ -281,13 +281,13 @@ if len( user_jobs ) >= self.app.config.user_job_limit: return JOB_WAIT return JOB_READY - + def put( self, job_id, tool ): """Add a job to the queue (by job identifier)""" if not self.track_jobs_in_database: self.queue.put( ( job_id, tool.id ) ) self.sleeper.wake() - + def shutdown( self ): """Attempts to gracefully shut down the worker thread""" if self.parent_pid != os.getpid(): @@ -304,7 +304,7 @@ class JobWrapper( object ): """ - Wraps a 'model.Job' with convenience methods for running processes and + Wraps a 'model.Job' with convenience methods for running processes and state management. """ def __init__( self, job, queue ): @@ -330,15 +330,15 @@ self.output_dataset_paths = None self.tool_provided_job_metadata = None # Wrapper holding the info required to restore and clean up from files used for setting metadata externally - self.external_output_metadata = metadata.JobExternalOutputMetadataWrapper( job ) - + self.external_output_metadata = metadata.JobExternalOutputMetadataWrapper( job ) + def get_job( self ): return self.sa_session.query( model.Job ).get( self.job_id ) - + def get_id_tag(self): # For compatability with drmaa, which uses job_id right now, and TaskWrapper return str(self.job_id) - + def get_param_dict( self ): """ Restore the dictionary of parameters from the database. @@ -347,10 +347,10 @@ param_dict = dict( [ ( p.name, p.value ) for p in job.parameters ] ) param_dict = self.tool.params_from_strings( param_dict, self.app ) return param_dict - + def get_version_string_path( self ): return os.path.abspath(os.path.join(self.app.config.new_file_path, "GALAXY_VERSION_STRING_%s" % self.job_id)) - + def prepare( self ): """ Prepare the job to run by creating the working directory and the @@ -372,9 +372,9 @@ out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] ) inp_data.update( [ ( da.name, da.dataset ) for da in job.input_library_datasets ] ) out_data.update( [ ( da.name, da.dataset ) for da in job.output_library_datasets ] ) - - # Set up output dataset association for export history jobs. Because job - # uses a Dataset rather than an HDA or LDA, it's necessary to set up a + + # Set up output dataset association for export history jobs. Because job + # uses a Dataset rather than an HDA or LDA, it's necessary to set up a # fake dataset association that provides the needed attributes for # preparing a job. class FakeDatasetAssociation ( object ): @@ -401,7 +401,7 @@ # ( this used to be performed in the "exec_before_job" hook, but hooks are deprecated ). 
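Only the user_job_limit tail of __check_if_ready_to_run is visible in the hunk above; a condensed sketch of the overall gate it implements, assuming simplified stand-in objects and state names rather than Galaxy's real model classes:

JOB_WAIT, JOB_ERROR, JOB_READY = 'wait', 'error', 'ready'

def check_if_ready_to_run(job, user_running_jobs, user_job_limit):
    # a job waits while any input dataset is still being produced,
    # and can never run if an input ended up in a failed state
    for dataset in job.input_datasets:
        if dataset.state in ('new', 'queued', 'running', 'setting_metadata'):
            return JOB_WAIT
        if dataset.state != 'ok':
            return JOB_ERROR
    # per-user throttle, as in the user_job_limit check above
    if user_job_limit is not None and user_running_jobs >= user_job_limit:
        return JOB_WAIT
    return JOB_READY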
self.tool.exec_before_job( self.queue.app, inp_data, out_data, param_dict ) # Run the before queue ("exec_before_job") hook - self.tool.call_hook( 'exec_before_job', self.queue.app, inp_data=inp_data, + self.tool.call_hook( 'exec_before_job', self.queue.app, inp_data=inp_data, out_data=out_data, tool=self.tool, param_dict=incoming) self.sa_session.flush() # Build any required config files @@ -434,7 +434,7 @@ def fail( self, message, exception=False ): """ - Indicate job failure by setting state and message on all output + Indicate job failure by setting state and message on all output datasets. """ job = self.get_job() @@ -480,7 +480,7 @@ if self.tool: self.tool.job_failed( self, message, exception ) self.cleanup() - + def change_state( self, state, info = False ): job = self.get_job() self.sa_session.refresh( job ) @@ -510,12 +510,12 @@ job.job_runner_external_id = external_id self.sa_session.add( job ) self.sa_session.flush() - + def finish( self, stdout, stderr ): """ - Called to indicate that the associated command has been run. Updates + Called to indicate that the associated command has been run. Updates the output datasets based on stderr and stdout from the command, and - the contents of the output files. + the contents of the output files. """ # default post job setup self.sa_session.expunge_all() @@ -537,7 +537,7 @@ if os.path.exists(version_filename): self.version_string = open(version_filename).read() os.unlink(version_filename) - + if self.app.config.outputs_to_working_directory: for dataset_path in self.get_output_fnames(): try: @@ -585,7 +585,7 @@ else: # Security violation. log.exception( "from_work_dir specified a location not in the working directory: %s, %s" % ( source_file, self.working_directory ) ) - + dataset.blurb = 'done' dataset.peek = 'no peek' dataset.info = context['stdout'] + context['stderr'] @@ -600,7 +600,7 @@ dataset.init_meta( copy_from=dataset ) #if a dataset was copied, it won't appear in our dictionary: #either use the metadata from originating output dataset, or call set_meta on the copies - #it would be quicker to just copy the metadata from the originating output dataset, + #it would be quicker to just copy the metadata from the originating output dataset, #but somewhat trickier (need to recurse up the copied_from tree), for now we'll call set_meta() if not self.app.config.set_metadata_externally or \ ( not self.external_output_metadata.external_metadata_set_successfully( dataset, self.sa_session ) \ @@ -612,7 +612,7 @@ #load metadata from file #we need to no longer allow metadata to be edited while the job is still running, #since if it is edited, the metadata changed on the running output will no longer match - #the metadata that was stored to disk for use via the external process, + #the metadata that was stored to disk for use via the external process, #and the changes made by the user will be lost, without warning or notice dataset.metadata.from_JSON_dict( self.external_output_metadata.get_output_filenames_by_dataset( dataset, self.sa_session ).filename_out ) try: @@ -653,13 +653,13 @@ # Flush all the dataset and job changes above. Dataset state changes # will now be seen by the user. 
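The "security violation" branch above rejects a from_work_dir path that points outside the job's working directory. Roughly, the containment check amounts to the following (a sketch of the intent, not Galaxy's exact code):

import os

def resolve_from_work_dir(working_directory, from_work_dir):
    work = os.path.abspath(working_directory)
    source = os.path.abspath(os.path.join(work, from_work_dir))
    # anything that resolves outside the working directory (e.g. '../../etc')
    # is refused rather than copied over the output dataset
    if not (source == work or source.startswith(work + os.sep)):
        raise ValueError('from_work_dir is not in the working directory: %s' % from_work_dir)
    return source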
self.sa_session.flush() - # Save stdout and stderr + # Save stdout and stderr if len( stdout ) > 32768: log.error( "stdout for job %d is greater than 32K, only first part will be logged to database" % job.id ) job.stdout = stdout[:32768] if len( stderr ) > 32768: log.error( "stderr for job %d is greater than 32K, only first part will be logged to database" % job.id ) - job.stderr = stderr[:32768] + job.stderr = stderr[:32768] # custom post process setup inp_data = dict( [ ( da.name, da.dataset ) for da in job.input_datasets ] ) out_data = dict( [ ( da.name, da.dataset ) for da in job.output_datasets ] ) @@ -676,8 +676,8 @@ # ( this used to be performed in the "exec_after_process" hook, but hooks are deprecated ). self.tool.exec_after_process( self.queue.app, inp_data, out_data, param_dict, job = job ) # Call 'exec_after_process' hook - self.tool.call_hook( 'exec_after_process', self.queue.app, inp_data=inp_data, - out_data=out_data, param_dict=param_dict, + self.tool.call_hook( 'exec_after_process', self.queue.app, inp_data=inp_data, + out_data=out_data, param_dict=param_dict, tool=self.tool, stdout=stdout, stderr=stderr ) job.command_line = self.command_line @@ -696,7 +696,7 @@ self.sa_session.flush() log.debug( 'job %d ended' % self.job_id ) self.cleanup() - + def cleanup( self ): # remove temporary files try: @@ -710,10 +710,10 @@ galaxy.tools.imp_exp.JobImportHistoryArchiveWrapper( self.job_id ).cleanup_after_job( self.sa_session ) except: log.exception( "Unable to cleanup job %d" % self.job_id ) - + def get_command_line( self ): return self.command_line - + def get_session_id( self ): return self.session_id @@ -884,13 +884,17 @@ Should be refactored into a generalized executable unit wrapper parent, then jobs and tasks. """ # Abstract this to be more useful for running tasks that *don't* necessarily compose a job. - + def __init__(self, task, queue): super(TaskWrapper, self).__init__(task.job, queue) self.task_id = task.id self.working_directory = task.working_directory + if task.prepare_input_files_cmd is not None: + self.prepare_input_files_cmds = [ task.prepare_input_files_cmd ] + else: + self.prepare_input_files_cmds = None self.status = task.states.NEW - + def get_job( self ): if self.job_id: return self.sa_session.query( model.Job ).get( self.job_id ) @@ -953,7 +957,7 @@ # ( this used to be performed in the "exec_before_job" hook, but hooks are deprecated ). self.tool.exec_before_job( self.queue.app, inp_data, out_data, param_dict ) # Run the before queue ("exec_before_job") hook - self.tool.call_hook( 'exec_before_job', self.queue.app, inp_data=inp_data, + self.tool.call_hook( 'exec_before_job', self.queue.app, inp_data=inp_data, out_data=out_data, tool=self.tool, param_dict=incoming) self.sa_session.flush() # Build any required config files @@ -1000,12 +1004,12 @@ task.state = state self.sa_session.add( task ) self.sa_session.flush() - + def get_state( self ): task = self.get_task() self.sa_session.refresh( task ) return task.state - + def set_runner( self, runner_url, external_id ): task = self.get_task() self.sa_session.refresh( task ) @@ -1014,15 +1018,15 @@ # DBTODO Check task job_runner_stuff self.sa_session.add( task ) self.sa_session.flush() - + def finish( self, stdout, stderr ): # DBTODO integrate previous finish logic. # Simple finish for tasks. Just set the flag OK. log.debug( 'task %s for job %d ended' % (self.task_id, self.job_id) ) """ - Called to indicate that the associated command has been run. 
Updates + Called to indicate that the associated command has been run. Updates the output datasets based on stderr and stdout from the command, and - the contents of the output files. + the contents of the output files. """ # default post job setup_external_metadata self.sa_session.expunge_all() @@ -1039,7 +1043,7 @@ task.state = task.states.ERROR else: task.state = task.states.OK - # Save stdout and stderr + # Save stdout and stderr if len( stdout ) > 32768: log.error( "stdout for task %d is greater than 32K, only first part will be logged to database" % task.id ) task.stdout = stdout[:32768] @@ -1053,7 +1057,7 @@ def cleanup( self ): # There is no task cleanup. The job cleans up for all tasks. pass - + def get_command_line( self ): return self.command_line @@ -1063,7 +1067,7 @@ def get_output_file_id( self, file ): # There is no permanent output file for tasks. return None - + def get_tool_provided_job_metadata( self ): # DBTODO Handle this as applicable for tasks. return None @@ -1085,7 +1089,7 @@ def setup_external_metadata( self, exec_dir = None, tmp_dir = None, dataset_files_path = None, config_root = None, datatypes_config = None, set_extension = True, **kwds ): # There is no metadata setting for tasks. This is handled after the merge, at the job level. return "" - + class DefaultJobDispatcher( object ): def __init__( self, app ): self.app = app @@ -1115,7 +1119,7 @@ runner = getattr( module, obj ) self.job_runners[name] = runner( self.app ) log.debug( 'Loaded job runner: %s' % display_name ) - + def put( self, job_wrapper ): try: if self.app.config.use_tasked_jobs and job_wrapper.tool.parallelism is not None: @@ -1126,8 +1130,8 @@ self.job_runners[runner_name].put( job_wrapper ) else: runner_name = "tasks" - log.debug( "dispatching job %d to %s runner" %( job_wrapper.job_id, runner_name ) ) - self.job_runners[runner_name].put( job_wrapper ) + log.debug( "dispatching job %d to %s runner" %( job_wrapper.job_id, runner_name ) ) + self.job_runners[runner_name].put( job_wrapper ) else: runner_name = ( job_wrapper.tool.job_runner.split(":", 1) )[0] log.debug( "dispatching job %d to %s runner" %( job_wrapper.job_id, runner_name ) ) @@ -1183,7 +1187,7 @@ self.sleeper = Sleeper() self.running = True self.monitor_thread = threading.Thread( target=self.monitor ) - self.monitor_thread.start() + self.monitor_thread.start() log.info( "job stopper started" ) def monitor( self ): @@ -1263,4 +1267,3 @@ return def shutdown( self ): return - --- a/lib/galaxy/jobs/runners/__init__.py Fri Oct 07 13:25:19 2011 -0700 +++ b/lib/galaxy/jobs/runners/__init__.py Fri Oct 07 14:12:40 2011 -0700 @@ -6,6 +6,7 @@ Compose the sequence of commands necessary to execute a job. 
This will currently include: - environment settings corresponding to any requirement tags + - preparing input files - command line taken from job wrapper - commands to set metadata (if include_metadata is True) """ @@ -17,10 +18,13 @@ # Prepend version string if job_wrapper.version_string_cmd: commands = "%s &> %s; " % ( job_wrapper.version_string_cmd, job_wrapper.get_version_string_path() ) + commands + # prepend getting input files (if defined) + if hasattr(job_wrapper, 'prepare_input_files_cmds') and job_wrapper.prepare_input_files_cmds is not None: + commands = "; ".join( job_wrapper.prepare_input_files_cmds + [ commands ] ) # Prepend dependency injection if job_wrapper.dependency_shell_commands: commands = "; ".join( job_wrapper.dependency_shell_commands + [ commands ] ) - + # Append metadata setting commands, we don't want to overwrite metadata # that was copied over in init_meta(), as per established behavior if include_metadata and self.app.config.set_metadata_externally: --- a/lib/galaxy/jobs/runners/lwr.py Fri Oct 07 13:25:19 2011 -0700 +++ b/lib/galaxy/jobs/runners/lwr.py Fri Oct 07 14:12:40 2011 -0700 @@ -249,6 +249,12 @@ try: job_wrapper.prepare() + if hasattr(job_wrapper, 'prepare_input_files_cmds') and job_wrapper.prepare_input_files_cmds is not None: + for cmd in job_wrapper.prepare_input_files_cmds: # run the commands to stage the input files + #log.debug( 'executing: %s' % cmd ) + if 0 != os.system(cmd): + raise Exception('Error running file staging command: %s' % cmd) + job_wrapper.prepare_input_files_cmds = None # prevent them from being used in-line command_line = self.build_command_line( job_wrapper, include_metadata=False ) except: job_wrapper.fail( "failure preparing job", exception=True ) --- a/lib/galaxy/jobs/splitters/multi.py Fri Oct 07 13:25:19 2011 -0700 +++ b/lib/galaxy/jobs/splitters/multi.py Fri Oct 07 14:12:40 2011 -0700 @@ -1,5 +1,6 @@ import os, logging, shutil -from galaxy import model +from galaxy import model, util + log = logging.getLogger( __name__ ) @@ -54,7 +55,7 @@ raise Exception(log_error) # split the first one to build up the task directories - input_files = [] + input_datasets = [] for input in parent_job.input_datasets: if input.name in split_inputs: this_input_files = job_wrapper.get_input_dataset_fnames(input.dataset) if len(this_input_files) > 1: log_error = "The input '%s' is composed of multiple files - splitting is not allowed" % str(input.name) log.error(log_error) raise Exception(log_error) - input_files.extend(this_input_files) + input_datasets.append(input.dataset) input_type = type_to_input_map.keys()[0] # DBTODO execute an external task to do the splitting, this should happen at refactor. # If the number of tasks is sufficiently high, we can use it to calculate job completion % and give a running status.
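The ordering in build_command_line above matters: input staging is prepended first and dependency injection after it, so on the final shell line the dependency commands run first, then the staging commands, then the tool. A toy illustration of the same "; ".join composition (names invented for the example):

def compose(tool_command, prepare_input_files_cmds=None, dependency_shell_commands=None):
    commands = tool_command
    if prepare_input_files_cmds:
        # stage the split input files ahead of the tool itself
        commands = '; '.join(prepare_input_files_cmds + [commands])
    if dependency_shell_commands:
        # environment setup ends up outermost, i.e. it runs first
        commands = '; '.join(dependency_shell_commands + [commands])
    return commands

assert compose('tool.sh reads.fastq', ['sh extract_dataset_parts.sh task_0'], ['. ./env.sh']) == '. ./env.sh; sh extract_dataset_parts.sh task_0; tool.sh reads.fastq'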
try: - input_type.split(input_files, get_new_working_directory_name, parallel_settings) + input_type.split(input_datasets, get_new_working_directory_name, parallel_settings) except AttributeError: log_error = "The type '%s' does not define a method for splitting files" % str(input_type) log.error(log_error) @@ -82,8 +83,9 @@ for file in names: os.symlink(file, os.path.join(dir, os.path.basename(file))) tasks = [] + prepare_files = os.path.join(util.galaxy_directory(), 'extract_dataset_parts.sh') + ' %s' for dir in task_dirs: - task = model.Task(parent_job, dir) + task = model.Task(parent_job, dir, prepare_files % dir) tasks.append(task) return tasks @@ -106,44 +108,51 @@ illegal_outputs = [x for x in merge_outputs if x in pickone_outputs] if len(illegal_outputs) > 0: - raise Exception("Outputs have conflicting parallelism attributes: %s" % str( illegal_outputs )) + return ('Tool file error', 'Outputs have conflicting parallelism attributes: %s' % str( illegal_outputs )) + + stdout = '' + stderr = '' + + try: + working_directory = job_wrapper.working_directory + task_dirs = [os.path.join(working_directory, x) for x in os.listdir(working_directory) if x.startswith('task_')] + # TODO: Output datasets can be very complex. This doesn't handle metadata files + outputs = job_wrapper.get_output_datasets_and_fnames() + pickone_done = [] + task_dirs = [os.path.join(working_directory, x) for x in os.listdir(working_directory) if x.startswith('task_')] + for output in outputs: + output_file_name = str(outputs[output][1]) + base_output_name = os.path.basename(output_file_name) + if output in merge_outputs: + output_type = outputs[output][0].datatype + output_files = [os.path.join(dir,base_output_name) for dir in task_dirs] + log.debug('files %s ' % output_files) + output_type.merge(output_files, output_file_name) + log.debug('merge finished: %s' % output_file_name) + pass # TODO: merge all the files + elif output in pickone_outputs: + # just pick one of them + if output not in pickone_done: + task_file_name = os.path.join(task_dirs[0], base_output_name) + shutil.move( task_file_name, output_file_name ) + pickone_done.append(output) + else: + log_error = "The output '%s' does not define a method for implementing parallelism" % output + log.error(log_error) + raise Exception(log_error) + except Exception, e: + stdout = 'Error merging files'; + stderr = str(e) + - - working_directory = job_wrapper.working_directory - task_dirs = [os.path.join(working_directory, x) for x in os.listdir(working_directory) if x.startswith('task_')] - # TODO: Output datasets can be very complex. 
This doesn't handle metadata files - outputs = job_wrapper.get_output_datasets_and_fnames() - pickone_done = [] - task_dirs = [os.path.join(working_directory, x) for x in os.listdir(working_directory) if x.startswith('task_')] - for output in outputs: - output_file_name = str(outputs[output][1]) - base_output_name = os.path.basename(output_file_name) - if output in merge_outputs: - output_type = outputs[output][0].datatype - output_files = [os.path.join(dir,base_output_name) for dir in task_dirs] - log.debug('files %s ' % output_files) - output_type.merge(output_files, output_file_name) - log.debug('merge finished: %s' % output_file_name) - pass # TODO: merge all the files - elif output in pickone_outputs: - # just pick one of them - if output not in pickone_done: - task_file_name = os.path.join(task_dirs[0], base_output_name) - shutil.move( task_file_name, output_file_name ) - pickone_done.append(output) - else: - log_error = "The output '%s' does not define a method for implementing parallelism" % output - log.error(log_error) - raise Exception(log_error) - - stdout = '' - stderr='' for tw in task_wrappers: # Prevent repetitive output, e.g. "Sequence File Aligned"x20 # Eventually do a reduce for jobs that output "N reads mapped", combining all N for tasks. - if stdout.strip() != tw.get_task().stdout.strip(): - stdout += tw.get_task().stdout - if stderr.strip() != tw.get_task().stderr.strip(): - stderr += tw.get_task().stderr + out = tw.get_task().stdout.strip() + err = tw.get_task().stderr.strip() + if len(out) > 0: + stdout += tw.working_directory + ':\n' + out + if len(err) > 0: + stderr += tw.working_directory + ':\n' + err return (stdout, stderr) --- a/lib/galaxy/model/__init__.py Fri Oct 07 13:25:19 2011 -0700 +++ b/lib/galaxy/model/__init__.py Fri Oct 07 14:12:40 2011 -0700 @@ -204,29 +204,19 @@ ERROR = 'error', DELETED = 'deleted' ) - def __init__( self, job, part_file = None ): + def __init__( self, job, working_directory, prepare_files_cmd ): self.command_line = None self.parameters = [] self.state = Task.states.NEW self.info = None - # TODO: Rename this to working_directory - # Does this necessitate a DB migration step? 
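multi.py above now builds one Task per task_N directory, handing each the extract_dataset_parts.sh command that will re-create its input slice on the execution host; the Task constructor change just above is what carries that command. Condensed, with a stand-in Task class for illustration (build_tasks and galaxy_root are invented names):

import os

class Task(object):
    # stand-in mirroring the new galaxy.model.Task constructor above
    def __init__(self, job, working_directory, prepare_files_cmd):
        self.job = job
        self.working_directory = working_directory
        self.prepare_input_files_cmd = prepare_files_cmd

def build_tasks(parent_job, task_dirs, galaxy_root):
    # one task per split directory, each carrying the shell command that
    # extracts its slice of the input (cf. extract_dataset_parts.sh)
    prepare_files = os.path.join(galaxy_root, 'extract_dataset_parts.sh') + ' %s'
    return [Task(parent_job, d, prepare_files % d) for d in task_dirs]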
- self.part_file = part_file + self.working_directory = working_directory self.task_runner_name = None self.task_runner_external_id = None self.job = job self.stdout = None self.stderr = None + self.prepare_input_files_cmd = prepare_files_cmd - @property - def working_directory(self): - if self.part_file is not None: - if not os.path.isdir(self.part_file): - return os.path.dirname(self.part_file) - else: - return self.part_file - return None - def set_state( self, state ): self.state = state @@ -907,7 +897,9 @@ def get_converted_files_by_type( self, file_type ): for assoc in self.implicitly_converted_datasets: if not assoc.deleted and assoc.type == file_type: - return assoc.dataset + if assoc.dataset: + return assoc.dataset + return assoc.dataset_ldda return None def get_converted_dataset_deps(self, trans, target_ext): """ @@ -1599,7 +1591,12 @@ class ImplicitlyConvertedDatasetAssociation( object ): def __init__( self, id = None, parent = None, dataset = None, file_type = None, deleted = False, purged = False, metadata_safe = True ): self.id = id - self.dataset = dataset + if isinstance(dataset, HistoryDatasetAssociation): + self.dataset = dataset + elif isinstance(dataset, LibraryDatasetDatasetAssociation): + self.dataset_ldda = dataset + else: + raise AttributeError, 'Unknown dataset type provided for dataset: %s' % type( dataset ) if isinstance(parent, HistoryDatasetAssociation): self.parent_hda = parent elif isinstance(parent, LibraryDatasetDatasetAssociation): --- a/lib/galaxy/model/mapping.py Fri Oct 07 13:25:19 2011 -0700 +++ b/lib/galaxy/model/mapping.py Fri Oct 07 14:12:40 2011 -0700 @@ -148,6 +148,7 @@ Column( "create_time", DateTime, default=now ), Column( "update_time", DateTime, default=now, onupdate=now ), Column( "hda_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True, nullable=True ), + Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True, nullable=True ), Column( "hda_parent_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True ), Column( "ldda_parent_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True ), Column( "deleted", Boolean, index=True, default=False ), @@ -469,9 +470,10 @@ Column( "stderr", TEXT ), Column( "traceback", TEXT ), Column( "job_id", Integer, ForeignKey( "job.id" ), index=True, nullable=False ), - Column( "part_file", String(1024)), + Column( "working_directory", String(1024)), Column( "task_runner_name", String( 255 ) ), - Column( "task_runner_external_id", String( 255 ) ) ) + Column( "task_runner_external_id", String( 255 ) ), + Column( "prepare_input_files_cmd", TEXT ) ) PostJobAction.table = Table("post_job_action", metadata, Column("id", Integer, primary_key=True), @@ -1211,6 +1213,9 @@ LibraryDatasetDatasetAssociation, primaryjoin=( ImplicitlyConvertedDatasetAssociation.table.c.ldda_parent_id == LibraryDatasetDatasetAssociation.table.c.id ) ), + dataset_ldda=relation( + LibraryDatasetDatasetAssociation, + primaryjoin=( ImplicitlyConvertedDatasetAssociation.table.c.ldda_id == LibraryDatasetDatasetAssociation.table.c.id ) ), dataset=relation( HistoryDatasetAssociation, primaryjoin=( ImplicitlyConvertedDatasetAssociation.table.c.hda_id == HistoryDatasetAssociation.table.c.id ) ) ) ) @@ -1594,7 +1599,7 @@ annotations=relation( PageAnnotationAssociation, order_by=PageAnnotationAssociation.table.c.id, backref="pages" ), ratings=relation( PageRatingAssociation, order_by=PageRatingAssociation.table.c.id, backref="pages" ) ) ) - + assign_mapper( 
context, ToolShedRepository, ToolShedRepository.table ) # Set up proxy so that --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/lib/galaxy/model/migrate/versions/0083_add_prepare_files_to_task.py Fri Oct 07 14:12:40 2011 -0700 @@ -0,0 +1,63 @@ +""" +Migration script to add 'prepare_input_files_cmd' column to the task table and to rename a column. +""" + +from sqlalchemy import * +from sqlalchemy.orm import * +from migrate import * +from migrate.changeset import * + +import logging +log = logging.getLogger( __name__ ) + +metadata = MetaData( migrate_engine ) +db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) ) + +def upgrade(): + print __doc__ + metadata.reflect() + try: + task_table = Table( "task", metadata, autoload=True ) + c = Column( "prepare_input_files_cmd", TEXT, nullable=True ) + c.create( task_table ) + assert c is task_table.c.prepare_input_files_cmd + except Exception, e: + print "Adding prepare_input_files_cmd column to task table failed: %s" % str( e ) + log.debug( "Adding prepare_input_files_cmd column to task table failed: %s" % str( e ) ) + try: + task_table = Table( "task", metadata, autoload=True ) + c = Column( "working_directory", String ( 1024 ), nullable=True ) + c.create( task_table ) + assert c is task_table.c.working_directory + except Exception, e: + print "Adding working_directory column to task table failed: %s" % str( e ) + log.debug( "Adding working_directory column to task table failed: %s" % str( e ) ) + + # remove the 'part_file' column - nobody used tasks before this, so no data needs to be migrated + try: + task_table.c.part_file.drop() + except Exception, e: + log.debug( "Deleting column 'part_file' from the 'task' table failed: %s" % ( str( e ) ) ) + +def downgrade(): + metadata.reflect() + try: + task_table = Table( "task", metadata, autoload=True ) + task_table.c.prepare_input_files_cmd.drop() + except Exception, e: + print "Dropping prepare_input_files_cmd column from task table failed: %s" % str( e ) + log.debug( "Dropping prepare_input_files_cmd column from task table failed: %s" % str( e ) ) + try: + task_table = Table( "task", metadata, autoload=True ) + task_table.c.working_directory.drop() + except Exception, e: + print "Dropping working_directory column from task table failed: %s" % str( e ) + log.debug( "Dropping working_directory column from task table failed: %s" % str( e ) ) + try: + task_table = Table( "task", metadata, autoload=True ) + c = Column( "part_file", String ( 1024 ), nullable=True ) + c.create( task_table ) + assert c is task_table.c.part_file + except Exception, e: + print "Adding part_file column to task table failed: %s" % str( e ) + log.debug( "Adding part_file column to task table failed: %s" % str( e ) ) --- a/lib/galaxy/util/__init__.py Fri Oct 07 13:25:19 2011 -0700 +++ b/lib/galaxy/util/__init__.py Fri Oct 07 14:12:40 2011 -0700 @@ -623,6 +623,9 @@ gbrowse_build_sites = read_build_sites( os.path.join( galaxy_root_path, "tool-data", "shared", "gbrowse", "gbrowse_build_sites.txt" ) ) genetrack_sites = read_build_sites( os.path.join( galaxy_root_path, "tool-data", "shared", "genetrack", "genetrack_sites.txt" ), check_builds=False ) +def galaxy_directory(): + return os.path.abspath(galaxy_root_path) + if __name__ == '__main__': import doctest, sys doctest.testmod(sys.modules[__name__], verbose=False) --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/scripts/extract_dataset_part.py Fri Oct 07 14:12:40 2011 -0700 @@ -0,0 +1,48 @@ +""" +Reads a JSON file and uses it to 
call into a datatype class to extract +a subset of a dataset for processing. + +Used by jobs that split large files into pieces to be processed concurrently +on a gid in a scatter-gather mode. This does part of the scatter. + +""" +import os +import sys +import logging +logging.basicConfig() +log = logging.getLogger( __name__ ) + +new_path = [ os.path.join( os.getcwd(), "lib" ) ] +new_path.extend( sys.path[1:] ) # remove scripts/ from the path +sys.path = new_path + +from galaxy import eggs +import pkg_resources +pkg_resources.require("simplejson") +import simplejson + +# This junk is here to prevent loading errors +import galaxy.model.mapping #need to load this before we unpickle, in order to setup properties assigned by the mappers +galaxy.model.Job() #this looks REAL stupid, but it is REQUIRED in order for SA to insert parameters into the classes defined by the mappers --> it appears that instantiating ANY mapper'ed class would suffice here +galaxy.datatypes.metadata.DATABASE_CONNECTION_AVAILABLE = False #Let metadata know that there is no database connection, and to just assume object ids are valid + +def __main__(): + """ + Argument: a JSON file + """ + file_path = sys.argv.pop( 1 ) + data = simplejson.load(open(file_path, 'r')) + try: + class_name_parts = data['class_name'].split('.') + module_name = '.'.join(class_name_parts[:-1]) + class_name = class_name_parts[-1] + mod = __import__(module_name, globals(), locals(), [class_name]) + cls = getattr(mod, class_name) + if not cls.process_split_file(data): + sys.stderr.write('Writing split file failed\n') + sys.exit(1) + except Exception, e: + sys.stderr.write(str(e)) + sys.exit(1) + +__main__() http://bitbucket.org/galaxy/galaxy-central/changeset/5b65ae5d04f3/ changeset: 5b65ae5d04f3 user: John Duddy date: 2011-10-10 21:07:19 summary: Fix missing columns for Task, add migration to add ldda for associated types affected #: 3 files (-1 bytes) --- a/lib/galaxy/model/mapping.py Fri Oct 07 14:12:40 2011 -0700 +++ b/lib/galaxy/model/mapping.py Mon Oct 10 12:07:19 2011 -0700 @@ -468,6 +468,7 @@ Column( "runner_name", String( 255 ) ), Column( "stdout", TEXT ), Column( "stderr", TEXT ), + Column( "info", TrimmedString ( 255 ) ), Column( "traceback", TEXT ), Column( "job_id", Integer, ForeignKey( "job.id" ), index=True, nullable=False ), Column( "working_directory", String(1024)), --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/lib/galaxy/model/migrate/versions/0084_add_ldda_id_to_implicit_conversion_table.py Mon Oct 10 12:07:19 2011 -0700 @@ -0,0 +1,35 @@ +""" +Migration script to add 'ldda_id' column to the implicitly_converted_dataset_association table. 
+""" + +from sqlalchemy import * +from sqlalchemy.orm import * +from migrate import * +from migrate.changeset import * + +import logging +log = logging.getLogger( __name__ ) + +metadata = MetaData( migrate_engine ) +db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) ) + +def upgrade(): + print __doc__ + metadata.reflect() + try: + Implicitly_converted_table = Table( "implicitly_converted_dataset_association", metadata, autoload=True ) + c = Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True, nullable=True ) + c.create( Implicitly_converted_table ) + assert c is Implicitly_converted_table.c.ldda_id + except Exception, e: + print "Adding ldda_id column to implicitly_converted_dataset_association table failed: %s" % str( e ) + log.debug( "Adding ldda_id column to implicitly_converted_dataset_association table failed: %s" % str( e ) ) + +def downgrade(): + metadata.reflect() + try: + Implicitly_converted_table = Table( "implicitly_converted_dataset_association", metadata, autoload=True ) + Implicitly_converted_table.c.ldda_id.drop() + except Exception, e: + print "Dropping ldda_id column from implicitly_converted_dataset_association table failed: %s" % str( e ) + log.debug( "Dropping ldda_id column from implicitly_converted_dataset_association table failed: %s" % str( e ) ) --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/lib/galaxy/model/migrate/versions/0085_add_task_info.py Mon Oct 10 12:07:19 2011 -0700 @@ -0,0 +1,35 @@ +""" +Migration script to add 'info' column to the task table. +""" + +from sqlalchemy import * +from sqlalchemy.orm import * +from migrate import * +from migrate.changeset import * + +import logging +log = logging.getLogger( __name__ ) + +metadata = MetaData( migrate_engine ) +db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) ) + +def upgrade(): + print __doc__ + metadata.reflect() + try: + task_table = Table( "task", metadata, autoload=True ) + c = Column( "info", TrimmedString (255) , nullable=True ) + c.create( task_table ) + assert c is task_table.c.info + except Exception, e: + print "Adding info column to table table failed: %s" % str( e ) + log.debug( "Adding info column to task table failed: %s" % str( e ) ) + +def downgrade(): + metadata.reflect() + try: + task_table = Table( "task", metadata, autoload=True ) + task_table.c.info.drop() + except Exception, e: + print "Dropping info column from task table failed: %s" % str( e ) + log.debug( "Dropping info column from task table failed: %s" % str( e ) ) http://bitbucket.org/galaxy/galaxy-central/changeset/dc352138c3de/ changeset: dc352138c3de user: John Duddy date: 2011-10-10 21:17:57 summary: add FQTOC datatype to drive file splitting affected #: 4 files (-1 bytes) --- a/datatypes_conf.xml.sample Mon Oct 10 12:07:19 2011 -0700 +++ b/datatypes_conf.xml.sample Mon Oct 10 12:17:57 2011 -0700 @@ -56,11 +56,22 @@ <converter file="fasta_to_2bit.xml" target_datatype="twobit"/><converter file="fasta_to_len.xml" target_datatype="len"/></datatype> - <datatype extension="fastq" type="galaxy.datatypes.sequence:Fastq" display_in_upload="true"/> - <datatype extension="fastqsanger" type="galaxy.datatypes.sequence:FastqSanger" display_in_upload="true"/> - <datatype extension="fastqsolexa" type="galaxy.datatypes.sequence:FastqSolexa" display_in_upload="true"/> - <datatype extension="fastqcssanger" type="galaxy.datatypes.sequence:FastqCSSanger" display_in_upload="true"/> - <datatype 
extension="fastqillumina" type="galaxy.datatypes.sequence:FastqIllumina" display_in_upload="true"/> + <datatype extension="fastq" type="galaxy.datatypes.sequence:Fastq" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqsanger" type="galaxy.datatypes.sequence:FastqSanger" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqsolexa" type="galaxy.datatypes.sequence:FastqSolexa" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqcssanger" type="galaxy.datatypes.sequence:FastqCSSanger" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqillumina" type="galaxy.datatypes.sequence:FastqIllumina" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fqtoc" type="galaxy.datatypes.sequence:SequenceSplitLocations" display_in_upload="true"/><datatype extension="eland" type="galaxy.datatypes.tabular:Eland" display_in_upload="true"/><datatype extension="elandmulti" type="galaxy.datatypes.tabular:ElandMulti" display_in_upload="true"/><datatype extension="genetrack" type="galaxy.datatypes.tracks:GeneTrack"> --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/lib/galaxy/datatypes/converters/fastq_to_fqtoc.py Mon Oct 10 12:17:57 2011 -0700 @@ -0,0 +1,46 @@ +#!/usr/bin/env python + +import sys, os, gzip +from galaxy.datatypes.checkers import is_gzip + + +def main(): + """ + The format of the file is JSON: + { "sections" : [ + { "start" : "x", "end" : "y", "sequences" : "z" }, + ... + ]} + This works only for UNCOMPRESSED fastq files. The Python GzipFile does not provide seekable + offsets via tell(), so clients just have to split the slow way + """ + input_fname = sys.argv[1] + if is_gzip(input_fname): + print 'Conversion is only possible for uncompressed files' + sys.exit(1) + + out_file = open(sys.argv[2], 'w') + + current_line = 0 + sequences=1000000 + lines_per_chunk = 4*sequences + chunk_begin = 0 + + in_file = open(input_name) + + out_file.write('{"sections" : ['); + + for line in in_file: + current_line += 1 + if 0 == current_line % lines_per_chunk: + chunk_end = in_file.tell() + out_file.write('{"start":"%s","end":"%s","sequences":"%s"},' % (chunk_begin, chunk_end, sequences)) + chunk_begin = chunk_end + + chunk_end = in_file.tell() + out_file.write('{"start":"%s","end":"%s","sequences":"%s"}' % (chunk_begin, chunk_end, (current_line % lines_per_chunk) / 4)) + out_file.write(']}\n') + + +if __name__ == "__main__": + main() --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/lib/galaxy/datatypes/converters/fastq_to_fqtoc.xml Mon Oct 10 12:17:57 2011 -0700 @@ -0,0 +1,13 @@ +<tool id="CONVERTER_fastq_to_fqtoc0" name="Convert FASTQ files to seek locations" version="1.0.0" hidden="true"> + <command interpreter="python">fastq_to_fqtoc.py $input1 $output1</command> + <inputs> + <page> + <param format="fastq" name="input1" type="data" label="Choose FASTQ file"/> + </page> + </inputs> + <outputs> + <data format="fqtoc" name="output1"/> + </outputs> + <help> + </help> +</tool> --- a/lib/galaxy/datatypes/sequence.py Mon Oct 10 12:07:19 2011 -0700 +++ b/lib/galaxy/datatypes/sequence.py Mon Oct 10 12:17:57 2011 -0700 @@ -27,7 +27,7 @@ one OR an uncompressed file. 
In the GZIP case, each sub-file's location is stored in start and end. The format of the file is JSON: { "sections" : [ - { "start" : "x", "end" : "y", "clusters" : "z" }, + { "start" : "x", "end" : "y", "sequences" : "z" }, ... ]} """ @@ -53,7 +53,7 @@ data = simplejson.load(open(filename)) sections = data['sections'] for section in sections: - if 'start' not in section or 'end' not in section or 'clusters' not in section: + if 'start' not in section or 'end' not in section or 'sequences' not in section: return False return True except: @@ -96,20 +96,20 @@ dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk' - def get_sequences_per_file(total_clusters, split_params): + def get_sequences_per_file(total_sequences, split_params): if split_params['split_mode'] == 'number_of_parts': # legacy basic mode - split into a specified number of parts parts = int(split_params['split_size']) - sequences_per_file = [total_clusters/parts for i in range(parts)] - for i in range(total_clusters % parts): + sequences_per_file = [total_sequences/parts for i in range(parts)] + for i in range(total_sequences % parts): sequences_per_file[i] += 1 elif split_params['split_mode'] == 'to_size': - # loop through the sections and calculate the number of clusters + # loop through the sections and calculate the number of sequences chunk_size = long(split_params['split_size']) - chunks = total_clusters / chunk_size - rem = total_clusters % chunk_size - sequences_per_file = [chunk_size for i in range(total_clusters / chunk_size)] + chunks = total_sequences / chunk_size + rem = total_sequences % chunk_size + sequences_per_file = [chunk_size for i in range(total_sequences / chunk_size)] # TODO: Should we invest the time in a better way to handle small remainders? if rem > 0: sequences_per_file.append(rem) @@ -119,10 +119,10 @@ get_sequences_per_file = staticmethod(get_sequences_per_file) def do_slow_split( cls, input_datasets, subdir_generator_function, split_params): - # count the clusters so we can split + # count the sequences so we can split # TODO: if metadata is present, take the number of lines / 4 if input_datasets[0].metadata is not None and input_datasets[0].metadata.sequences is not None: - total_clusters = input_datasets[0].metadata.sequences + total_sequences = input_datasets[0].metadata.sequences else: input_file = input_datasets[0].file_name compress = is_gzip(input_file) @@ -136,23 +136,23 @@ # TODO # Add BufferedReader if python 2.7? 
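A worked example of the get_sequences_per_file arithmetic above, restated standalone ('//' stands in for the integer division the original gets from Python 2's '/'; sample numbers are invented):

def sequences_per_file(total_sequences, split_mode, split_size):
    if split_mode == 'number_of_parts':
        # spread the remainder over the first parts
        parts = int(split_size)
        per_file = [total_sequences // parts] * parts
        for i in range(total_sequences % parts):
            per_file[i] += 1
    elif split_mode == 'to_size':
        # fixed-size chunks; any remainder becomes one short final file
        chunk_size = int(split_size)
        per_file = [chunk_size] * (total_sequences // chunk_size)
        if total_sequences % chunk_size:
            per_file.append(total_sequences % chunk_size)
    else:
        raise ValueError('Unsupported split mode %s' % split_mode)
    return per_file

assert sequences_per_file(25, 'number_of_parts', 4) == [7, 6, 6, 6]
assert sequences_per_file(25, 'to_size', 10) == [10, 10, 5]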
in_file = open(input_file, 'rt') - total_clusters = long(0) + total_sequences = long(0) for i, line in enumerate(in_file): - total_clusters += 1 + total_sequences += 1 in_file.close() - total_clusters /= 4 + total_sequences /= 4 - sequences_per_file = cls.get_sequences_per_file(total_clusters, split_params) + sequences_per_file = cls.get_sequences_per_file(total_sequences, split_params) return cls.write_split_files(input_datasets, None, subdir_generator_function, sequences_per_file) do_slow_split = classmethod(do_slow_split) def do_fast_split( cls, input_datasets, toc_file_datasets, subdir_generator_function, split_params): data = simplejson.load(open(toc_file_datasets[0].file_name)) sections = data['sections'] - total_clusters = long(0) + total_sequences = long(0) for section in sections: - total_clusters += long(section['clusters']) - sequences_per_file = cls.get_sequences_per_file(total_clusters, split_params) + total_sequences += long(section['sequences']) + sequences_per_file = cls.get_sequences_per_file(total_sequences, split_params) return cls.write_split_files(input_datasets, toc_file_datasets, subdir_generator_function, sequences_per_file) do_fast_split = classmethod(do_fast_split) @@ -165,7 +165,7 @@ directories.append(dir) return dir - # we know how many splits and how many clusters in each. What remains is to write out instructions for the + # we know how many splits and how many sequences in each. What remains is to write out instructions for the # splitting of all the input files. To decouple the format of those instructions from this code, the exact format of # those instructions is delegated to scripts start_sequence=0 @@ -241,7 +241,7 @@ """ Uses a Table of Contents dict, parsed from an FQTOC file, to come up with a set of shell commands that will extract the parts necessary - >>> three_sections=[dict(start=0, end=74, clusters=10), dict(start=74, end=148, clusters=10), dict(start=148, end=148+76, clusters=10)] + >>> three_sections=[dict(start=0, end=74, sequences=10), dict(start=74, end=148, sequences=10), dict(start=148, end=148+76, sequences=10)] >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=0, sequence_count=10) ['dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null >> ./output.gz'] >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=1, sequence_count=5) @@ -261,8 +261,8 @@ current_sequence = long(0) i=0 # skip to the section that contains my starting sequence - while i < len(sections) and start_sequence >= current_sequence + long(sections[i]['clusters']): - current_sequence += long(sections[i]['clusters']) + while i < len(sections) and start_sequence >= current_sequence + long(sections[i]['sequences']): + current_sequence += long(sections[i]['sequences']) i += 1 if i == len(sections): # bad input data! raise Exception('No FQTOC section contains starting sequence %s' % start_sequence) @@ -277,7 +277,7 @@ # we need to extract partial data. 
So, find the byte offsets of the chunks that contain the data we need # use a combination of dd (to pull just the right sections out) tail (to skip lines) and head (to get the # right number of lines - sequences = long(sections[i]['clusters']) + sequences = long(sections[i]['sequences']) skip_sequences = start_sequence-current_sequence sequences_to_extract = min(sequence_count, sequences-skip_sequences) start_copy = long(sections[i]['start']) @@ -832,4 +832,4 @@ if __name__ == '__main__': import doctest, sys - doctest.testmod(sys.modules[__name__]) \ No newline at end of file + doctest.testmod(sys.modules[__name__]) http://bitbucket.org/galaxy/galaxy-central/changeset/d226b75b9dbc/ changeset: d226b75b9dbc user: dannon date: 2011-10-12 17:19:42 summary: Pull and merge of Task overhaul. affected #: 20 files (-1 bytes) --- a/datatypes_conf.xml.sample Mon Oct 10 18:42:31 2011 -0400 +++ b/datatypes_conf.xml.sample Wed Oct 12 11:19:42 2011 -0400 @@ -56,11 +56,22 @@ <converter file="fasta_to_2bit.xml" target_datatype="twobit"/><converter file="fasta_to_len.xml" target_datatype="len"/></datatype> - <datatype extension="fastq" type="galaxy.datatypes.sequence:Fastq" display_in_upload="true"/> - <datatype extension="fastqsanger" type="galaxy.datatypes.sequence:FastqSanger" display_in_upload="true"/> - <datatype extension="fastqsolexa" type="galaxy.datatypes.sequence:FastqSolexa" display_in_upload="true"/> - <datatype extension="fastqcssanger" type="galaxy.datatypes.sequence:FastqCSSanger" display_in_upload="true"/> - <datatype extension="fastqillumina" type="galaxy.datatypes.sequence:FastqIllumina" display_in_upload="true"/> + <datatype extension="fastq" type="galaxy.datatypes.sequence:Fastq" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqsanger" type="galaxy.datatypes.sequence:FastqSanger" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqsolexa" type="galaxy.datatypes.sequence:FastqSolexa" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqcssanger" type="galaxy.datatypes.sequence:FastqCSSanger" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqillumina" type="galaxy.datatypes.sequence:FastqIllumina" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fqtoc" type="galaxy.datatypes.sequence:SequenceSplitLocations" display_in_upload="true"/><datatype extension="eland" type="galaxy.datatypes.tabular:Eland" display_in_upload="true"/><datatype extension="elandmulti" type="galaxy.datatypes.tabular:ElandMulti" display_in_upload="true"/><datatype extension="genetrack" type="galaxy.datatypes.tracks:GeneTrack"> --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/extract_dataset_parts.sh Wed Oct 12 11:19:42 2011 -0400 @@ -0,0 +1,8 @@ +#!/bin/sh + +cd `dirname $0` +for file in $1/split_info*.json +do + # echo processing $file + python ./scripts/extract_dataset_part.py $file +done --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/lib/galaxy/datatypes/converters/fastq_to_fqtoc.py Wed Oct 12 11:19:42 2011 -0400 @@ -0,0 +1,46 @@ +#!/usr/bin/env python + +import sys, os, gzip +from galaxy.datatypes.checkers import is_gzip + + +def main(): + """ + The format of the file is JSON: + { "sections" : [ + { "start" : 
"x", "end" : "y", "sequences" : "z" }, + ... + ]} + This works only for UNCOMPRESSED fastq files. The Python GzipFile does not provide seekable + offsets via tell(), so clients just have to split the slow way + """ + input_fname = sys.argv[1] + if is_gzip(input_fname): + print 'Conversion is only possible for uncompressed files' + sys.exit(1) + + out_file = open(sys.argv[2], 'w') + + current_line = 0 + sequences=1000000 + lines_per_chunk = 4*sequences + chunk_begin = 0 + + in_file = open(input_name) + + out_file.write('{"sections" : ['); + + for line in in_file: + current_line += 1 + if 0 == current_line % lines_per_chunk: + chunk_end = in_file.tell() + out_file.write('{"start":"%s","end":"%s","sequences":"%s"},' % (chunk_begin, chunk_end, sequences)) + chunk_begin = chunk_end + + chunk_end = in_file.tell() + out_file.write('{"start":"%s","end":"%s","sequences":"%s"}' % (chunk_begin, chunk_end, (current_line % lines_per_chunk) / 4)) + out_file.write(']}\n') + + +if __name__ == "__main__": + main() --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/lib/galaxy/datatypes/converters/fastq_to_fqtoc.xml Wed Oct 12 11:19:42 2011 -0400 @@ -0,0 +1,13 @@ +<tool id="CONVERTER_fastq_to_fqtoc0" name="Convert FASTQ files to seek locations" version="1.0.0" hidden="true"> + <command interpreter="python">fastq_to_fqtoc.py $input1 $output1</command> + <inputs> + <page> + <param format="fastq" name="input1" type="data" label="Choose FASTQ file"/> + </page> + </inputs> + <outputs> + <data format="fqtoc" name="output1"/> + </outputs> + <help> + </help> +</tool> --- a/lib/galaxy/datatypes/data.py Mon Oct 10 18:42:31 2011 -0400 +++ b/lib/galaxy/datatypes/data.py Wed Oct 12 11:19:42 2011 -0400 @@ -351,6 +351,22 @@ @property def has_resolution(self): return False + + + + def merge( split_files, output_file): + """ + TODO: Do we need to merge gzip files using gzjoin? cat seems to work, + but might be brittle. Need to revisit this. + """ + if len(split_files) == 1: + cmd = 'mv -f %s %s' % ( split_files[0], output_file ) + else: + cmd = 'cat %s > %s' % ( ' '.join(split_files), output_file ) + result = os.system(cmd) + if result != 0: + raise Exception('Result %s from %s' % (result, cmd)) + merge = staticmethod(merge) class Text( Data ): file_ext = 'txt' @@ -446,9 +462,83 @@ dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk' + def split( input_files, subdir_generator_function, split_params): + """ + Split the input files by line. + """ + if split_params is None: + return + + if len(input_files) > 1: + raise Exception("Text file splitting does not support multiple files") + + lines_per_file = None + chunk_size = None + if split_params['split_mode'] == 'number_of_parts': + lines_per_file = [] + # Computing the length is expensive! 
+ def _file_len(fname): + i = 0 + f = open(fname) + for i, l in enumerate(f): + pass + f.close() + return i + 1 + length = _file_len(input_files[0]) + parts = int(split_params['split_size']) + if length < parts: + parts = length + len_each, remainder = divmod(length, parts) + while length > 0: + chunk = len_each + if remainder > 0: + chunk += 1 + lines_per_file.append(chunk) + remainder=- 1 + length -= chunk + elif split_params['split_mode'] == 'to_size': + chunk_size = int(split_params['split_size']) + else: + raise Exception('Unsupported split mode %s' % split_params['split_mode']) + + f = open(input_files[0], 'rt') + try: + chunk_idx = 0 + file_done = False + part_file = None + while not file_done: + if lines_per_file is None: + this_chunk_size = chunk_size + elif chunk_idx < len(lines_per_file): + this_chunk_size = lines_per_file[chunk_idx] + chunk_idx += 1 + lines_remaining = this_chunk_size + part_file = None + while lines_remaining > 0: + a_line = f.readline() + if a_line == '': + file_done = True + break + if part_file is None: + part_dir = subdir_generator_function() + part_path = os.path.join(part_dir, os.path.basename(input_files[0])) + part_file = open(part_path, 'w') + part_file.write(a_line) + lines_remaining -= 1 + if part_file is not None: + part_file.close() + except Exception, e: + log.error('Unable to split files: %s' % str(e)) + f.close() + if part_file is not None: + part_file.close() + raise + f.close() + split = staticmethod(split) + class LineCount( Text ): - """ - Dataset contains a single line with a single integer that denotes the + """ + Dataset contains a single line with a single integer that denotes the line count for a related dataset. Used for custom builds. """ pass --- a/lib/galaxy/datatypes/sequence.py Mon Oct 10 18:42:31 2011 -0400 +++ b/lib/galaxy/datatypes/sequence.py Wed Oct 12 11:19:42 2011 -0400 @@ -2,10 +2,12 @@ Sequence classes """ +import gzip import data import logging import re import string +import os from cgi import escape from galaxy.datatypes.metadata import MetadataElement from galaxy.datatypes import metadata @@ -13,8 +15,52 @@ from galaxy import util from sniff import * +import pkg_resources +pkg_resources.require("simplejson") +import simplejson + log = logging.getLogger(__name__) +class SequenceSplitLocations( data.Text ): + """ + Class storing information about a sequence file composed of multiple gzip files concatenated as + one OR an uncompressed file. In the GZIP case, each sub-file's location is stored in start and end. + The format of the file is JSON: + { "sections" : [ + { "start" : "x", "end" : "y", "sequences" : "z" }, + ... 
+ ]} + """ + def set_peek( self, dataset, is_multi_byte=False ): + if not dataset.dataset.purged: + try: + parsed_data = simplejson.load(open(dataset.file_name)) + # dataset.peek = simplejson.dumps(data, sort_keys=True, indent=4) + dataset.peek = data.get_file_peek( dataset.file_name, is_multi_byte=is_multi_byte ) + dataset.blurb = '%d sections' % len(parsed_data['sections']) + except Exception, e: + dataset.peek = 'Not FQTOC file' + dataset.blurb = 'Not FQTOC file' + else: + dataset.peek = 'file does not exist' + dataset.blurb = 'file purged from disk' + + file_ext = "fqtoc" + + def sniff( self, filename ): + if os.path.getsize(filename) < 50000: + try: + data = simplejson.load(open(filename)) + sections = data['sections'] + for section in sections: + if 'start' not in section or 'end' not in section or 'sequences' not in section: + return False + return True + except: + pass + return False + + class Sequence( data.Text ): """Class describing a sequence""" @@ -50,6 +96,239 @@ dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk' + def get_sequences_per_file(total_sequences, split_params): + if split_params['split_mode'] == 'number_of_parts': + # legacy basic mode - split into a specified number of parts + parts = int(split_params['split_size']) + sequences_per_file = [total_sequences/parts for i in range(parts)] + for i in range(total_sequences % parts): + sequences_per_file[i] += 1 + elif split_params['split_mode'] == 'to_size': + # loop through the sections and calculate the number of sequences + chunk_size = long(split_params['split_size']) + + chunks = total_sequences / chunk_size + rem = total_sequences % chunk_size + sequences_per_file = [chunk_size for i in range(total_sequences / chunk_size)] + # TODO: Should we invest the time in a better way to handle small remainders? + if rem > 0: + sequences_per_file.append(rem) + else: + raise Exception('Unsupported split mode %s' % split_params['split_mode']) + return sequences_per_file + get_sequences_per_file = staticmethod(get_sequences_per_file) + + def do_slow_split( cls, input_datasets, subdir_generator_function, split_params): + # count the sequences so we can split + # TODO: if metadata is present, take the number of lines / 4 + if input_datasets[0].metadata is not None and input_datasets[0].metadata.sequences is not None: + total_sequences = input_datasets[0].metadata.sequences + else: + input_file = input_datasets[0].file_name + compress = is_gzip(input_file) + if compress: + # gzip is really slow before python 2.7! + in_file = gzip.GzipFile(input_file, 'r') + else: + # TODO + # if a file is not compressed, seek locations can be calculated and stored + # ideally, this would be done in metadata + # TODO + # Add BufferedReader if python 2.7? 
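For reference, a toy FQTOC document in the format SequenceSplitLocations describes above, together with the structural check its sniff() applies (json stands in for simplejson; the byte offsets are invented):

import json

toc = {"sections": [
    {"start": "0",   "end": "74",  "sequences": "10"},
    {"start": "74",  "end": "148", "sequences": "10"},
    {"start": "148", "end": "224", "sequences": "10"},
]}

def looks_like_fqtoc(data):
    # every section must carry start/end byte offsets and a sequence count
    try:
        sections = data['sections']
    except (KeyError, TypeError):
        return False
    return all('start' in s and 'end' in s and 'sequences' in s for s in sections)

assert looks_like_fqtoc(json.loads(json.dumps(toc)))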
+ in_file = open(input_file, 'rt') + total_sequences = long(0) + for i, line in enumerate(in_file): + total_sequences += 1 + in_file.close() + total_sequences /= 4 + + sequences_per_file = cls.get_sequences_per_file(total_sequences, split_params) + return cls.write_split_files(input_datasets, None, subdir_generator_function, sequences_per_file) + do_slow_split = classmethod(do_slow_split) + + def do_fast_split( cls, input_datasets, toc_file_datasets, subdir_generator_function, split_params): + data = simplejson.load(open(toc_file_datasets[0].file_name)) + sections = data['sections'] + total_sequences = long(0) + for section in sections: + total_sequences += long(section['sequences']) + sequences_per_file = cls.get_sequences_per_file(total_sequences, split_params) + return cls.write_split_files(input_datasets, toc_file_datasets, subdir_generator_function, sequences_per_file) + do_fast_split = classmethod(do_fast_split) + + def write_split_files(cls, input_datasets, toc_file_datasets, subdir_generator_function, sequences_per_file): + directories = [] + def get_subdir(idx): + if idx < len(directories): + return directories[idx] + dir = subdir_generator_function() + directories.append(dir) + return dir + + # we know how many splits and how many sequences in each. What remains is to write out instructions for the + # splitting of all the input files. To decouple the format of those instructions from this code, the exact format of + # those instructions is delegated to scripts + start_sequence=0 + for part_no in range(len(sequences_per_file)): + dir = get_subdir(part_no) + for ds_no in range(len(input_datasets)): + ds = input_datasets[ds_no] + base_name = os.path.basename(ds.file_name) + part_path = os.path.join(dir, base_name) + split_data = dict(class_name='%s.%s' % (cls.__module__, cls.__name__), + output_name=part_path, + input_name=ds.file_name, + args=dict(start_sequence=start_sequence, num_sequences=sequences_per_file[part_no])) + if toc_file_datasets is not None: + toc = toc_file_datasets[ds_no] + split_data['args']['toc_file'] = toc.file_name + f = open(os.path.join(dir, 'split_info_%s.json' % base_name), 'w') + simplejson.dump(split_data, f) + f.close() + start_sequence += sequences_per_file[part_no] + return directories + write_split_files = classmethod(write_split_files) + + def split( cls, input_datasets, subdir_generator_function, split_params): + """ + FASTQ files are split on cluster boundaries, in increments of 4 lines + """ + if split_params is None: + return None + + # first, see if there are any associated FQTOC files that will give us the split locations + # if so, we don't need to read the files to do the splitting + toc_file_datasets = [] + for ds in input_datasets: + tmp_ds = ds + fqtoc_file = None + while fqtoc_file is None and tmp_ds is not None: + fqtoc_file = tmp_ds.get_converted_files_by_type('fqtoc') + tmp_ds = tmp_ds.copied_from_library_dataset_dataset_association + + if fqtoc_file is not None: + toc_file_datasets.append(fqtoc_file) + + if len(toc_file_datasets) == len(input_datasets): + return cls.do_fast_split(input_datasets, toc_file_datasets, subdir_generator_function, split_params) + return cls.do_slow_split(input_datasets, subdir_generator_function, split_params) + split = classmethod(split) + + def process_split_file(data): + """ + This is called in the context of an external process launched by a Task (possibly not on the Galaxy machine) + to create the input files for the Task. 
The parameters: + data - a dict containing the contents of the split file + """ + args = data['args'] + input_name = data['input_name'] + output_name = data['output_name'] + start_sequence = long(args['start_sequence']) + sequence_count = long(args['num_sequences']) + + if 'toc_file' in args: + toc_file = simplejson.load(open(args['toc_file'], 'r')) + commands = Sequence.get_split_commands_with_toc(input_name, output_name, toc_file, start_sequence, sequence_count) + else: + commands = Sequence.get_split_commands_sequential(is_gzip(input_name), input_name, output_name, start_sequence, sequence_count) + for cmd in commands: + if 0 != os.system(cmd): + raise Exception("Executing '%s' failed" % cmd) + return True + process_split_file = staticmethod(process_split_file) + + def get_split_commands_with_toc(input_name, output_name, toc_file, start_sequence, sequence_count): + """ + Uses a Table of Contents dict, parsed from an FQTOC file, to come up with a set of + shell commands that will extract the parts necessary + >>> three_sections=[dict(start=0, end=74, sequences=10), dict(start=74, end=148, sequences=10), dict(start=148, end=148+76, sequences=10)] + >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=0, sequence_count=10) + ['dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null >> ./output.gz'] + >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=1, sequence_count=5) + ['(dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +5 2> /dev/null) | head -20 | gzip -c >> ./output.gz'] + >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=0, sequence_count=20) + ['dd bs=1 skip=0 count=148 if=./input.gz 2> /dev/null >> ./output.gz'] + >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=5, sequence_count=10) + ['(dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +21 2> /dev/null) | head -20 | gzip -c >> ./output.gz', '(dd bs=1 skip=74 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +1 2> /dev/null) | head -20 | gzip -c >> ./output.gz'] + >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=10, sequence_count=10) + ['dd bs=1 skip=74 count=74 if=./input.gz 2> /dev/null >> ./output.gz'] + >>> Sequence.get_split_commands_with_toc('./input.gz', './output.gz', dict(sections=three_sections), start_sequence=5, sequence_count=20) + ['(dd bs=1 skip=0 count=74 if=./input.gz 2> /dev/null )| zcat | ( tail -n +21 2> /dev/null) | head -20 | gzip -c >> ./output.gz', 'dd bs=1 skip=74 count=74 if=./input.gz 2> /dev/null >> ./output.gz', '(dd bs=1 skip=148 count=76 if=./input.gz 2> /dev/null )| zcat | ( tail -n +1 2> /dev/null) | head -20 | gzip -c >> ./output.gz'] + """ + sections = toc_file['sections'] + result = [] + + current_sequence = long(0) + i=0 + # skip to the section that contains my starting sequence + while i < len(sections) and start_sequence >= current_sequence + long(sections[i]['sequences']): + current_sequence += long(sections[i]['sequences']) + i += 1 + if i == len(sections): # bad input data! 
+ raise Exception('No FQTOC section contains starting sequence %s' % start_sequence) + + # These two variables act as an accumulator for consecutive entire blocks that + # can be copied verbatim (without decompressing) + start_chunk = long(-1) + end_chunk = long(-1) + copy_chunk_cmd = 'dd bs=1 skip=%s count=%s if=%s 2> /dev/null >> %s' + + while sequence_count > 0 and i < len(sections): + # we need to extract partial data. So, find the byte offsets of the chunks that contain the data we need + # use a combination of dd (to pull just the right sections out) tail (to skip lines) and head (to get the + # right number of lines + sequences = long(sections[i]['sequences']) + skip_sequences = start_sequence-current_sequence + sequences_to_extract = min(sequence_count, sequences-skip_sequences) + start_copy = long(sections[i]['start']) + end_copy = long(sections[i]['end']) + if sequences_to_extract < sequences: + if start_chunk > -1: + result.append(copy_chunk_cmd % (start_chunk, end_chunk-start_chunk, input_name, output_name)) + start_chunk = -1 + # extract, unzip, trim, recompress + result.append('(dd bs=1 skip=%s count=%s if=%s 2> /dev/null )| zcat | ( tail -n +%s 2> /dev/null) | head -%s | gzip -c >> %s' % + (start_copy, end_copy-start_copy, input_name, skip_sequences*4+1, sequences_to_extract*4, output_name)) + else: # whole section - add it to the start_chunk/end_chunk accumulator + if start_chunk == -1: + start_chunk = start_copy + end_chunk = end_copy + sequence_count -= sequences_to_extract + start_sequence += sequences_to_extract + current_sequence += sequences + i += 1 + if start_chunk > -1: + result.append(copy_chunk_cmd % (start_chunk, end_chunk-start_chunk, input_name, output_name)) + + if sequence_count > 0: + raise Exception('%s sequences not found in file' % sequence_count) + + return result + get_split_commands_with_toc = staticmethod(get_split_commands_with_toc) + + + def get_split_commands_sequential(is_compressed, input_name, output_name, start_sequence, sequence_count): + """ + Does a brain-dead sequential scan & extract of certain sequences + >>> Sequence.get_split_commands_sequential(True, './input.gz', './output.gz', start_sequence=0, sequence_count=10) + ['zcat "./input.gz" | ( tail -n +1 2> /dev/null) | head -40 | gzip -c > "./output.gz"'] + >>> Sequence.get_split_commands_sequential(False, './input.fastq', './output.fastq', start_sequence=10, sequence_count=10) + ['tail -n +41 "./input.fastq" 2> /dev/null | head -40 > "./output.fastq"'] + """ + start_line = start_sequence * 4 + line_count = sequence_count * 4 + # TODO: verify that tail can handle 64-bit numbers + if is_compressed: + cmd = 'zcat "%s" | ( tail -n +%s 2> /dev/null) | head -%s | gzip -c' % (input_name, start_line+1, line_count) + else: + cmd = 'tail -n +%s "%s" 2> /dev/null | head -%s' % (start_line+1, input_name, line_count) + cmd += ' > "%s"' % output_name + + return [cmd] + get_split_commands_sequential = staticmethod(get_split_commands_sequential) + + + class Alignment( data.Text ): """Class describing an alignment""" @@ -550,3 +829,4 @@ return False except: return False + --- a/lib/galaxy/jobs/__init__.py Mon Oct 10 18:42:31 2011 -0400 +++ b/lib/galaxy/jobs/__init__.py Wed Oct 12 11:19:42 2011 -0400 @@ -327,6 +327,7 @@ self.working_directory = \ os.path.join( self.app.config.job_working_directory, str( self.job_id ) ) self.output_paths = None + self.output_dataset_paths = None self.tool_provided_job_metadata = None # Wrapper holding the info required to restore and clean up from files used for setting 
metadata externally self.external_output_metadata = metadata.JobExternalOutputMetadataWrapper( job ) @@ -716,23 +717,35 @@ def get_session_id( self ): return self.session_id + def get_input_dataset_fnames( self, ds ): + filenames = [ ds.file_name ] + #we will need to stage in metadata file names also + #TODO: would be better to only stage in metadata files that are actually needed (found in command line, referenced in config files, etc.) + for key, value in ds.metadata.items(): + if isinstance( value, model.MetadataFile ): + filenames.append( value.file_name ) + return filenames + def get_input_fnames( self ): job = self.get_job() filenames = [] for da in job.input_datasets + job.input_library_datasets: #da is JobToInputDatasetAssociation object if da.dataset: - filenames.append( da.dataset.file_name ) - #we will need to stage in metadata file names also - #TODO: would be better to only stage in metadata files that are actually needed (found in command line, referenced in config files, etc.) - for key, value in da.dataset.metadata.items(): - if isinstance( value, model.MetadataFile ): - filenames.append( value.file_name ) + filenames.extend(self.get_input_dataset_fnames(da.dataset)) return filenames def get_output_fnames( self ): - if self.output_paths is not None: - return self.output_paths + if self.output_paths is None: + self.compute_outputs() + return self.output_paths + def get_output_datasets_and_fnames( self ): + if self.output_dataset_paths is None: + self.compute_outputs() + return self.output_dataset_paths + + def compute_outputs( self ): class DatasetPath( object ): def __init__( self, dataset_id, real_path, false_path = None ): self.dataset_id = dataset_id @@ -743,23 +756,27 @@ return self.real_path else: return self.false_path job = self.get_job() # Job output datasets are combination of output datasets, library datasets, and jeha datasets.
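# (editor's note, not part of the changeset) Shape of what compute_outputs() builds below, assuming a single output named 'out_file1' and outputs_to_working_directory enabled; ids and paths are illustrative:
#    self.output_paths         == [ DatasetPath( 7, '/data/007.dat', '<working_directory>/galaxy_dataset_7.dat' ) ]
#    self.output_dataset_paths == { 'out_file1': ( hda, <that DatasetPath> ) }
# str() of a DatasetPath returns false_path when set, real_path otherwise.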
jeha = self.sa_session.query( model.JobExportHistoryArchive ).filter_by( job=job ).first() + jeha_false_path = None if self.app.config.outputs_to_working_directory: self.output_paths = [] + self.output_dataset_paths = {} for name, data in [ ( da.name, da.dataset.dataset ) for da in job.output_datasets + job.output_library_datasets ]: false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % data.id ) ) - self.output_paths.append( DatasetPath( data.id, data.file_name, false_path ) ) + dsp = DatasetPath( data.id, data.file_name, false_path ) + self.output_paths.append( dsp ) + self.output_dataset_paths[name] = data, dsp if jeha: - false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % jeha.dataset.id ) ) - self.output_paths.append( DatasetPath( jeha.dataset.id, jeha.dataset.file_name, false_path ) ) + jeha_false_path = os.path.abspath( os.path.join( self.working_directory, "galaxy_dataset_%d.dat" % jeha.dataset.id ) ) else: - self.output_paths = [ DatasetPath( da.dataset.dataset.id, da.dataset.file_name ) for da in job.output_datasets + job.output_library_datasets ] - if jeha: - self.output_paths.append( DatasetPath( jeha.dataset.id, jeha.dataset.file_name ) ) - + results = [ (da.name, da.dataset, DatasetPath( da.dataset.dataset.id, da.dataset.file_name )) for da in job.output_datasets + job.output_library_datasets ] + self.output_paths = [t[2] for t in results] + self.output_dataset_paths = dict([(t[0], t[1:]) for t in results]) + if jeha: + dsp = DatasetPath( jeha.dataset.id, jeha.dataset.file_name, jeha_false_path ) + self.output_paths.append( dsp ) return self.output_paths def get_output_file_id( self, file ): @@ -869,12 +886,11 @@ def __init__(self, task, queue): super(TaskWrapper, self).__init__(task.job, queue) self.task_id = task.id - self.parallelism = None - if task.part_file: - #do this better - self.working_directory = os.path.dirname(task.part_file) + self.working_directory = task.working_directory + if task.prepare_input_files_cmd is not None: + self.prepare_input_files_cmds = [ task.prepare_input_files_cmd ] else: - self.working_directory = None + self.prepare_input_files_cmds = None self.status = task.states.NEW def get_job( self ): @@ -1112,8 +1128,8 @@ self.job_runners[runner_name].put( job_wrapper ) else: runner_name = "tasks" - log.debug( "dispatching job %d to %s runner" %( job_wrapper.job_id, runner_name ) ) - self.job_runners[runner_name].put( job_wrapper ) + log.debug( "dispatching job %d to %s runner" %( job_wrapper.job_id, runner_name ) ) + self.job_runners[runner_name].put( job_wrapper ) else: runner_name = ( job_wrapper.tool.job_runner.split(":", 1) )[0] log.debug( "dispatching job %d to %s runner" %( job_wrapper.job_id, runner_name ) ) @@ -1249,4 +1265,3 @@ return def shutdown( self ): return - --- a/lib/galaxy/jobs/runners/__init__.py Mon Oct 10 18:42:31 2011 -0400 +++ b/lib/galaxy/jobs/runners/__init__.py Wed Oct 12 11:19:42 2011 -0400 @@ -6,6 +6,7 @@ Compose the sequence of commands necessary to execute a job.
This will currently include: - environment settings corresponding to any requirement tags + - preparing input files - command line taken from job wrapper - commands to set metadata (if include_metadata is True) """ @@ -17,10 +18,13 @@ # Prepend version string if job_wrapper.version_string_cmd: commands = "%s &> %s; " % ( job_wrapper.version_string_cmd, job_wrapper.get_version_string_path() ) + commands + # prepend getting input files (if defined) + if hasattr(job_wrapper, 'prepare_input_files_cmds') and job_wrapper.prepare_input_files_cmds is not None: + commands = "; ".join( job_wrapper.prepare_input_files_cmds + [ commands ] ) # Prepend dependency injection if job_wrapper.dependency_shell_commands: commands = "; ".join( job_wrapper.dependency_shell_commands + [ commands ] ) - + # Append metadata setting commands, we don't want to overwrite metadata # that was copied over in init_meta(), as per established behavior if include_metadata and self.app.config.set_metadata_externally: --- a/lib/galaxy/jobs/runners/lwr.py Mon Oct 10 18:42:31 2011 -0400 +++ b/lib/galaxy/jobs/runners/lwr.py Wed Oct 12 11:19:42 2011 -0400 @@ -249,6 +249,12 @@ try: job_wrapper.prepare() + if hasattr(job_wrapper, 'prepare_input_files_cmds') and job_wrapper.prepare_input_files_cmds is not None: + for cmd in job_wrapper.prepare_input_files_cmds: # run the commands to stage the input files + #log.debug( 'executing: %s' % cmd ) + if 0 != os.system(cmd): + raise Exception('Error running file staging command: %s' % cmd) + job_wrapper.prepare_input_files_cmds = None # prevent them from being used in-line command_line = self.build_command_line( job_wrapper, include_metadata=False ) except: job_wrapper.fail( "failure preparing job", exception=True ) --- a/lib/galaxy/jobs/runners/tasks.py Mon Oct 10 18:42:31 2011 -0400 +++ b/lib/galaxy/jobs/runners/tasks.py Wed Oct 12 11:19:42 2011 -0400 @@ -60,71 +60,58 @@ if command_line: try: # DBTODO read tool info and use the right kind of parallelism. - # For now, the only splitter is the 'basic' one, n-ways split on one input, one output. - # This is incredibly simplified. Parallelism ultimately needs to describe which inputs, how, etc. + # For now, the only splitter is the 'basic' one job_wrapper.change_state( model.Job.states.RUNNING ) self.sa_session.flush() - parent_job = job_wrapper.get_job() # Split with the tool-defined method. - if job_wrapper.tool.parallelism == "basic": - from galaxy.jobs.splitters import basic - if len(job_wrapper.get_input_fnames()) > 1 or len(job_wrapper.get_output_fnames()) > 1: - log.error("The basic splitter is not capable of handling jobs with multiple inputs or outputs.") - job_wrapper.change_state( model.Job.states.ERROR ) - job_wrapper.fail("Job Splitting Failed, the basic splitter only handles tools with one input and one output") - # Requeue as a standard job? - return - input_file = job_wrapper.get_input_fnames()[0] - working_directory = job_wrapper.working_directory - # DBTODO execute an external task to do the splitting, this should happen at refactor. - # Regarding number of ways split, use "hints" in tool config? - # If the number of tasks is sufficiently high, we can use it to calculate job completion % and give a running status. - basic.split(input_file, working_directory, - 20, #Needs serious experimentation to find out what makes the most sense. - parent_job.input_datasets[0].dataset.ext) - # Tasks in this parts list are in alphabetical listdir order (15 before 5), but that should not matter.
- parts = [os.path.join(os.path.abspath(job_wrapper.working_directory), p, os.path.basename(input_file)) - for p in os.listdir(job_wrapper.working_directory) - if p.startswith('task_')] - else: + try: + splitter = getattr(__import__('galaxy.jobs.splitters', globals(), locals(), [job_wrapper.tool.parallelism.method]), job_wrapper.tool.parallelism.method) + except: job_wrapper.change_state( model.Job.states.ERROR ) job_wrapper.fail("Job Splitting Failed, no match for '%s'" % job_wrapper.tool.parallelism) - # Assemble parts into task_wrappers + return + tasks = splitter.do_split(job_wrapper) # Not an option for now. Task objects don't *do* anything useful yet, but we'll want them tracked outside this thread to do anything. # if track_tasks_in_database: - tasks = [] task_wrappers = [] - for part in parts: - task = model.Task(parent_job, part) + for task in tasks: self.sa_session.add(task) - tasks.append(task) self.sa_session.flush() + # Must flush prior to the creation and queueing of task wrappers. for task in tasks: tw = TaskWrapper(task, job_wrapper.queue) task_wrappers.append(tw) self.app.job_manager.dispatcher.put(tw) tasks_incomplete = False + count_complete = 0 sleep_time = 1 + # sleep/loop until no more progress can be made. That is when + # all tasks are one of { OK, ERROR, DELETED } + completed_states = [ model.Task.states.OK, \ + model.Task.states.ERROR, \ + model.Task.states.DELETED ] + # TODO: Should we report an error (and not merge outputs) if one of the subtasks errored out? + # Should we prevent any that are pending from being started in that case? while tasks_incomplete is False: + count_complete = 0 tasks_incomplete = True for tw in task_wrappers: - if not tw.get_state() == model.Task.states.OK: + task_state = tw.get_state() + if not task_state in completed_states: tasks_incomplete = False - sleep( sleep_time ) - if sleep_time < 8: - sleep_time *= 2 - output_filename = job_wrapper.get_output_fnames()[0].real_path - basic.merge(working_directory, output_filename) - log.debug('execution finished: %s' % command_line) - for tw in task_wrappers: - # Prevent repetitive output, e.g. "Sequence File Aligned"x20 - # Eventually do a reduce for jobs that output "N reads mapped", combining all N for tasks. - if stdout.strip() != tw.get_task().stdout.strip(): - stdout += tw.get_task().stdout - if stderr.strip() != tw.get_task().stderr.strip(): - stderr += tw.get_task().stderr + else: + count_complete = count_complete + 1 + if tasks_incomplete is False: + # log.debug('Tasks complete: %s. 
Sleeping %s' % (count_complete, sleep_time)) + sleep( sleep_time ) + if sleep_time < 8: + sleep_time *= 2 + + log.debug('execution finished - beginning merge: %s' % command_line) + stdout, stderr = splitter.do_merge(job_wrapper, task_wrappers) + except Exception: job_wrapper.fail( "failure running job", exception=True ) log.exception("failure running job %d" % job_wrapper.job_id) --- a/lib/galaxy/jobs/splitters/basic.py Mon Oct 10 18:42:31 2011 -0400 +++ b/lib/galaxy/jobs/splitters/basic.py Wed Oct 12 11:19:42 2011 -0400 @@ -1,91 +1,23 @@ -import os, logging +import logging +import multi + log = logging.getLogger( __name__ ) -def _file_len(fname): - i = 0 - f = open(fname) - for i, l in enumerate(f): - pass - f.close() - return i + 1 +def set_basic_defaults(job_wrapper): + parent_job = job_wrapper.get_job() + job_wrapper.tool.parallelism.attributes['split_inputs'] = parent_job.input_datasets[0].name + job_wrapper.tool.parallelism.attributes['merge_outputs'] = job_wrapper.get_output_datasets_and_fnames().keys()[0] -def _fq_seq_count(fname): - count = 0 - f = open(fname) - for i, l in enumerate(f): - if l.startswith('@'): - count += 1 - f.close() - return count - -def split_fq(input_file, working_directory, parts): - # Temporary, switch this to use the fq reader in lib/galaxy_utils/sequence. - outputs = [] - length = _fq_seq_count(input_file) - if length < 1: - return outputs - if length < parts: - parts = length - len_each, remainder = divmod(length, parts) - f = open(input_file, 'rt') - for p in range(0, parts): - part_dir = os.path.join( os.path.abspath(working_directory), 'task_%s' % p) - if not os.path.exists( part_dir ): - os.mkdir( part_dir ) - part_path = os.path.join(part_dir, os.path.basename(input_file)) - part_file = open(part_path, 'w') - for l in range(0, len_each): - part_file.write(f.readline()) - part_file.write(f.readline()) - part_file.write(f.readline()) - part_file.write(f.readline()) - if remainder > 0: - part_file.write(f.readline()) - part_file.write(f.readline()) - part_file.write(f.readline()) - part_file.write(f.readline()) - remainder -= 1 - outputs.append(part_path) - part_file.close() - f.close() - return outputs - -def split_txt(input_file, working_directory, parts): - outputs = [] - length = _file_len(input_file) - if length < parts: - parts = length - len_each, remainder = divmod(length, parts) - f = open(input_file, 'rt') - for p in range(0, parts): - part_dir = os.path.join( os.path.abspath(working_directory), 'task_%s' % p) - if not os.path.exists( part_dir ): - os.mkdir( part_dir ) - part_path = os.path.join(part_dir, os.path.basename(input_file)) - part_file = open(part_path, 'w') - for l in range(0, len_each): - part_file.write(f.readline()) - if remainder > 0: - part_file.write(f.readline()) - remainder -= 1 - outputs.append(part_path) - part_file.close() - f.close() - return outputs +def do_split (job_wrapper): + if len(job_wrapper.get_input_fnames()) > 1 or len(job_wrapper.get_output_fnames()) > 1: + log.error("The basic splitter is not capable of handling jobs with multiple inputs or outputs.") + raise Exception, "Job Splitting Failed, the basic splitter only handles tools with one input and one output" + # add in the missing information for splitting the one input and merging the one output + set_basic_defaults(job_wrapper) + return multi.do_split(job_wrapper) -def split( input_file, working_directory, parts, file_type = None): - #Implement a better method for determining how to split. 
- if file_type.startswith('fastq'): - return split_fq(input_file, working_directory, parts) - else: - return split_txt(input_file, working_directory, parts) +def do_merge( job_wrapper, task_wrappers): + # add in the missing information for splitting the one input and merging the one output + set_basic_defaults(job_wrapper) + return multi.do_merge(job_wrapper, task_wrappers) -def merge( working_directory, output_file ): - output_file_name = os.path.basename(output_file) - task_dirs = [os.path.join(working_directory, x) for x in os.listdir(working_directory) if x.startswith('task_')] - task_dirs.sort(key = lambda x: int(x.split('task_')[-1])) - for task_dir in task_dirs: - try: - os.system( 'cat %s >> %s' % ( os.path.join(task_dir, output_file_name), output_file ) ) - except Exception, e: - log.error(str(e)) --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/lib/galaxy/jobs/splitters/multi.py Wed Oct 12 11:19:42 2011 -0400 @@ -0,0 +1,158 @@ +import os, logging, shutil +from galaxy import model, util + + +log = logging.getLogger( __name__ ) + +def do_split (job_wrapper): + parent_job = job_wrapper.get_job() + working_directory = os.path.abspath(job_wrapper.working_directory) + + parallel_settings = job_wrapper.tool.parallelism.attributes + # Syntax: split_inputs="input1,input2" shared_inputs="genome" + # Designates inputs to be split or shared + split_inputs=parallel_settings.get("split_inputs") + if split_inputs is None: + split_inputs = [] + else: + split_inputs = [x.strip() for x in split_inputs.split(",")] + + shared_inputs=parallel_settings.get("shared_inputs") + if shared_inputs is None: + shared_inputs = [] + else: + shared_inputs = [x.strip() for x in shared_inputs.split(",")] + illegal_inputs = [x for x in shared_inputs if x in split_inputs] + if len(illegal_inputs) > 0: + raise Exception("Inputs have conflicting parallelism attributes: %s" % str( illegal_inputs )) + + subdir_index = [0] # use a list to get around Python 2.x lame closure support + task_dirs = [] + def get_new_working_directory_name(): + dir=os.path.join(working_directory, 'task_%d' % subdir_index[0]) + subdir_index[0] = subdir_index[0] + 1 + if not os.path.exists(dir): + os.makedirs(dir) + task_dirs.append(dir) + return dir + + # For things like paired end alignment, we need two inputs to be split. 
Since all inputs to all + # derived subtasks need to be correlated, allow only one input type to be split + type_to_input_map = {} + for input in parent_job.input_datasets: + if input.name in split_inputs: + type_to_input_map.setdefault(input.dataset.datatype, []).append(input.name) + elif input.name in shared_inputs: + pass # pass original file name + else: + log_error = "The input '%s' does not define a method for implementing parallelism" % str(input.name) + log.error(log_error) + raise Exception(log_error) + + if len(type_to_input_map) > 1: + log_error = "The multi splitter does not support splitting inputs of more than one type" + log.error(log_error) + raise Exception(log_error) + + # split the first one to build up the task directories + input_datasets = [] + for input in parent_job.input_datasets: + if input.name in split_inputs: + this_input_files = job_wrapper.get_input_dataset_fnames(input.dataset) + if len(this_input_files) > 1: + log_error = "The input '%s' is composed of multiple files - splitting is not allowed" % str(input.name) + log.error(log_error) + raise Exception(log_error) + input_datasets.append(input.dataset) + + input_type = type_to_input_map.keys()[0] + # DBTODO execute an external task to do the splitting, this should happen at refactor. + # If the number of tasks is sufficiently high, we can use it to calculate job completion % and give a running status. + try: + input_type.split(input_datasets, get_new_working_directory_name, parallel_settings) + except AttributeError: + log_error = "The type '%s' does not define a method for splitting files" % str(input_type) + log.error(log_error) + raise + log.debug('do_split created %d parts' % len(task_dirs)) + # next, after we know how many divisions there are, add the shared inputs via soft links + for input in parent_job.input_datasets: + if input and input.name in shared_inputs: + names = job_wrapper.get_input_dataset_fnames(input.dataset) + for dir in task_dirs: + for file in names: + os.symlink(file, os.path.join(dir, os.path.basename(file))) + tasks = [] + prepare_files = os.path.join(util.galaxy_directory(), 'extract_dataset_parts.sh') + ' %s' + for dir in task_dirs: + task = model.Task(parent_job, dir, prepare_files % dir) + tasks.append(task) + return tasks + + +def do_merge( job_wrapper, task_wrappers): + parent_job = job_wrapper.get_job() + parallel_settings = job_wrapper.tool.parallelism.attributes + # Syntax: merge_outputs="export" pickone_outputs="genomesize" + # Designates outputs to be merged, or selected from as a representative + merge_outputs = parallel_settings.get("merge_outputs") + if merge_outputs is None: + merge_outputs = [] + else: + merge_outputs = [x.strip() for x in merge_outputs.split(",")] + pickone_outputs = parallel_settings.get("pickone_outputs") + if pickone_outputs is None: + pickone_outputs = [] + else: + pickone_outputs = [x.strip() for x in pickone_outputs.split(",")] + + illegal_outputs = [x for x in merge_outputs if x in pickone_outputs] + if len(illegal_outputs) > 0: + return ('Tool file error', 'Outputs have conflicting parallelism attributes: %s' % str( illegal_outputs )) + + stdout = '' + stderr = '' + + try: + working_directory = job_wrapper.working_directory + task_dirs = [os.path.join(working_directory, x) for x in os.listdir(working_directory) if x.startswith('task_')] + # TODO: Output datasets can be very complex. 
This doesn't handle metadata files + outputs = job_wrapper.get_output_datasets_and_fnames() + pickone_done = [] + for output in outputs: + output_file_name = str(outputs[output][1]) + base_output_name = os.path.basename(output_file_name) + if output in merge_outputs: + output_type = outputs[output][0].datatype + output_files = [os.path.join(dir,base_output_name) for dir in task_dirs] + log.debug('files %s ' % output_files) + output_type.merge(output_files, output_file_name) + log.debug('merge finished: %s' % output_file_name) + elif output in pickone_outputs: + # just pick one of them + if output not in pickone_done: + task_file_name = os.path.join(task_dirs[0], base_output_name) + shutil.move( task_file_name, output_file_name ) + pickone_done.append(output) + else: + log_error = "The output '%s' does not define a method for implementing parallelism" % output + log.error(log_error) + raise Exception(log_error) + except Exception, e: + stdout = 'Error merging files' + stderr = str(e) + + + for tw in task_wrappers: + # Prevent repetitive output, e.g. "Sequence File Aligned"x20 + # Eventually do a reduce for jobs that output "N reads mapped", combining all N for tasks. + out = tw.get_task().stdout.strip() + err = tw.get_task().stderr.strip() + if len(out) > 0: + stdout += tw.working_directory + ':\n' + out + if len(err) > 0: + stderr += tw.working_directory + ':\n' + err + return (stdout, stderr) + --- a/lib/galaxy/model/__init__.py Mon Oct 10 18:42:31 2011 -0400 +++ b/lib/galaxy/model/__init__.py Wed Oct 12 11:19:42 2011 -0400 @@ -204,18 +204,19 @@ ERROR = 'error', DELETED = 'deleted' ) - def __init__( self, job, part_file = None ): + def __init__( self, job, working_directory, prepare_files_cmd ): self.command_line = None self.parameters = [] self.state = Task.states.NEW self.info = None - self.part_file = part_file + self.working_directory = working_directory self.task_runner_name = None self.task_runner_external_id = None self.job = job self.stdout = None self.stderr = None - + self.prepare_input_files_cmd = prepare_files_cmd + def set_state( self, state ): self.state = state @@ -896,7 +897,9 @@ def get_converted_files_by_type( self, file_type ): for assoc in self.implicitly_converted_datasets: if not assoc.deleted and assoc.type == file_type: - return assoc.dataset + if assoc.dataset: + return assoc.dataset + return assoc.dataset_ldda return None def get_converted_dataset_deps(self, trans, target_ext): """ @@ -1588,7 +1591,12 @@ class ImplicitlyConvertedDatasetAssociation( object ): def __init__( self, id = None, parent = None, dataset = None, file_type = None, deleted = False, purged = False, metadata_safe = True ): self.id = id - self.dataset = dataset + if isinstance(dataset, HistoryDatasetAssociation): + self.dataset = dataset + elif isinstance(dataset, LibraryDatasetDatasetAssociation): + self.dataset_ldda = dataset + else: + raise AttributeError, 'Unknown dataset type provided for dataset: %s' % type( dataset ) if isinstance(parent, HistoryDatasetAssociation): self.parent_hda = parent elif isinstance(parent, LibraryDatasetDatasetAssociation): --- a/lib/galaxy/model/mapping.py Mon Oct 10 18:42:31 2011 -0400 +++ b/lib/galaxy/model/mapping.py Wed Oct 12 11:19:42 2011 -0400 @@ -148,6 +148,7 @@ Column( "create_time", DateTime, default=now ), Column( "update_time", DateTime, default=now, onupdate=now ), Column( "hda_id", Integer,
ForeignKey( "history_dataset_association.id" ), index=True, nullable=True ), + Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True, nullable=True ), Column( "hda_parent_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True ), Column( "ldda_parent_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True ), Column( "deleted", Boolean, index=True, default=False ), @@ -467,11 +468,13 @@ Column( "runner_name", String( 255 ) ), Column( "stdout", TEXT ), Column( "stderr", TEXT ), + Column( "info", TrimmedString ( 255 ) ), Column( "traceback", TEXT ), Column( "job_id", Integer, ForeignKey( "job.id" ), index=True, nullable=False ), - Column( "part_file", String(1024)), + Column( "working_directory", String(1024)), Column( "task_runner_name", String( 255 ) ), - Column( "task_runner_external_id", String( 255 ) ) ) + Column( "task_runner_external_id", String( 255 ) ), + Column( "prepare_input_files_cmd", TEXT ) ) PostJobAction.table = Table("post_job_action", metadata, Column("id", Integer, primary_key=True), @@ -1206,11 +1209,12 @@ properties=dict( parent_hda=relation( HistoryDatasetAssociation, primaryjoin=( ImplicitlyConvertedDatasetAssociation.table.c.hda_parent_id == HistoryDatasetAssociation.table.c.id ) ), - parent_ldda=relation( LibraryDatasetDatasetAssociation, primaryjoin=( ImplicitlyConvertedDatasetAssociation.table.c.ldda_parent_id == LibraryDatasetDatasetAssociation.table.c.id ) ), - + dataset_ldda=relation( + LibraryDatasetDatasetAssociation, + primaryjoin=( ImplicitlyConvertedDatasetAssociation.table.c.ldda_id == LibraryDatasetDatasetAssociation.table.c.id ) ), dataset=relation( HistoryDatasetAssociation, primaryjoin=( ImplicitlyConvertedDatasetAssociation.table.c.hda_id == HistoryDatasetAssociation.table.c.id ) ) ) ) @@ -1594,7 +1598,7 @@ annotations=relation( PageAnnotationAssociation, order_by=PageAnnotationAssociation.table.c.id, backref="pages" ), ratings=relation( PageRatingAssociation, order_by=PageRatingAssociation.table.c.id, backref="pages" ) ) ) - + assign_mapper( context, ToolShedRepository, ToolShedRepository.table ) # Set up proxy so that --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/lib/galaxy/model/migrate/versions/0083_add_prepare_files_to_task.py Wed Oct 12 11:19:42 2011 -0400 @@ -0,0 +1,63 @@ +""" +Migration script to add 'prepare_input_files_cmd' column to the task table and to rename a column. 
+""" + +from sqlalchemy import * +from sqlalchemy.orm import * +from migrate import * +from migrate.changeset import * + +import logging +log = logging.getLogger( __name__ ) + +metadata = MetaData( migrate_engine ) +db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) ) + +def upgrade(): + print __doc__ + metadata.reflect() + try: + task_table = Table( "task", metadata, autoload=True ) + c = Column( "prepare_input_files_cmd", TEXT, nullable=True ) + c.create( task_table ) + assert c is task_table.c.prepare_input_files_cmd + except Exception, e: + print "Adding prepare_input_files_cmd column to task table failed: %s" % str( e ) + log.debug( "Adding prepare_input_files_cmd column to task table failed: %s" % str( e ) ) + try: + task_table = Table( "task", metadata, autoload=True ) + c = Column( "working_directory", String ( 1024 ), nullable=True ) + c.create( task_table ) + assert c is task_table.c.working_directory + except Exception, e: + print "Adding working_directory column to task table failed: %s" % str( e ) + log.debug( "Adding working_directory column to task table failed: %s" % str( e ) ) + + # remove the 'part_file' column - nobody used tasks before this, so no data needs to be migrated + try: + task_table.c.part_file.drop() + except Exception, e: + log.debug( "Deleting column 'part_file' from the 'task' table failed: %s" % ( str( e ) ) ) + +def downgrade(): + metadata.reflect() + try: + task_table = Table( "task", metadata, autoload=True ) + task_table.c.prepare_input_files_cmd.drop() + except Exception, e: + print "Dropping prepare_input_files_cmd column from task table failed: %s" % str( e ) + log.debug( "Dropping prepare_input_files_cmd column from task table failed: %s" % str( e ) ) + try: + task_table = Table( "task", metadata, autoload=True ) + task_table.c.working_directory.drop() + except Exception, e: + print "Dropping working_directory column from task table failed: %s" % str( e ) + log.debug( "Dropping working_directory column from task table failed: %s" % str( e ) ) + try: + task_table = Table( "task", metadata, autoload=True ) + c = Column( "part_file", String ( 1024 ), nullable=True ) + c.create( task_table ) + assert c is task_table.c.part_file + except Exception, e: + print "Adding part_file column to task table failed: %s" % str( e ) + log.debug( "Adding part_file column to task table failed: %s" % str( e ) ) --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/lib/galaxy/model/migrate/versions/0084_add_ldda_id_to_implicit_conversion_table.py Wed Oct 12 11:19:42 2011 -0400 @@ -0,0 +1,35 @@ +""" +Migration script to add 'ldda_id' column to the implicitly_converted_dataset_association table. 
+""" + +from sqlalchemy import * +from sqlalchemy.orm import * +from migrate import * +from migrate.changeset import * + +import logging +log = logging.getLogger( __name__ ) + +metadata = MetaData( migrate_engine ) +db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) ) + +def upgrade(): + print __doc__ + metadata.reflect() + try: + Implicitly_converted_table = Table( "implicitly_converted_dataset_association", metadata, autoload=True ) + c = Column( "ldda_id", Integer, ForeignKey( "library_dataset_dataset_association.id" ), index=True, nullable=True ) + c.create( Implicitly_converted_table ) + assert c is Implicitly_converted_table.c.ldda_id + except Exception, e: + print "Adding ldda_id column to implicitly_converted_dataset_association table failed: %s" % str( e ) + log.debug( "Adding ldda_id column to implicitly_converted_dataset_association table failed: %s" % str( e ) ) + +def downgrade(): + metadata.reflect() + try: + Implicitly_converted_table = Table( "implicitly_converted_dataset_association", metadata, autoload=True ) + Implicitly_converted_table.c.ldda_id.drop() + except Exception, e: + print "Dropping ldda_id column from implicitly_converted_dataset_association table failed: %s" % str( e ) + log.debug( "Dropping ldda_id column from implicitly_converted_dataset_association table failed: %s" % str( e ) ) --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/lib/galaxy/model/migrate/versions/0085_add_task_info.py Wed Oct 12 11:19:42 2011 -0400 @@ -0,0 +1,36 @@ +""" +Migration script to add 'info' column to the task table. +""" + +from sqlalchemy import * +from sqlalchemy.orm import * +from migrate import * +from migrate.changeset import * + +import logging +log = logging.getLogger( __name__ ) +from galaxy.model.custom_types import TrimmedString + +metadata = MetaData( migrate_engine ) +db_session = scoped_session( sessionmaker( bind=migrate_engine, autoflush=False, autocommit=True ) ) + +def upgrade(): + print __doc__ + metadata.reflect() + try: + task_table = Table( "task", metadata, autoload=True ) + c = Column( "info", TrimmedString (255) , nullable=True ) + c.create( task_table ) + assert c is task_table.c.info + except Exception, e: + print "Adding info column to table table failed: %s" % str( e ) + log.debug( "Adding info column to task table failed: %s" % str( e ) ) + +def downgrade(): + metadata.reflect() + try: + task_table = Table( "task", metadata, autoload=True ) + task_table.c.info.drop() + except Exception, e: + print "Dropping info column from task table failed: %s" % str( e ) + log.debug( "Dropping info column from task table failed: %s" % str( e ) ) --- a/lib/galaxy/tools/__init__.py Mon Oct 10 18:42:31 2011 -0400 +++ b/lib/galaxy/tools/__init__.py Wed Oct 12 11:19:42 2011 -0400 @@ -352,6 +352,21 @@ self.fabfile = fabfile self.method = method +class ToolParallelismInfo(object): + """ + Stores the information (if any) for running multiple instances of the tool in parallel + on the same set of inputs. + """ + def __init__(self, tag): + self.method = tag.get('method') + self.attributes = dict([item for item in tag.attrib.items() if item[0] != 'method' ]) + if len(self.attributes) == 0: + # legacy basic mode - provide compatible defaults + self.attributes['split_size'] = 20 + self.attributes['split_mode'] = 'number_of_parts' + + + class Tool: """ Represents a computational tool that can be executed through Galaxy. @@ -442,7 +457,7 @@ # Parallelism for tasks, read from tool config. 
parallelism = root.find("parallelism") if parallelism is not None and parallelism.get("method"): - self.parallelism = parallelism.get("method") + self.parallelism = ToolParallelismInfo(parallelism) else: self.parallelism = None if self.app.config.start_job_runners is None: --- a/lib/galaxy/util/__init__.py Mon Oct 10 18:42:31 2011 -0400 +++ b/lib/galaxy/util/__init__.py Wed Oct 12 11:19:42 2011 -0400 @@ -623,6 +623,9 @@ gbrowse_build_sites = read_build_sites( os.path.join( galaxy_root_path, "tool-data", "shared", "gbrowse", "gbrowse_build_sites.txt" ) ) genetrack_sites = read_build_sites( os.path.join( galaxy_root_path, "tool-data", "shared", "genetrack", "genetrack_sites.txt" ), check_builds=False ) +def galaxy_directory(): + return os.path.abspath(galaxy_root_path) + if __name__ == '__main__': import doctest, sys doctest.testmod(sys.modules[__name__], verbose=False) --- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/scripts/extract_dataset_part.py Wed Oct 12 11:19:42 2011 -0400 @@ -0,0 +1,48 @@ +""" +Reads a JSON file and uses it to call into a datatype class to extract +a subset of a dataset for processing. + +Used by jobs that split large files into pieces to be processed concurrently +on a grid in a scatter-gather mode. This does part of the scatter. + +""" +import os +import sys +import logging +logging.basicConfig() +log = logging.getLogger( __name__ ) + +new_path = [ os.path.join( os.getcwd(), "lib" ) ] +new_path.extend( sys.path[1:] ) # remove scripts/ from the path +sys.path = new_path + +from galaxy import eggs +import pkg_resources +pkg_resources.require("simplejson") +import simplejson + +# This junk is here to prevent loading errors +import galaxy.model.mapping #need to load this before we unpickle, in order to setup properties assigned by the mappers +galaxy.model.Job() #this looks REAL stupid, but it is REQUIRED in order for SA to insert parameters into the classes defined by the mappers --> it appears that instantiating ANY mapper'ed class would suffice here +galaxy.datatypes.metadata.DATABASE_CONNECTION_AVAILABLE = False #Let metadata know that there is no database connection, and to just assume object ids are valid + +def __main__(): + """ + Argument: a JSON file + """ + file_path = sys.argv.pop( 1 ) + data = simplejson.load(open(file_path, 'r')) + try: + class_name_parts = data['class_name'].split('.') + module_name = '.'.join(class_name_parts[:-1]) + class_name = class_name_parts[-1] + mod = __import__(module_name, globals(), locals(), [class_name]) + cls = getattr(mod, class_name) + if not cls.process_split_file(data): + sys.stderr.write('Writing split file failed\n') + sys.exit(1) + except Exception, e: + sys.stderr.write(str(e)) + sys.exit(1) + +__main__() http://bitbucket.org/galaxy/galaxy-central/changeset/44731af3e1c0/ changeset: 44731af3e1c0 user: dannon date: 2011-10-12 17:19:58 summary: Merge affected #: 9 files (-1 bytes) --- a/lib/galaxy/visualization/tracks/data_providers.py Wed Oct 12 11:19:42 2011 -0400 +++ b/lib/galaxy/visualization/tracks/data_providers.py Wed Oct 12 11:19:58 2011 -0400 @@ -205,7 +205,10 @@ # Score (filter data) if length >= 5 and filter_cols and filter_cols[0] == "Score": - payload.append( float(feature[4]) ) + try: + payload.append( float( feature[4] ) ) + except: + payload.append( feature[4] ) rval.append( payload ) @@ -804,7 +807,7 @@ # Return full feature. payload = [ feature.start, feature.end, - feature.name(), + feature.name(), feature.strand, # No notion of thick start, end in GFF, so make everything # thick.
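# (editor's aside, not part of the diff) The score-handling hunks on either side of this point all apply one defensive pattern: coerce to float for filtering, and fall back to the raw value rather than erroring on non-numeric data. As a standalone sketch:
#    def numeric_or_raw( value ):
#        try:
#            return float( value )
#        except ValueError:
#            return value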
@@ -828,9 +831,16 @@ # Add filter data to payload. for col in filter_cols: if col == "Score": - payload.append( feature.score ) + try: + payload.append( float( feature.score ) ) + except: + payload.append( feature.score ) elif col in feature.attributes: - payload.append( feature.attributes[col] ) + try: + payload.append( float( feature.attributes[col] ) ) + except: + # Feature is not a float. + payload.append( feature.attributes[col] ) else: # Dummy value. payload.append( "na" ) --- a/lib/galaxy/web/controllers/dataset.py Wed Oct 12 11:19:42 2011 -0400 +++ b/lib/galaxy/web/controllers/dataset.py Wed Oct 12 11:19:58 2011 -0400 @@ -1134,13 +1134,13 @@ if history in target_histories: refresh_frames = ['history'] trans.sa_session.flush() - hist_names_str = ", ".join( [ hist.name for hist in target_histories ] ) + hist_names_str = ", ".join( ['<a href="%s" target="_top">%s</a>' % + ( url_for( controller="history", action="switch_to_history", \ + hist_id=trans.security.encode_id( hist.id ) ), hist.name ) \ + for hist in target_histories ] ) num_source = len( source_dataset_ids ) - invalid_datasets num_target = len(target_histories) done_msg = "%i %s copied to %i %s: %s." % (num_source, inflector.cond_plural(num_source, "dataset"), num_target, inflector.cond_plural(num_target, "history"), hist_names_str ) - if new_history is not None: - done_msg += " <a href=\"%s\" target=\"_top\">Switch to the new history.</a>" % url_for( - controller="history", action="switch_to_history", hist_id=trans.security.encode_id( new_history.id ) ) trans.sa_session.refresh( history ) source_datasets = history.visible_datasets target_histories = [history] --- a/static/june_2007_style/blue/trackster.css Wed Oct 12 11:19:42 2011 -0400 +++ b/static/june_2007_style/blue/trackster.css Wed Oct 12 11:19:58 2011 -0400 @@ -24,7 +24,7 @@ .track{background:white;} .track-header{text-align:left;padding:4px 0px;color:#666;} .track-header .menubutton{margin-left:0px;} -.track-content{overflow:hidden;text-align:center;border-top:1px solid #eee;border-bottom:2px solid #eee;background:#eee url('/static/images/tracks/diag_bg.gif');min-height:16px;} +.track-content{text-align:center;border-top:1px solid #eee;border-bottom:2px solid #eee;background:#eee url('/static/images/tracks/diag_bg.gif');min-height:16px;} .label-track .track-content{background:white;} .track-tile{background:white;} .track-tile canvas{position:relative;z-index:100;} @@ -62,3 +62,4 @@ .icon.more-across{background:url('../images/fugue/arrow-transition-bw.png') no-repeat 0px 0px;} .intro{padding:1em;} .intro > .action-button{background-color:#CCC;padding:1em;} +.feature-popup{background-color:#DDD;position:absolute;z-index:1000} --- a/static/june_2007_style/trackster.css.tmpl Wed Oct 12 11:19:42 2011 -0400 +++ b/static/june_2007_style/trackster.css.tmpl Wed Oct 12 11:19:58 2011 -0400 @@ -156,7 +156,6 @@ } .track-content { - overflow: hidden; text-align: center; border-top: 1px solid #eee; border-bottom: 2px solid #eee; @@ -335,4 +334,8 @@ background-color: #CCC; padding: 1em; } - +.feature-popup{ + background-color: #DDD; + position: absolute; + z-index: 1000 +} --- a/static/scripts/packed/trackster.js Wed Oct 12 11:19:42 2011 -0400 +++ b/static/scripts/packed/trackster.js Wed Oct 12 11:19:58 2011 -0400 @@ -1,1 +1,1 @@ [diff of the packed/minified trackster.js bundle omitted: it is a machine-generated single line regenerated from the trackster source, with no hand-written changes beyond those shown above]
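For reference, the hand-off between write_split_files() in sequence.py and scripts/extract_dataset_part.py above is one small JSON document per task directory. A sketch of its shape, with hypothetical paths and counts but the keys actually built in write_split_files():

    # task_0/split_info_dataset_1.dat.json, rendered as the dict that gets dumped
    split_data = dict( class_name='galaxy.datatypes.sequence.Fastq',
                       output_name='task_0/dataset_1.dat',
                       input_name='/galaxy/files/000/dataset_1.dat',
                       args=dict( start_sequence=0, num_sequences=1000 ) )
    # args also carries 'toc_file' when an FQTOC converter dataset exists;
    # extract_dataset_part.py __import__'s class_name and calls
    # cls.process_split_file( split_data ) to produce the part.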
ai=this.high-this.low,aj=ai/2+this.low,ah=(ai*this.zoom_factor)/2;this.low=Math.round(aj-ah);this.high=Math.round(aj+ah);this.request_redraw()},resize_window:function(){this.viewport_container.height(this.container.height()-this.top_container.height()-this.bottom_container.height());this.nav_container.width(this.container.width());this.request_redraw()},set_overview:function(ah){$.when(ah.get_overview_tile()).then(function(ai){view.overview_viewport.find(".track-tile").remove();view.overview_close.show();view.overview_viewport.append(ai.canvas);view.overview_highlight.show().height(ai.canvas.height());view.overview_viewport.height(ai.canvas.height()+view.overview_box.outerHeight());view.resize_window();if(view.overview_track){view.overview_track.set_is_overview(false)}view.overview_track=ah;ah.set_is_overview(true)});view.has_changes=true},reset_overview:function(){this.overview_viewport.find(".track-tile").remove();this.overview_viewport.height(this.default_overview_height);this.overview_box.height(this.default_overview_height);this.overview_close.hide();this.overview_highlight.hide();view.resize_window();if(view.overview_track){view.overview_track.set_is_overview(false)}view.overview_track=null}});var s=function(aj,an){this.track=aj;this.name=an.name;this.params=[];var av=an.params;for(var ak=0;ak<av.length;ak++){var ap=av[ak],ai=ap.name,au=ap.label,al=unescape(ap.html),aw=ap.value,ar=ap.type;if(ar==="number"){this.params[this.params.length]=new g(ai,au,al,aw,ap.min,ap.max)}else{if(ar=="select"){this.params[this.params.length]=new P(ai,au,al,aw)}else{console.log("WARNING: unrecognized tool parameter type:",ai,ar)}}}this.parent_div=$("<div/>").addClass("dynamic-tool").hide();this.parent_div.bind("drag",function(ay){ay.stopPropagation()}).click(function(ay){ay.stopPropagation()}).bind("dblclick",function(ay){ay.stopPropagation()});var at=$("<div class='tool-name'>").appendTo(this.parent_div).text(this.name);var aq=this.params;var ao=this;$.each(this.params,function(az,aC){var aB=$("<div>").addClass("param-row").appendTo(ao.parent_div);var ay=$("<div>").addClass("param-label").text(aC.label).appendTo(aB);var aA=$("<div/>").addClass("slider").html(aC.html).appendTo(aB);aA.find(":input").val(aC.value);$("<div style='clear: both;'/>").appendTo(aB)});this.parent_div.find("input").click(function(){$(this).select()});var ax=$("<div>").addClass("param-row").appendTo(this.parent_div);var am=$("<input type='submit'>").attr("value","Run on complete dataset").appendTo(ax);var ah=$("<input type='submit'>").attr("value","Run on visible region").css("margin-left","3em").appendTo(ax);var ao=this;ah.click(function(){ao.run_on_region()});am.click(function(){ao.run_on_dataset()})};q(s.prototype,{get_param_values_dict:function(){var ah={};this.parent_div.find(":input").each(function(){var ai=$(this).attr("name"),aj=$(this).val();ah[ai]=JSON.stringify(aj)});return ah},get_param_values:function(){var ai=[];var ah={};this.parent_div.find(":input").each(function(){var aj=$(this).attr("name"),ak=$(this).val();if(aj){ai[ai.length]=ak}});return ai},run_on_dataset:function(){var ah=this;ah.run({dataset_id:this.track.original_dataset_id,tool_id:ah.name},null,function(ai){show_modal(ah.name+" is Running",ah.name+" is running on the complete dataset. 
Tool outputs are in dataset's history.",{Close:hide_modal})})},run_on_region:function(){var ai={dataset_id:this.track.original_dataset_id,chrom:this.track.view.chrom,low:this.track.view.low,high:this.track.view.high,tool_id:this.name},al=this.track,aj=ai.tool_id+al.tool_region_and_parameters_str(ai.chrom,ai.low,ai.high),ah,am;if(al.container===view){var ak=new S(this.name,this.track.view,this.track.container);al.container.add_drawable(ak);al.container.remove_drawable(al);ak.add_drawable(al);al.container_div.appendTo(ak.content_div);ah=ak}else{ah=al.container}if(al instanceof e){am=new X(aj,view,ah,"hda");am.change_mode(al.mode);ah.add_drawable(am)}am.content_div.text("Starting job.");this.run(ai,am,function(an){am.dataset_id=an.dataset_id;am.content_div.text("Running job.");am.init()})},run:function(ai,aj,ak){$.extend(ai,this.get_param_values_dict());var ah=function(){$.getJSON(rerun_tool_url,ai,function(al){if(al==="no converter"){aj.container_div.addClass("error");aj.content_div.text(L)}else{if(al.error){aj.container_div.addClass("error");aj.content_div.text(z+al.message)}else{if(al==="pending"){aj.container_div.addClass("pending");aj.content_div.text("Converting input data so that it can be used quickly with tool.");setTimeout(ah,2000)}else{ak(al)}}}})};ah()}});var P=function(ai,ah,aj,ak){this.name=ai;this.label=ah;this.html=aj;this.value=ak};var g=function(aj,ai,al,am,ak,ah){P.call(this,aj,ai,al,am);this.min=ak;this.max=ah};var h=function(ai,ah,aj,ak){this.name=ai;this.index=ah;this.tool_id=aj;this.tool_exp_name=ak};var Y=function(ai,ah,aj,ak){h.call(this,ai,ah,aj,ak);this.low=-Number.MAX_VALUE;this.high=Number.MAX_VALUE;this.min=Number.MAX_VALUE;this.max=-Number.MAX_VALUE;this.container=null;this.slider=null;this.slider_label=null};q(Y.prototype,{applies_to:function(ah){if(ah.length>this.index){return true}return false},keep:function(ah){if(!this.applies_to(ah)){return true}var ai=parseFloat(ah[this.index]);return(isNaN(ai)||(ai>=this.low&&ai<=this.high))},update_attrs:function(ai){var ah=false;if(!this.applies_to(ai)){return ah}if(ai[this.index]<this.min){this.min=Math.floor(ai[this.index]);ah=true}if(ai[this.index]>this.max){this.max=Math.ceil(ai[this.index]);ah=true}return ah},update_ui_elt:function(){if(this.min!=this.max){this.container.show()}else{this.container.hide()}var aj=function(am,ak){var al=ak-am;return(al<=2?0.01:1)};var ai=this.slider.slider("option","min"),ah=this.slider.slider("option","max");if(this.min<ai||this.max>ah){this.slider.slider("option","min",this.min);this.slider.slider("option","max",this.max);this.slider.slider("option","step",aj(this.min,this.max));this.slider.slider("option","values",[this.min,this.max])}}});var ac=function(ar,ay){this.track=ar;this.filters=[];for(var at=0;at<ay.length;at++){var au=ay[at],az=au.name,ah=au.type,ak=au.index,ax=au.tool_id,aw=au.tool_exp_name;if(ah==="int"||ah==="float"){this.filters[at]=new Y(az,ak,ax,aw)}else{console.log("ERROR: unsupported filter: ",az,ah)}}var al=function(aA,aB,aC){aA.click(function(){var aD=aB.text();max=parseFloat(aC.slider("option","max")),input_size=(max<=1?4:max<=1000000?max.toString().length:6),multi_value=false;if(aC.slider("option","values")){input_size=2*input_size+1;multi_value=true}aB.text("");$("<input 
type='text'/>").attr("size",input_size).attr("maxlength",input_size).attr("value",aD).appendTo(aB).focus().select().click(function(aE){aE.stopPropagation()}).blur(function(){$(this).remove();aB.text(aD)}).keyup(function(aI){if(aI.keyCode===27){$(this).trigger("blur")}else{if(aI.keyCode===13){var aG=aC.slider("option","min"),aE=aC.slider("option","max"),aH=function(aJ){return(isNaN(aJ)||aJ>aE||aJ<aG)},aF=$(this).val();if(!multi_value){aF=parseFloat(aF);if(aH(aF)){alert("Parameter value must be in the range ["+aG+"-"+aE+"]");return $(this)}}else{aF=aF.split("-");aF=[parseFloat(aF[0]),parseFloat(aF[1])];if(aH(aF[0])||aH(aF[1])){alert("Parameter value must be in the range ["+aG+"-"+aE+"]");return $(this)}}aC.slider((multi_value?"values":"value"),aF)}}})})};this.parent_div=$("<div/>").addClass("filters").hide();this.parent_div.bind("drag",function(aA){aA.stopPropagation()}).click(function(aA){aA.stopPropagation()}).bind("dblclick",function(aA){aA.stopPropagation()}).bind("keydown",function(aA){aA.stopPropagation()});var av=$("<div/>").addClass("sliders").appendTo(this.parent_div);var ap=this;$.each(this.filters,function(aD,aF){aF.container=$("<div/>").addClass("slider-row").appendTo(av);var aE=$("<div/>").addClass("elt-label").appendTo(aF.container);var aC=$("<span/>").addClass("slider-name").text(aF.name+" ").appendTo(aE);var aB=$("<span/>");var aH=$("<span/>").addClass("slider-value").appendTo(aE).append("[").append(aB).append("]");var aA=$("<div/>").addClass("slider").appendTo(aF.container);aF.control_element=$("<div/>").attr("id",aF.name+"-filter-control").appendTo(aA);var aG=[0,0];aF.control_element.slider({range:true,min:Number.MAX_VALUE,max:-Number.MIN_VALUE,values:[0,0],slide:function(aJ,aK){var aI=aK.values;aB.text(aI[0]+"-"+aI[1]);aF.low=aI[0];aF.high=aI[1];ap.track.request_draw(true,true)},change:function(aI,aJ){aF.control_element.slider("option","slide").call(aF.control_element,aI,aJ)}});aF.slider=aF.control_element;aF.slider_label=aB;al(aH,aB,aF.control_element);$("<div style='clear: both;'/>").appendTo(aF.container)});if(this.filters.length!==0){var am=$("<div/>").addClass("param-row").appendTo(av);var ao=$("<input type='submit'/>").attr("value","Run on complete dataset").appendTo(am);var aj=this;ao.click(function(){aj.run_on_dataset()})}var aq=$("<div/>").addClass("display-controls").appendTo(this.parent_div),an=$("<span/>").addClass("elt-label").text("Transparency:").appendTo(aq),ai=$("<select/>").attr("name","alpha_dropdown").appendTo(aq);this.alpha_filter=null;$("<option/>").attr("value",-1).text("== None ==").appendTo(ai);for(var at=0;at<this.filters.length;at++){$("<option/>").attr("value",at).text(this.filters[at].name).appendTo(ai)}ai.change(function(){$(this).children("option:selected").each(function(){var aA=parseInt($(this).val());ap.alpha_filter=(aA>=0?ap.filters[aA]:null);ap.track.request_draw(true,true)})});$("<div style='clear: both;'/>").appendTo(this.parent_div)};q(ac.prototype,{reset_filters:function(){for(var ah=0;ah<this.filters.length;ah++){filter=this.filters[ah];filter.slider.slider("option","values",[filter.min,filter.max])}this.alpha_filter=null},run_on_dataset:function(){var ap=function(au,ar,at){if(!(ar in au)){au[ar]=at}return au[ar]};var aj={},ah,ai,ak;for(var al=0;al<this.filters.length;al++){ah=this.filters[al];if(ah.tool_id){if(ah.min!=ah.low){ai=ap(aj,ah.tool_id,[]);ai[ai.length]=ah.tool_exp_name+" >= "+ah.low}if(ah.max!=ah.high){ai=ap(aj,ah.tool_id,[]);ai[ai.length]=ah.tool_exp_name+" <= "+ah.high}}}var an=[];for(var aq in 
aj){an[an.length]=[aq,aj[aq]]}var ao=an.length;(function am(ay,av){var at=av[0],au=at[0],ax=at[1],aw="("+ax.join(") and (")+")",ar={cond:aw,input:ay,target_dataset_id:ay,tool_id:au},av=av.slice(1);$.getJSON(run_tool_url,ar,function(az){if(az.error){show_modal("Filter Dataset","Error running tool "+au,{Close:hide_modal})}else{if(av.length===0){show_modal("Filtering Dataset","Filter(s) are running on the complete dataset. Outputs are in dataset's history.",{Close:hide_modal})}else{am(az.dataset_id,av)}}})})(this.track.dataset_id,an)}});var Q=function(ai,ah){N.AlphaGenerator.call(this,ah);this.filter=ai};Q.prototype.gen_alpha=function(ah){if(this.filter.high===Number.MAX_VALUE||this.filter.low===-Number.MAX_VALUE||this.filter.low===this.filter.high){return this.default_alpha}return((parseFloat(ah[this.filter.index])-this.filter.low)/(this.filter.high-this.filter.low))};var H=function(ah){this.track=ah.track;this.params=ah.params;this.values={};this.restore_values((ah.saved_values?ah.saved_values:{}));this.onchange=ah.onchange};q(H.prototype,{restore_values:function(ah){var ai=this;$.each(this.params,function(aj,ak){if(ah[ak.key]!==undefined){ai.values[ak.key]=ah[ak.key]}else{ai.values[ak.key]=ak.default_value}})},build_form:function(){var ai=this;var ah=$("<div />");$.each(this.params,function(am,ak){if(!ak.hidden){var aj="param_"+am;var ao=ai.values[ak.key];var ar=$("<div class='form-row' />").appendTo(ah);ar.append($("<label />").attr("for",aj).text(ak.label+":"));if(ak.type==="bool"){ar.append($('<input type="checkbox" />').attr("id",aj).attr("name",aj).attr("checked",ao))}else{if(ak.type==="text"){ar.append($('<input type="text"/>').attr("id",aj).val(ao).click(function(){$(this).select()}))}else{if(ak.type==="color"){var an=$("<input />").attr("id",aj).attr("name",aj).val(ao);var ap=$("<div class='tipsy tipsy-north' style='position: absolute;' />").hide();var al=$("<div style='background-color: black; padding: 10px;'></div>").appendTo(ap);var aq=$("<div/>").appendTo(al).farbtastic({width:100,height:100,callback:an,color:ao});$("<div />").append(an).append(ap).appendTo(ar).bind("click",function(at){ap.css({left:$(this).position().left+($(an).width()/2)-60,top:$(this).position().top+$(this.height)}).show();$(document).bind("click.color-picker",function(){ap.hide();$(document).unbind("click.color-picker")});at.stopPropagation()})}else{ar.append($("<input />").attr("id",aj).attr("name",aj).val(ao))}}}}});return ah},update_from_form:function(ah){var aj=this;var ai=false;$.each(this.params,function(ak,am){if(!am.hidden){var an="param_"+ak;var al=ah.find("#"+an).val();if(am.type==="float"){al=parseFloat(al)}else{if(am.type==="int"){al=parseInt(al)}else{if(am.type==="bool"){al=ah.find("#"+an).is(":checked")}}}if(al!==aj.values[am.key]){aj.values[am.key]=al;ai=true}}});if(ai){this.onchange()}}});var b=function(aj,ai,ah,ak){this.index=aj;this.low=aj*T*ai;this.high=(aj+1)*T*ai;this.resolution=ai;this.canvas=$("<div class='track-tile'/>").append(ah);this.data=ak;this.stale=false};var l=function(aj,ai,ah,ak,al){b.call(this,aj,ai,ah,ak);this.max_val=al};var R=function(ak,aj,ai,an,am,ah){b.call(this,ak,aj,ai,an);this.message=am;this.feature_mapper=ah;var al=this;$(this.canvas).mousemove(function(ap){var ao=al.feature_mapper.get_feature_data(ap.offsetX,ap.offsetY)})};var 
j=function(ak,ai,ah,aj,al,am){r.call(this,ak,ai,ah,{},"draghandle");this.data_url=(al?al:default_data_url);this.data_url_extra_params={};this.data_query_wait=(am?am:M);this.dataset_check_url=converted_datasets_state_url;if(!j.id_counter){j.id_counter=0}this.container_div=$("<div />").addClass("track").attr("id","track_"+j.id_counter++).css("position","relative");if(!this.hidden){this.header_div=$("<div class='track-header' />").appendTo(this.container_div);if(this.view.editor){this.drag_div=$("<div/>").addClass(this.drag_handle_class).appendTo(this.header_div)}this.name_div=$("<div class='menubutton popup' />").appendTo(this.header_div);this.name_div.text(this.name);this.name_div.attr("id",this.name.replace(/\s+/g,"-").replace(/[^a-zA-Z0-9\-]/g,"").toLowerCase())}this.content_div=$("<div class='track-content'>").appendTo(this.container_div);this.container.content_div.append(this.container_div)};q(j.prototype,r.prototype,{get_type:function(){if(this instanceof ad){return"LabelTrack"}else{if(this instanceof C){return"ReferenceTrack"}else{if(this instanceof k){return"LineTrack"}else{if(this instanceof Z){return"ReadTrack"}else{if(this instanceof X){return"ToolDataFeatureTrack"}else{if(this instanceof W){return"VcfTrack"}else{if(this instanceof e){return"FeatureTrack"}}}}}}}return""},init:function(){var ah=this;ah.enabled=false;ah.tile_cache.clear();ah.data_manager.clear();ah.initial_canvas=undefined;ah.content_div.css("height","auto");ah.container_div.removeClass("nodata error pending");if(!ah.dataset_id){return}$.getJSON(converted_datasets_state_url,{hda_ldda:ah.hda_ldda,dataset_id:ah.dataset_id,chrom:ah.view.chrom},function(ai){if(!ai||ai==="error"||ai.kind==="error"){ah.container_div.addClass("error");ah.content_div.text(p);if(ai.message){var ak=ah.view.tracks.indexOf(ah);var aj=$(" <a href='javascript:void(0);'></a>").text("View error").click(function(){show_modal("Trackster Error","<pre>"+ai.message+"</pre>",{Close:hide_modal})});ah.content_div.append(aj)}}else{if(ai==="no converter"){ah.container_div.addClass("error");ah.content_div.text(L)}else{if(ai==="no data"||(ai.data!==undefined&&(ai.data===null||ai.data.length===0))){ah.container_div.addClass("nodata");ah.content_div.text(G)}else{if(ai==="pending"){ah.container_div.addClass("pending");ah.content_div.text(u);setTimeout(function(){ah.init()},ah.data_query_wait)}else{if(ai.status==="data"){if(ai.valid_chroms){ah.valid_chroms=ai.valid_chroms;ah.make_name_popup_menu()}ah.content_div.text(ab);if(ah.view.chrom){ah.content_div.text("");ah.content_div.css("height",ah.height_px+"px");ah.enabled=true;$.when(ah.predraw_init()).done(function(){ah.container_div.removeClass("nodata error pending");ah.request_draw()})}}}}}}})},predraw_init:function(){},});var O=function(ao,am){var ai=this,ap=ai.view;n(ai.container_div,ai.drag_handle_class,".group",ai);this.filters_manager=new ac(this,(ao!==undefined?ao:{}));this.filters_available=false;this.filters_visible=false;this.tool=(am!==undefined&&obj_length(am)>0?new s(this,am):undefined);this.is_overview=false;if(ai.hidden){return}if(this.filters_manager){this.filters_div=this.filters_manager.parent_div;this.header_div.after(this.filters_div)}if(this.tool){this.dynamic_tool_div=this.tool.parent_div;this.header_div.after(this.dynamic_tool_div)}if(ai.display_modes!==undefined){if(ai.mode_div===undefined){ai.mode_div=$("<div class='right-float menubutton popup' />").appendTo(ai.header_div);var 
aj=(ai.config&&ai.config.values.mode?ai.config.values.mode:ai.display_modes[0]);ai.mode=aj;ai.mode_div.text(aj);var ah={};for(var ak=0,an=ai.display_modes.length;ak<an;ak++){var al=ai.display_modes[ak];ah[al]=function(aq){return function(){ai.change_mode(aq)}}(al)}make_popupmenu(ai.mode_div,ah)}else{ai.mode_div.hide()}}this.make_name_popup_menu()};q(O.prototype,r.prototype,j.prototype,{to_json:function(){return{track_type:this.get_type(),name:this.name,hda_ldda:this.hda_ldda,dataset_id:this.dataset_id,prefs:this.prefs,mode:this.mode,}},change_mode:function(ai){var ah=this;ah.mode_div.text(ai);ah.mode=ai;ah.config.values.mode=ai;ah.tile_cache.clear();ah.request_draw();return ah},make_name_popup_menu:function(){var ai=this;var ah={};ah[(this.is_overview?"Hide overview":"Set as overview")]=function(){if(ai.is_overview){ai.view.reset_overview()}else{ai.view.set_overview(ai)}};ah["Edit configuration"]=function(){var am=function(){hide_modal();$(window).unbind("keypress.check_enter_esc")},ak=function(){ai.config.update_from_form($(".dialog-box"));hide_modal();$(window).unbind("keypress.check_enter_esc")},al=function(an){if((an.keyCode||an.which)===27){am()}else{if((an.keyCode||an.which)===13){ak()}}};$(window).bind("keypress.check_enter_esc",al);show_modal("Configure Track",ai.config.build_form(),{Cancel:am,OK:ak})};if(ai.filters_available>0){var aj=(ai.filters_div.is(":visible")?"Hide filters":"Show filters");ah[aj]=function(){ai.filters_visible=(ai.filters_div.is(":visible"));if(ai.filters_visible){ai.filters_manager.reset_filters()}ai.filters_div.toggle();ai.make_name_popup_menu()}}if(ai.tool){var aj=(ai.dynamic_tool_div.is(":visible")?"Hide tool":"Show tool");ah[aj]=function(){if(!ai.dynamic_tool_div.is(":visible")){ai.set_name(ai.name+ai.tool_region_and_parameters_str())}else{menu_option_text="Show dynamic tool";ai.revert_name()}ai.dynamic_tool_div.toggle();ai.make_name_popup_menu()}}ah.Remove=function(){ai.remove()};make_popupmenu(ai.name_div,ah)},set_is_overview:function(ah){this.is_overview=ah;this.make_name_popup_menu()},get_overview_tile:function(){var ah=this;view=ah.view,resolution=Math.pow(E,Math.ceil(Math.log((view.max_high-view.max_low)/T)/Math.log(E))),view_width=view.container.width(),w_scale=view_width/(view.max_high-view.max_low),overview_tile=$.Deferred();$.when(ah.data_manager.get_data(view.max_low,view.max_high,"Auto",resolution,ah.data_url_extra_params)).then(function(ai){var ak=ah._gen_tile_cache_key(view_width,w_scale,0),am=ah.tile_cache.get(ak);if(!am){am=ah.draw_tile(ai,"Auto",resolution,0,w_scale);ah.tile_cache.set(ak,am)}var ap=$(am.canvas.find("canvas")),al=ap.clone(),ao=ap.get(0).getContext("2d"),aj=al.get(0).getContext("2d"),an=ao.getImageData(0,0,ao.canvas.width,ao.canvas.height);aj.putImageData(an,-ah.left_offset,(am.data.dataset_type==="summary_tree"?V:0));new_tile=new b(-1,resolution,al);overview_tile.resolve(new_tile)});return overview_tile},_gen_tile_cache_key:function(ai,aj,ah){return ai+"_"+aj+"_"+ah},request_draw:function(ai,ah){this.view.request_redraw(false,ai,ah,this)},_draw:function(aj,ar){if(!this.enabled){return}if(!(this instanceof C)&&(!this.dataset_id)){return}var aq=this.view.low,an=this.view.high,ao=an-aq,ak=this.view.container.width(),av=ak/ao,am=this.view.resolution,au=$("<div style='position: relative;'></div>");if(!ar){this.content_div.children().remove()}this.content_div.append(au);this.max_height=0;var ai=Math.floor(aq/am/T);var ap=true;var at=[];var 
ah=0;while((ai*T*am)<an){tile=this.draw_helper(aj,ak,ai,am,au,av);if(tile){at.push(tile)}else{ap=false}ai+=1;ah++}var al=this;if(ap){al.postdraw_actions(at,ak,av,ar)}},postdraw_actions:function(al,am,an,ah){var aj=this;var ak=false;for(var ai=0;ai<al.length;ai++){if(al[ai].message){ak=true;break}}if(ak){for(var ai=0;ai<al.length;ai++){tile=al[ai];if(!tile.message){tile.canvas.css("padding-top",F)}}}},draw_helper:function(ai,aj,ak,an,au,ay,av,ao){var al=this,at=this._gen_tile_cache_key(aj,ay,ak),ap=ak*T*an,ax=ap+T*an;var aq=(ai?undefined:al.tile_cache.get(at));if(aq){al.show_tile(aq,au,ay);return aq}var ar=function(az){return("isResolved" in az)};var am=true;var ah=al.data_manager.get_data(ap,ax,al.mode,an,al.data_url_extra_params);if(ar(ah)){am=false}var aw;if(view.reference_track&&ay>view.canvas_manager.char_width_px){aw=view.reference_track.data_manager.get_data(ap,ax,al.mode,an,view.reference_track.data_url_extra_params);if(ar(aw)){am=false}}if(am){q(ah,ao);var aq=al.draw_tile(ah,al.mode,an,ak,ay,aw);if(aq!==undefined){al.tile_cache.set(at,aq);al.show_tile(aq,au,ay)}return aq}$.when(ah,aw).then(function(){view.request_redraw()});return null},show_tile:function(ao,aq,ar){var aj=this,ai=ao.canvas,an=ai;if(ao.message){var at=$("<div/>"),ap=$("<div/>").addClass("tile-message").text(ao.message).css({height:F-1,width:ao.canvas.width}).appendTo(at),al=$("<a href='javascript:void(0);'/>").addClass("icon more-down").appendTo(ap),ah=$("<a href='javascript:void(0);'/>").addClass("icon more-across").appendTo(ap);at.append(ai);an=at;al.click(function(){ao.stale=true;aj.data_manager.get_more_data(ao.low,ao.high,aj.mode,ao.resolution,{},aj.data_manager.DEEP_DATA_REQ);aj.request_draw()}).dblclick(function(au){au.stopPropagation()});ah.click(function(){ao.stale=true;aj.data_manager.get_more_data(ao.low,ao.high,aj.mode,ao.resolution,{},aj.data_manager.BROAD_DATA_REQ);aj.request_draw()}).dblclick(function(au){au.stopPropagation()})}var am=this.view.high-this.view.low,ak=(ao.low-this.view.low)*ar;if(this.left_offset){ak-=this.left_offset}an.css({position:"absolute",top:0,left:ak,height:""});aq.append(an);aj.max_height=Math.max(aj.max_height,an.height());aj.content_div.css("height",aj.max_height+"px");aq.children().css("height",aj.max_height+"px")},_get_tile_bounds:function(ah,ai){var ak=ah*T*ai,al=T*ai,aj=(ak+al<=this.view.max_high?ak+al:this.view.max_high);return[ak,aj]},tool_region_and_parameters_str:function(aj,ah,ak){var ai=this,al=(aj!==undefined&&ah!==undefined&&ak!==undefined?aj+":"+ah+"-"+ak:"all");return" - region=["+al+"], parameters=["+ai.tool.get_param_values().join(", ")+"]"}});var ad=function(ai,ah){this.hidden=true;j.call(this,"label",ai,ah,{});this.container_div.addClass("label-track")};q(ad.prototype,j.prototype,{init:function(){this.enabled=true},_draw:function(){var aj=this.view,ak=aj.high-aj.low,an=Math.floor(Math.pow(10,Math.floor(Math.log(ak)/Math.log(10)))),ah=Math.floor(aj.low/an)*an,al=this.view.container.width(),ai=$("<div style='position: relative; height: 1.3em;'></div>");while(ah<aj.high){var am=(ah-aj.low)/ak*al;ai.append($("<div class='label'>"+commatize(ah)+"</div>").css({position:"absolute",left:am-1}));ah+=an}this.content_div.children(":first").remove();this.content_div.append(ai)}});var 
C=function(ah){this.hidden=true;j.call(this,"reference",ah,{content_div:ah.top_labeltrack},{});O.call(this);ah.reference_track=this;this.left_offset=200;this.height_px=12;this.container_div.addClass("reference-track");this.content_div.css("background","none");this.content_div.css("min-height","0px");this.content_div.css("border","none");this.data_url=reference_url;this.data_url_extra_params={dbkey:ah.dbkey};this.data_manager=new J(D,this,false);this.tile_cache=new c(v)};q(C.prototype,r.prototype,O.prototype,{init:function(){this.enabled=true},draw_tile:function(ar,an,am,ai,at){var al=this,aj=T*am;if(at>this.view.canvas_manager.char_width_px){if(ar===null){al.content_div.css("height","0px");return}var ak=this.view.canvas_manager.new_canvas();var aq=ak.getContext("2d");ak.width=Math.ceil(aj*at+al.left_offset);ak.height=al.height_px;aq.font=aq.canvas.manager.default_font;aq.textAlign="center";ar=ar.data;for(var ao=0,ap=ar.length;ao<ap;ao++){var ah=Math.round(ao*at);aq.fillText(ar[ao],ah+al.left_offset,10)}return new b(ai,am,ak,ar)}this.content_div.css("height","0px")}});var k=function(am,ak,aj,an,ah,al){var ai=this;this.display_modes=["Histogram","Line","Filled","Intensity"];this.mode="Histogram";j.call(this,am,ak,aj,al);O.call(this);this.min_height_px=16;this.max_height_px=400;this.height_px=80;this.hda_ldda=an;this.dataset_id=ah;this.original_dataset_id=ah;this.data_manager=new U(D,this);this.tile_cache=new c(v);this.left_offset=0;this.config=new H({track:this,params:[{key:"name",label:"Name",type:"text",default_value:am},{key:"color",label:"Color",type:"color",default_value:"black"},{key:"min_value",label:"Min Value",type:"float",default_value:undefined},{key:"max_value",label:"Max Value",type:"float",default_value:undefined},{key:"mode",type:"string",default_value:this.mode,hidden:true},{key:"height",type:"int",default_value:this.height_px,hidden:true}],saved_values:al,onchange:function(){ai.set_name(ai.prefs.name);ai.vertical_range=ai.prefs.max_value-ai.prefs.min_value;$("#linetrack_"+ai.track_id+"_minval").text(ai.prefs.min_value);$("#linetrack_"+ai.track_id+"_maxval").text(ai.prefs.max_value);ai.tile_cache.clear();ai.request_draw()}});this.prefs=this.config.values;this.height_px=this.config.values.height;this.vertical_range=this.config.values.max_value-this.config.values.min_value;this.add_resize_handle()};q(k.prototype,r.prototype,O.prototype,{add_resize_handle:function(){var ah=this;var ak=false;var aj=false;var ai=$("<div class='track-resize'>");$(ah.container_div).hover(function(){ak=true;ai.show()},function(){ak=false;if(!aj){ai.hide()}});ai.hide().bind("dragstart",function(al,am){aj=true;am.original_height=$(ah.content_div).height()}).bind("drag",function(am,an){var al=Math.min(Math.max(an.original_height+an.deltaY,ah.min_height_px),ah.max_height_px);$(ah.content_div).css("height",al);ah.height_px=al;ah.request_draw(true)}).bind("dragend",function(al,am){ah.tile_cache.clear();aj=false;if(!ak){ai.hide()}ah.config.values.height=ah.height_px}).appendTo(ah.container_div)},predraw_init:function(){var ah=this,ai=ah.view.tracks.indexOf(ah);ah.vertical_range=undefined;return $.getJSON(ah.data_url,{stats:true,chrom:ah.view.chrom,low:null,high:null,hda_ldda:ah.hda_ldda,dataset_id:ah.dataset_id},function(aj){ah.container_div.addClass("line-track");var 
al=aj.data;if(isNaN(parseFloat(ah.prefs.min_value))||isNaN(parseFloat(ah.prefs.max_value))){ah.prefs.min_value=al.min;ah.prefs.max_value=al.max;$("#track_"+ai+"_minval").val(ah.prefs.min_value);$("#track_"+ai+"_maxval").val(ah.prefs.max_value)}ah.vertical_range=ah.prefs.max_value-ah.prefs.min_value;ah.total_frequency=al.total_frequency;ah.container_div.find(".yaxislabel").remove();var am=$("<div />").addClass("yaxislabel").attr("id","linetrack_"+ai+"_minval").text(y(ah.prefs.min_value));var ak=$("<div />").addClass("yaxislabel").attr("id","linetrack_"+ai+"_maxval").text(y(ah.prefs.max_value));ak.css({position:"absolute",top:"24px",left:"10px"});ak.prependTo(ah.container_div);am.css({position:"absolute",bottom:"2px",left:"10px"});am.prependTo(ah.container_div)})},draw_tile:function(au,am,al,aj,at){if(this.vertical_range===undefined){return}var ah=this._get_tile_bounds(aj,al),an=ah[0],ar=ah[1],ai=Math.ceil((ar-an)*at),ap=this.height_px;var ak=this.view.canvas_manager.new_canvas();ak.width=ai,ak.height=ap;var aq=ak.getContext("2d");var ao=new N.LinePainter(au.data,an,ar,this.prefs,am);ao.draw(aq,ai,ap);return new b(aj,al,ak,au.data)}});var e=function(ah,an,ai,am,ap,ao,ak,al){var aj=this;this.display_modes=["Auto","Histogram","Dense","Squish","Pack"];j.call(this,ah,an,ai,ao);O.call(this,ak,al);this.config=new H({track:this,params:[{key:"name",label:"Name",type:"text",default_value:ah},{key:"block_color",label:"Block color",type:"color",default_value:"#444"},{key:"label_color",label:"Label color",type:"color",default_value:"black"},{key:"show_counts",label:"Show summary counts",type:"bool",default_value:true},{key:"mode",type:"string",default_value:this.mode,hidden:true},],saved_values:ao,onchange:function(){aj.set_name(aj.prefs.name);aj.tile_cache.clear();aj.request_draw()}});this.prefs=this.config.values;this.height_px=0;this.container_div.addClass("feature-track");this.hda_ldda=am;this.dataset_id=ap;this.original_dataset_id=ap;this.show_labels_scale=0.001;this.showing_details=false;this.summary_draw_height=30;this.inc_slots={};this.start_end_dct={};this.tile_cache=new c(d);this.data_manager=new U(20,this);this.left_offset=200;this.painter=N.LinkedFeaturePainter};q(e.prototype,r.prototype,O.prototype,{postdraw_actions:function(ax,ah,ay,aw){O.prototype.postdraw_actions.call(this,ax,aw);var ak=this;if(aw){var am=ak.content_div.children();var an=false;for(var al=am.length-1,ar=0;al>=ar;al--){var aj=$(am[al]);if(an){aj.remove()}else{if(aj.children().length!==0){an=true}}}}if(ak.mode=="Histogram"){var aq=-1;for(var al=0;al<ax.length;al++){var av=ax[al].max_val;if(av>aq){aq=av}}for(var al=0;al<ax.length;al++){var au=ax[al];if(au.max_val!==aq){au.canvas.remove();ak.draw_helper(true,ah,au.index,au.resolution,au.canvas.parent(),ay,[],{max:aq})}}}if(ak.filters_manager){var ai=ak.filters_manager.filters;for(var ap=0;ap<ai.length;ap++){ai[ap].update_ui_elt()}var ao=false,at;for(var al=0;al<ax.length;al++){if(ax[al].data.length){at=ax[al].data[0];for(var ap=0;ap<ai.length;ap++){if(ai[ap].applies_to(at)){ao=true;break}}}}if(ak.filters_available!==ao){ak.filters_available=ao;if(!ak.filters_available){ak.filters_div.hide()}ak.make_name_popup_menu()}}},update_auto_mode:function(ah){if(this.mode=="Auto"){if(ah=="no_detail"){ah="feature spans"}else{if(ah=="summary_tree"){ah="coverage histogram"}}this.mode_div.text("Auto ("+ah+")")}},incremental_slots:function(al,ai,ak){var aj=this.view.canvas_manager.dummy_context,ah=this.inc_slots[al];if(!ah||(ah.mode!==ak)){ah=new 
(t.FeatureSlotter)(al,ak==="Pack",B,function(am){return aj.measureText(am)});ah.mode=ak;this.inc_slots[al]=ah}return ah.slot_features(ai)},get_summary_tree_data:function(al,ao,aj,ax){if(ax>aj-ao){ax=aj-ao}var at=Math.floor((aj-ao)/ax),aw=[],ak=0;var am=0,an=0,ar,av=0,ap=[],au,aq;var ai=function(aA,az,aB,ay){aA[0]=az+aB*ay;aA[1]=az+(aB+1)*ay};while(av<ax&&am!==al.length){var ah=false;for(;av<ax&&!ah;av++){ai(ap,ao,av,at);for(an=am;an<al.length;an++){ar=al[an].slice(1,3);if(is_overlap(ar,ap)){ah=true;break}}if(ah){break}}data_start_index=an;aw[aw.length]=au=[ap[0],0];for(;an<al.length;an++){ar=al[an].slice(1,3);if(is_overlap(ar,ap)){au[1]++}else{break}}if(au[1]>ak){ak=au[1]}av++}return{max:ak,delta:at,data:aw}},draw_tile:function(aw,az,aE,aI,ar,ak){var aB=this,am=aB._get_tile_bounds(aI,aE),aL=am[0],ai=am[1],ay=ai-aL,aC=Math.ceil(ay*ar),aP=25,al=this.left_offset,ax,an;if(az==="Auto"){if(aw.dataset_type==="summary_tree"){az=aw.dataset_type}else{if(aw.extra_info==="no_detail"){az="no_detail"}else{var aO=aw.data;if(this.view.high-this.view.low>K){az="Squish"}else{az="Pack"}}}this.update_auto_mode(az)}if(az==="summary_tree"||az==="Histogram"){an=this.summary_draw_height;this.container_div.find(".yaxislabel").remove();var ah=$("<div />").addClass("yaxislabel");ah.text(aw.max);ah.css({position:"absolute",top:"24px",left:"10px",color:this.prefs.label_color});ah.prependTo(this.container_div);var aj=this.view.canvas_manager.new_canvas();aj.width=aC+al;aj.height=an+V;if(aw.dataset_type!="summary_tree"){var at=this.get_summary_tree_data(aw.data,aL,ai,200);if(aw.max){at.max=aw.max}aw=at}var aM=new N.SummaryTreePainter(aw,aL,ai,this.prefs);var aD=aj.getContext("2d");aD.translate(al,V);aM.draw(aD,aC,an);return new l(aI,aE,aj,aw.data,aw.max)}var ax,ap=1;if(az==="no_detail"||az==="Squish"||az==="Pack"){ap=this.incremental_slots(ar,aw.data,az);ax=this.inc_slots[ar].slots}var aq=[];if(aw.data){var au=this.filters_manager.filters;for(var aF=0,aH=aw.data.length;aF<aH;aF++){var ao=aw.data[aF];var aG=false;var av;for(var aK=0,aN=au.length;aK<aN;aK++){av=au[aK];av.update_attrs(ao);if(!av.keep(ao)){aG=true;break}}if(!aG){aq.push(ao)}}}var aA=(this.filters_manager.alpha_filter?new Q(this.filters_manager.alpha_filter):null);var aM=new (this.painter)(aq,aL,ai,this.prefs,az,aA,ak);var an=Math.max(af,aM.get_required_height(ap));var aj=this.view.canvas_manager.new_canvas();var aJ=null;aj.width=aC+al;aj.height=an;var aD=aj.getContext("2d");aD.fillStyle=this.prefs.block_color;aD.font=aD.canvas.manager.default_font;aD.textAlign="right";this.container_div.find(".yaxislabel").remove();if(aw.data){aD.translate(al,0);aJ=aM.draw(aD,aC,an,ax);aJ.translation=-al}return new R(aI,aE,aj,aw.data,aw.message,aJ)}});var W=function(al,aj,ai,an,ah,ak,am){e.call(this,al,aj,ai,an,ah,ak,am);this.painter=N.VariantPainter};q(W.prototype,r.prototype,O.prototype,e.prototype);var Z=function(al,aj,ai,an,ah,ak,am){e.call(this,al,aj,ai,an,ah,ak,am);this.config=new H({track:this,params:[{key:"name",label:"Name",type:"text",default_value:al},{key:"block_color",label:"Block color",type:"color",default_value:"#444"},{key:"label_color",label:"Label color",type:"color",default_value:"black"},{key:"show_insertions",label:"Show insertions",type:"bool",default_value:false},{key:"show_differences",label:"Show differences only",type:"bool",default_value:true},{key:"show_counts",label:"Show summary 
counts",type:"bool",default_value:true},{key:"mode",type:"string",default_value:this.mode,hidden:true},],saved_values:ak,onchange:function(){this.track.set_name(this.track.prefs.name);this.track.tile_cache.clear();this.track.request_draw()}});this.prefs=this.config.values;this.painter=N.ReadPainter;this.make_name_popup_menu()};q(Z.prototype,r.prototype,O.prototype,e.prototype);var X=function(al,aj,ai,an,ah,ak,am){e.call(this,al,aj,ai,an,ah,ak,am,{});this.data_url=raw_data_url;this.data_query_wait=1000;this.dataset_check_url=dataset_state_url};q(X.prototype,r.prototype,O.prototype,e.prototype,{predraw_init:function(){var ai=this;var ah=function(){if(ai.data_manager.size()===0){setTimeout(ah,300)}else{ai.data_url=default_data_url;ai.data_query_wait=M;ai.dataset_state_url=converted_datasets_state_url;$.getJSON(ai.dataset_state_url,{dataset_id:ai.dataset_id,hda_ldda:ai.hda_ldda},function(aj){})}};ah()}});aa.View=ae;aa.DrawableGroup=S;aa.LineTrack=k;aa.FeatureTrack=e;aa.ReadTrack=Z};var slotting_module=function(c,b){var e=c("class").extend;var d=2,a=5;b.FeatureSlotter=function(j,h,f,g){this.slots={};this.start_end_dct={};this.w_scale=j;this.include_label=h;this.max_rows=f;this.measureText=g};e(b.FeatureSlotter.prototype,{slot_features:function(m){var p=this.w_scale,s=this.slots,h=this.start_end_dct,y=[],A=[],n=0,z=this.max_rows;for(var w=0,x=m.length;w<x;w++){var l=m[w],o=l[0];if(s[o]!==undefined){n=Math.max(n,s[o]);A.push(s[o])}else{y.push(w)}}var q=function(G,H){for(var F=0;F<=z;F++){var D=false,I=h[F];if(I!==undefined){for(var C=0,E=I.length;C<E;C++){var B=I[C];if(H>B[0]&&G<B[1]){D=true;break}}}if(!D){return F}}return -1};for(var w=0,x=y.length;w<x;w++){var l=m[y[w]],o=l[0],u=l[1],f=l[2],r=l[3],g=Math.floor(u*p),k=Math.ceil(f*p),v=this.measureText(r).width,j;if(r!==undefined&&this.include_label){v+=(d+a);if(g-v>=0){g-=v;j="left"}else{k+=v;j="right"}}var t=q(g,k);if(t>=0){if(h[t]===undefined){h[t]=[]}h[t].push([g,k]);s[o]=t;n=Math.max(n,t)}else{}}return n+1}})};var painters_module=function(k,y){var v=k("class").extend;var q=function(J,B,H,A,G,E){if(E===undefined){E=4}var D=A-B;var C=G-H;var F=Math.floor(Math.sqrt(D*D+C*C)/E);var K=D/F;var I=C/F;var z;for(z=0;z<F;z++,B+=K,H+=I){if(z%2!==0){continue}J.fillRect(B,H,E,1)}};var r=function(B,A,z,E){var D=A-E/2,C=A+E/2,F=z-Math.sqrt(E*3/2);B.beginPath();B.moveTo(D,F);B.lineTo(C,F);B.lineTo(A,z);B.lineTo(D,F);B.strokeStyle=this.fillStyle;B.fill();B.stroke();B.closePath()};var g=function(z){this.default_alpha=(z?z:1)};g.prototype.gen_alpha=function(z){return this.default_alpha};var n=function(B,D,z,A,C){this.data=B;this.view_start=D;this.view_end=z;this.prefs=v({},this.default_prefs,A);this.mode=C};n.prototype.default_prefs={};var w=function(B,D,z,A,C){n.call(this,B,D,z,A,C)};w.prototype.default_prefs={show_counts:false};w.prototype.draw=function(M,z,L){var E=this.view_start,O=this.view_end-this.view_start,N=z/O;var J=this.data.data,I=this.data.delta,G=this.data.max,B=L;delta_x_px=Math.ceil(I*N);M.save();for(var C=0,D=J.length;C<D;C++){var H=Math.floor((J[C][0]-E)*N);var F=J[C][1];if(!F){continue}var K=F/G*L;if(F!==0&&K<1){K=1}M.fillStyle=this.prefs.block_color;M.fillRect(H,B-K,delta_x_px,K);var A=4;if(this.prefs.show_counts&&(M.measureText(F).width+A)<delta_x_px){M.fillStyle=this.prefs.label_color;M.textAlign="center";M.fillText(F,H+(delta_x_px/2),10)}}M.restore()};var c=function(z,D,F,G,B){n.call(this,z,D,F,G,B);if(this.prefs.min_value===undefined){var H=Infinity;for(var 
A=0,C=this.data.length;A<C;A++){H=Math.min(H,this.data[A][1])}this.prefs.min_value=H}if(this.prefs.max_value===undefined){var E=-Infinity;for(var A=0,C=this.data.length;A<C;A++){E=Math.max(E,this.data[A][1])}this.prefs.max_value=E}};c.prototype.default_prefs={min_value:undefined,max_value:undefined,mode:"Histogram",color:"#000",overflow_color:"#F66"};c.prototype.draw=function(N,M,K){var F=false,H=this.prefs.min_value,D=this.prefs.max_value,J=D-H,z=K,A=this.view_start,L=this.view_end-this.view_start,B=M/L,I=this.mode,T=this.data;N.save();var U=Math.round(K+H/J*K);if(I!=="Intensity"){N.fillStyle="#aaa";N.fillRect(0,U,M,1)}N.beginPath();var R,E,C;if(T.length>1){C=Math.ceil((T[1][0]-T[0][0])*B)}else{C=10}for(var O=0,P=T.length;O<P;O++){N.fillStyle=this.prefs.color;R=Math.round((T[O][0]-A)*B);E=T[O][1];var Q=false,G=false;if(E===null){if(F&&I==="Filled"){N.lineTo(R,z)}F=false;continue}if(E<H){G=true;E=H}else{if(E>D){Q=true;E=D}}if(I==="Histogram"){E=Math.round(E/J*z);N.fillRect(R,U,C,-E)}else{if(I==="Intensity"){E=255-Math.floor((E-H)/J*255);N.fillStyle="rgb("+E+","+E+","+E+")";N.fillRect(R,0,C,z)}else{E=Math.round(z-(E-H)/J*z);if(F){N.lineTo(R,E)}else{F=true;if(I==="Filled"){N.moveTo(R,z);N.lineTo(R,E)}else{N.moveTo(R,E)}}}}N.fillStyle=this.prefs.overflow_color;if(Q||G){var S;if(I==="Histogram"||I==="Intensity"){S=C}else{R-=2;S=4}if(Q){N.fillRect(R,0,S,3)}if(G){N.fillRect(R,z-3,S,3)}}N.fillStyle=this.prefs.color}if(I==="Filled"){if(F){N.lineTo(R,U);N.lineTo(0,U)}N.fill()}else{N.stroke()}N.restore()};var o=function(z){this.feature_positions={};this.slot_height=z;this.translation=0};o.prototype.map_feature_data=function(A,C,z,B){if(!this.feature_positions[C]){this.feature_positions[C]=[]}this.feature_positions[C].push({data:A,x_start:z,x_end:B})};o.prototype.get_feature_data=function(z,D){var C=Math.floor(D/this.slot_height),B;if(!this.feature_positions[C]){return null}z+=this.translation;for(var A=0;A<this.feature_positions[C].length;A++){B=this.feature_positions[C][A];if(z>=B.x_start&&z<=B.x_end){return B.data}}};var p=function(C,E,z,B,D,A){n.call(this,C,E,z,B,D);this.alpha_generator=(A?A:new g())};p.prototype.default_prefs={block_color:"#FFF",connector_color:"#FFF"};v(p.prototype,{get_required_height:function(A){var z=y_scale=this.get_row_height(),B=this.mode;if(B==="no_detail"||B==="Squish"||B==="Pack"){z=A*y_scale}return z+Math.max(Math.round(y_scale/2),5)},draw:function(L,J,H,G){var R=this.data,E=this.view_start,N=this.view_end;L.save();L.fillStyle=this.prefs.block_color;L.textAlign="right";var I=this.view_end-this.view_start,F=J/I,M=this.get_row_height(),Q=new o(M),C;for(var O=0,P=R.length;O<P;O++){var B=R[O],D=B[0],K=B[1],z=B[2],A=(G&&G[D]!==undefined?G[D]:null);if((K<N&&z>E)&&(this.mode=="Dense"||A!==null)){C=this.draw_element(L,this.mode,B,A,E,N,F,M,J);Q.map_feature_data(B,A,C[0],C[1])}}L.restore();return Q},draw_element:function(F,B,H,D,C,E,G,A,z){console.log("WARNING: Unimplemented function.");return[0,0]}});var d=10,j=3,m=5,x=10,f=1,t=3,e=3,a=9,l=2,h="#ccc";var s=function(C,E,z,B,D,A){p.call(this,C,E,z,B,D,A)};v(s.prototype,p.prototype,{get_row_height:function(){var A=this.mode,z;if(A==="Dense"){z=d}else{if(A==="no_detail"){z=j}else{if(A==="Squish"){z=m}else{z=x}}}return z},draw_element:function(L,E,T,G,N,ae,ai,ak,z){var 
Q=T[0],ag=T[1],Y=T[2],O=T[3],Z=Math.floor(Math.max(0,(ag-N)*ai)),M=Math.ceil(Math.min(z,Math.max(0,(Y-N)*ai))),X=Z,aj=M,W=(E==="Dense"?0:(0+G))*ak,K,ac,P=null,al=null,C=this.prefs.block_color,ab=this.prefs.label_color;L.globalAlpha=this.alpha_generator.gen_alpha(T);if(E=="Dense"){G=1}if(E==="no_detail"){L.fillStyle=C;L.fillRect(Z,W+5,M-Z,f)}else{var J=T[4],V=T[5],aa=T[6],D=T[7];if(V&&aa){P=Math.floor(Math.max(0,(V-N)*ai));al=Math.ceil(Math.min(z,Math.max(0,(aa-N)*ai)))}var ah,R;if(E==="Squish"||E==="Dense"){ah=1;R=e}else{ah=5;R=a}if(!D){if(T.strand){if(T.strand==="+"){L.fillStyle=L.canvas.manager.get_pattern("right_strand_inv")}else{if(T.strand==="-"){L.fillStyle=L.canvas.manager.get_pattern("left_strand_inv")}}}else{L.fillStyle=C}L.fillRect(Z,W,M-Z,R)}else{var I,S;if(E==="Squish"||E==="Dense"){L.fillStyle=h;I=W+Math.floor(e/2)+1;S=1}else{if(J){var I=W;var S=R;if(J==="+"){L.fillStyle=L.canvas.manager.get_pattern("right_strand")}else{if(J==="-"){L.fillStyle=L.canvas.manager.get_pattern("left_strand")}}}else{L.fillStyle=h;I+=(e/2)+1;S=1}}L.fillRect(Z,I,M-Z,S);for(var af=0,B=D.length;af<B;af++){var F=D[af],A=Math.floor(Math.max(0,(F[0]-N)*ai)),U=Math.ceil(Math.min(z,Math.max((F[1]-N)*ai)));if(A>U){continue}L.fillStyle=C;L.fillRect(A,W+(R-ah)/2+1,U-A,ah);if(P!==undefined&&aa>V&&!(A>al||U<P)){var ad=Math.max(A,P),H=Math.min(U,al);L.fillRect(ad,W+1,H-ad,R);if(D.length==1&&E=="Pack"){if(J==="+"){L.fillStyle=L.canvas.manager.get_pattern("right_strand_inv")}else{if(J==="-"){L.fillStyle=L.canvas.manager.get_pattern("left_strand_inv")}}if(ad+14<H){ad+=2;H-=2}L.fillRect(ad,W+1,H-ad,R)}}}}if(E==="Pack"&&ag>N){L.fillStyle=ab;if(N===0&&Z-L.measureText(O).width<0){L.textAlign="left";L.fillText(O,M+l,W+8);aj+=L.measureText(O).width+l}else{L.textAlign="right";L.fillText(O,Z-l,W+8);X-=L.measureText(O).width+l}}}L.globalAlpha=1;return[X,aj]}});var b=function(C,E,z,B,D,A){p.call(this,C,E,z,B,D,A)};v(b.prototype,p.prototype,{draw_element:function(S,N,H,D,V,B,K,T,Q){var H=data[i],J=H[0],R=H[1],C=H[2],M=H[3],F=Math.floor(Math.max(0,(R-V)*K)),I=Math.ceil(Math.min(Q,Math.max(0,(C-V)*K))),E=(N==="Dense"?0:(0+D))*T,z,W,A=null,L=null;if(no_label){S.fillStyle=block_color;S.fillRect(F+left_offset,E+5,I-F,1)}else{var U=H[4],P=H[5],G=H[6];z=9;W=1;S.fillRect(F+left_offset,E,I-F,z);if(N!=="Dense"&&M!==undefined&&R>V){S.fillStyle=label_color;if(V===0&&F-S.measureText(M).width<0){S.textAlign="left";S.fillText(M,I+2+left_offset,E+8)}else{S.textAlign="right";S.fillText(M,F-2+left_offset,E+8)}S.fillStyle=block_color}var O=U+" / "+P;if(R>V&&S.measureText(O).width<(I-F)){S.fillStyle="white";S.textAlign="center";S.fillText(O,left_offset+F+(I-F)/2,E+8);S.fillStyle=block_color}}return[F,I]}});var u=function(D,F,z,C,E,A,B){p.call(this,D,F,z,C,E,A);this.ref_seq=B};u.prototype.default_prefs=v({},p.prototype.default_prefs,{show_insertions:false});v(u.prototype,p.prototype,{get_row_height:function(){var z,A=this.mode;if(A==="Dense"){z=d}else{if(A==="Squish"){z=m}else{z=x;if(this.prefs.show_insertions){z*=2}}}return z},draw_read:function(V,Q,M,aa,B,U,J,G,F){V.textAlign="center";var T=this,A=[aa,B],P=0,W=0,S=0;ref_seq=this.ref_seq,char_width_px=V.canvas.manager.char_width_px;var af=[];if((Q==="Pack"||this.mode==="Auto")&&G!==undefined&&M>char_width_px){S=Math.round(M/2)}if(!J){J=[[0,G.length]]}for(var N=0,Y=J.length;N<Y;N++){var K=J[N],C="MIDNSHP=X"[K[0]],O=K[1];if(C==="H"||C==="S"){P-=O}var 
H=U+P,ae=Math.floor(Math.max(0,(H-aa)*M)),I=Math.floor(Math.max(0,(H+O-aa)*M));if(ae===I){I+=1}switch(C){case"H":break;case"S":case"M":case"=":if(is_overlap([H,H+O],A)){var R=G.slice(W,W+O);if(S>0){V.fillStyle=this.prefs.block_color;V.fillRect(ae-S,F+1,I-ae,9);V.fillStyle=h;for(var ac=0,z=R.length;ac<z;ac++){if(this.prefs.show_differences&&ref_seq){var L=ref_seq[H-aa+ac];if(!L||L.toLowerCase()===R[ac].toLowerCase()){continue}}if(H+ac>=aa&&H+ac<=B){var ad=Math.floor(Math.max(0,(H+ac-aa)*M));V.fillText(R[ac],ad,F+9)}}}else{V.fillStyle=this.prefs.block_color;V.fillRect(ae,F+4,I-ae,e)}}W+=O;P+=O;break;case"N":V.fillStyle=h;V.fillRect(ae-S,F+5,I-ae,1);P+=O;break;case"D":V.fillStyle="red";V.fillRect(ae-S,F+4,I-ae,3);P+=O;break;case"P":break;case"I":var Z=ae-S;if(is_overlap([H,H+O],A)){var R=G.slice(W,W+O);if(this.prefs.show_insertions){var E=ae-(I-ae)/2;if((Q==="Pack"||this.mode==="Auto")&&G!==undefined&&M>char_width_px){V.fillStyle="yellow";V.fillRect(E-S,F-9,I-ae,9);af[af.length]={type:"triangle",data:[Z,F+4,5]};V.fillStyle=h;switch(seq_tile_overlap){case (OVERLAP_START):R=R.slice(aa-H);break;case (OVERLAP_END):R=R.slice(0,H-B);break;case (CONTAINED_BY):break;case (CONTAINS):R=R.slice(aa-H,H-B);break}for(var ac=0,z=R.length;ac<z;ac++){var ad=Math.floor(Math.max(0,(H+ac-aa)*M));V.fillText(R[ac],ad-(I-ae)/2,F)}}else{V.fillStyle="yellow";V.fillRect(E,F+(this.mode!=="Dense"?2:5),I-ae,(Q!=="Dense"?e:t))}}else{if((Q==="Pack"||this.mode==="Auto")&&G!==undefined&&M>char_width_px){af[af.length]={type:"text",data:[R.length,Z,F+9]}}else{}}}W+=O;break;case"X":W+=O;break}}V.fillStyle="yellow";var ab,D,ag;for(var X=0;X<af.length;X++){ab=af[X];D=ab.type;ag=ab.data;if(D==="text"){V.save();V.font="bold "+V.font;V.fillText(ag[0],ag[1],ag[2]);V.restore()}else{if(D=="triangle"){r(V,ag[0],ag[1],ag[2])}}}},draw_element:function(S,N,F,C,V,A,J,T,Q){var I=F[0],R=F[1],B=F[2],K=F[3],E=Math.floor(Math.max(0,(R-V)*J)),G=Math.ceil(Math.min(Q,Math.max(0,(B-V)*J))),D=(N==="Dense"?0:(0+C))*T,W=this.prefs.block_color,H=this.prefs.label_color,P=0;if((N==="Pack"||this.mode==="Auto")&&J>S.canvas.manager.char_width_px){var P=Math.round(J/2)}S.fillStyle=W;if(F[5] instanceof Array){var O=Math.floor(Math.max(0,(F[4][0]-V)*J)),M=Math.ceil(Math.min(Q,Math.max(0,(F[4][1]-V)*J))),L=Math.floor(Math.max(0,(F[5][0]-V)*J)),z=Math.ceil(Math.min(Q,Math.max(0,(F[5][1]-V)*J)));if(F[4][1]>=V&&F[4][0]<=A&&F[4][2]){this.draw_read(S,N,J,V,A,F[4][0],F[4][2],F[4][3],D)}if(F[5][1]>=V&&F[5][0]<=A&&F[5][2]){this.draw_read(S,N,J,V,A,F[5][0],F[5][2],F[5][3],D)}if(L>M){S.fillStyle=h;q(S,M-P,D+5,L-P,D+5)}}else{S.fillStyle=W;this.draw_read(S,N,J,V,A,R,F[4],F[5],D)}if(N==="Pack"&&R>V){S.fillStyle=this.prefs.label_color;var U=1;if(U===0&&E-S.measureText(K).width<0){S.textAlign="left";S.fillText(K,G+l-P,D+8)}else{S.textAlign="right";S.fillText(K,E-l-P,D+8)}S.fillStyle=W}return[0,0]}});y.AlphaGenerator=g;y.SummaryTreePainter=w;y.LinePainter=c;y.LinkedFeaturePainter=s;y.ReadPainter=u;y.VariantPainter=b};(function(d){var c={};var b=function(e){return c[e]};var a=function(f,g){var e={};g(b,e);c[f]=e};a("class",class_module);a("slotting",slotting_module);a("painters",painters_module);a("trackster",trackster_module);for(key in c.trackster){d[key]=c.trackster[key]}})(window); \ No newline at end of file +var class_module=function(b,a){var c=function(){var f=arguments[0];for(var e=1;e<arguments.length;e++){var d=arguments[e];for(key in d){f[key]=d[key]}}return f};a.extend=c};var requestAnimationFrame=(function(){return 
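One pattern worth surfacing from the removed blob (and, as far as the minified code shows, unchanged in the regenerated one): the view keeps a tracks_to_be_redrawn queue, and request_redraw collapses any number of redraw requests into a single _redraw on the next animation frame. A minimal readable sketch — the class shape and the parameter names (nodraw, force, clear_after, track) are guesses from the minified code, not the unpacked source:

var View = function () {
    this.tracks = [];
    this.tracks_to_be_redrawn = [];   // entries: [track, force, clear_after]
};
View.prototype._redraw = function (nodraw) {
    // In the real code this draws every queued track, then clears the queue.
};
View.prototype.request_redraw = function (nodraw, force, clear_after, track) {
    var view = this,
        track_list = (track ? [track] : this.tracks);
    for (var i = 0; i < track_list.length; i++) {
        var cur = track_list[i], found = -1;
        // Queue each track at most once; a later request updates its entry.
        for (var j = 0; j < this.tracks_to_be_redrawn.length; j++) {
            if (this.tracks_to_be_redrawn[j][0] === cur) { found = j; break; }
        }
        if (found < 0) {
            this.tracks_to_be_redrawn.push([cur, force, clear_after]);
        } else {
            this.tracks_to_be_redrawn[found][1] = force;
            this.tracks_to_be_redrawn[found][2] = clear_after;
        }
    }
    // Everything requested before the next frame becomes one redraw.
    requestAnimationFrame(function () { view._redraw(nodraw); });
};

(The packed source appears to update the queue entry through the outer loop index rather than the matched index; the matched index is used above.)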
[The hunk then adds a regenerated minified build of the same script; readable sketches of two recoverable pieces follow. Recognizable near the top of the new blob: a small class-extension helper; a requestAnimationFrame fallback shim (falling back through the webkit/moz/o/ms prefixes to a setTimeout at 1000/60 ms); interval-overlap classification constants and helpers (BEFORE, OVERLAP_START, CONTAINS, CONTAINED_BY, OVERLAP_END, AFTER); a canvas manager that preloads the strand-arrow fill patterns; the drag-and-drop wiring that makes tracks and groups movable between containers; the user-facing status strings ("Currently indexing... please wait", "No data for this chrom/contig.", etc.); an LRU cache; data managers that fetch JSON slices keyed by chrom/low/high/mode/resolution and can extend stale results in place; and the Drawable base class shared by tracks, groups, and the view.]
aj=this.container,ah=this;this.top_container=$("<div/>").addClass("top-container").appendTo(aj);this.browser_content_div=$("<div/>").addClass("content").css("position","relative").appendTo(aj);this.bottom_container=$("<div/>").addClass("bottom-container").appendTo(aj);this.top_labeltrack=$("<div/>").addClass("top-labeltrack").appendTo(this.top_container);this.viewport_container=$("<div/>").addClass("viewport-container").attr("id","viewport-container").appendTo(this.browser_content_div);this.content_div=this.viewport_container;m(this.viewport_container,ah);this.intro_div=$("<div/>").addClass("intro");var ak=$("<div/>").text("Add Datasets to Visualization").addClass("action-button").appendTo(this.intro_div).click(function(){add_tracks()});this.nav_labeltrack=$("<div/>").addClass("nav-labeltrack").appendTo(this.bottom_container);this.nav_container=$("<div/>").addClass("nav-container").prependTo(this.top_container);this.nav=$("<div/>").addClass("nav").appendTo(this.nav_container);this.overview=$("<div/>").addClass("overview").appendTo(this.bottom_container);this.overview_viewport=$("<div/>").addClass("overview-viewport").appendTo(this.overview);this.overview_close=$("<a href='javascript:void(0);'>Close Overview</a>").addClass("overview-close").hide().appendTo(this.overview_viewport);this.overview_highlight=$("<div/>").addClass("overview-highlight").hide().appendTo(this.overview_viewport);this.overview_box_background=$("<div/>").addClass("overview-boxback").appendTo(this.overview_viewport);this.overview_box=$("<div/>").addClass("overview-box").appendTo(this.overview_viewport);this.default_overview_height=this.overview_box.height();this.nav_controls=$("<div/>").addClass("nav-controls").appendTo(this.nav);this.chrom_select=$("<select/>").attr({name:"chrom"}).css("width","15em").addClass("no-autocomplete").append("<option value=''>Loading</option>").appendTo(this.nav_controls);var ai=function(al){if(al.type==="focusout"||(al.keyCode||al.which)===13||(al.keyCode||al.which)===27){if((al.keyCode||al.which)!==27){ah.go_to($(this).val())}$(this).hide();$(this).val("");ah.location_span.show();ah.chrom_select.show()}};this.nav_input=$("<input/>").addClass("nav-input").hide().bind("keyup focusout",ai).appendTo(this.nav_controls);this.location_span=$("<span/>").addClass("location").appendTo(this.nav_controls);this.location_span.click(function(){ah.location_span.hide();ah.chrom_select.hide();ah.nav_input.val(ah.chrom+":"+ah.low+"-"+ah.high);ah.nav_input.css("display","inline-block");ah.nav_input.select();ah.nav_input.focus()});if(this.vis_id!==undefined){this.hidden_input=$("<input/>").attr("type","hidden").val(this.vis_id).appendTo(this.nav_controls)}this.zo_link=$("<a id='zoom-out' />").click(function(){ah.zoom_out();ah.request_redraw()}).appendTo(this.nav_controls);this.zi_link=$("<a id='zoom-in' />").click(function(){ah.zoom_in();ah.request_redraw()}).appendTo(this.nav_controls);this.load_chroms_deferred=this.load_chroms({low:0});this.chrom_select.bind("change",function(){ah.change_chrom(ah.chrom_select.val())});this.browser_content_div.click(function(al){$(this).find("input").trigger("blur")});this.browser_content_div.bind("dblclick",function(al){ah.zoom_in(al.pageX,this.viewport_container)});this.overview_box.bind("dragstart",function(al,am){this.current_x=am.offsetX}).bind("drag",function(al,an){var ao=an.offsetX-this.current_x;this.current_x=an.offsetX;var 
am=Math.round(ao/ah.viewport_container.width()*(ah.max_high-ah.max_low));ah.move_delta(-am)});this.overview_close.click(function(){ah.reset_overview()});this.viewport_container.bind("draginit",function(al,am){if(al.clientX>ah.viewport_container.width()-16){return false}}).bind("dragstart",function(al,am){am.original_low=ah.low;am.current_height=al.clientY;am.current_x=am.offsetX}).bind("drag",function(an,ap){var al=$(this);var aq=ap.offsetX-ap.current_x;var am=al.scrollTop()-(an.clientY-ap.current_height);al.scrollTop(am);ap.current_height=an.clientY;ap.current_x=ap.offsetX;var ao=Math.round(aq/ah.viewport_container.width()*(ah.high-ah.low));ah.move_delta(ao)}).bind("mousewheel",function(an,ap,am,al){if(am){var ao=Math.round(-am/ah.viewport_container.width()*(ah.high-ah.low));ah.move_delta(ao)}});this.top_labeltrack.bind("dragstart",function(al,am){return $("<div />").css({height:ah.browser_content_div.height()+ah.top_labeltrack.height()+ah.nav_labeltrack.height()+1,top:"0px",position:"absolute","background-color":"#ccf",opacity:0.5,"z-index":1000}).appendTo($(this))}).bind("drag",function(ap,aq){$(aq.proxy).css({left:Math.min(ap.pageX,aq.startX),width:Math.abs(ap.pageX-aq.startX)});var am=Math.min(ap.pageX,aq.startX)-ah.container.offset().left,al=Math.max(ap.pageX,aq.startX)-ah.container.offset().left,ao=(ah.high-ah.low),an=ah.viewport_container.width();ah.update_location(Math.round(am/an*ao)+ah.low,Math.round(al/an*ao)+ah.low)}).bind("dragend",function(aq,ar){var am=Math.min(aq.pageX,ar.startX),al=Math.max(aq.pageX,ar.startX),ao=(ah.high-ah.low),an=ah.viewport_container.width(),ap=ah.low;ah.low=Math.round(am/an*ao)+ap;ah.high=Math.round(al/an*ao)+ap;$(ar.proxy).remove();ah.request_redraw()});this.add_label_track(new ad(this,{content_div:this.top_labeltrack}));this.add_label_track(new ad(this,{content_div:this.nav_labeltrack}));$(window).bind("resize",function(){ah.resize_window()});$(document).bind("redraw",function(){ah.redraw()});this.reset();$(window).trigger("resize");this.update_intro_div()},update_intro_div:function(){if(this.num_tracks===0){this.intro_div.appendTo(this.viewport_container)}else{this.intro_div.remove()}},update_location:function(ah,ai){this.location_span.text(commatize(ah)+" - "+commatize(ai));this.nav_input.val(this.chrom+":"+commatize(ah)+"-"+commatize(ai))},load_chroms:function(aj){aj.num=x;$.extend(aj,(this.vis_id!==undefined?{vis_id:this.vis_id}:{dbkey:this.dbkey}));var ah=this,ai=$.Deferred();$.ajax({url:chrom_url,data:aj,dataType:"json",success:function(al){if(al.chrom_info.length===0){alert("Invalid chromosome: "+aj.chrom);return}if(al.reference){ah.add_label_track(new B(ah))}ah.chrom_data=al.chrom_info;var ao='<option value="">Select Chrom/Contig</option>';for(var an=0,ak=ah.chrom_data.length;an<ak;an++){var am=ah.chrom_data[an].chrom;ao+='<option value="'+am+'">'+am+"</option>"}if(al.prev_chroms){ao+='<option value="previous">Previous '+x+"</option>"}if(al.next_chroms){ao+='<option value="next">Next '+x+"</option>"}ah.chrom_select.html(ao);ah.chrom_start_index=al.start_index;ai.resolve(al)},error:function(){alert("Could not load chroms for this dbkey:",ah.dbkey)}});return ai},change_chrom:function(am,ai,ao){if(!am||am==="None"){return}var aj=this;if(am==="previous"){aj.load_chroms({low:this.chrom_start_index-x});return}if(am==="next"){aj.load_chroms({low:this.chrom_start_index+x});return}var an=$.grep(aj.chrom_data,function(ap,aq){return 
ap.chrom===am})[0];if(an===undefined){aj.load_chroms({chrom:am},function(){aj.change_chrom(am,ai,ao)});return}else{if(am!==aj.chrom){aj.chrom=am;aj.chrom_select.val(aj.chrom);aj.max_high=an.len-1;aj.reset();aj.request_redraw(true);for(var al=0,ah=aj.drawables.length;al<ah;al++){var ak=aj.drawables[al];if(ak.init){ak.init()}}}if(ai!==undefined&&ao!==undefined){aj.low=Math.max(ai,0);aj.high=Math.min(ao,aj.max_high)}aj.reset_overview();aj.request_redraw()}},go_to:function(al){var ap=this,ah,ak,ai=al.split(":"),an=ai[0],ao=ai[1];if(ao!==undefined){try{var am=ao.split("-");ah=parseInt(am[0].replace(/,/g,""),10);ak=parseInt(am[1].replace(/,/g,""),10)}catch(aj){return false}}ap.change_chrom(an,ah,ak)},move_fraction:function(aj){var ah=this;var ai=ah.high-ah.low;this.move_delta(aj*ai)},move_delta:function(aj){var ah=this;var ai=ah.high-ah.low;if(ah.low-aj<ah.max_low){ah.low=ah.max_low;ah.high=ah.max_low+ai}else{if(ah.high-aj>ah.max_high){ah.high=ah.max_high;ah.low=ah.max_high-ai}else{ah.high-=aj;ah.low-=aj}}ah.request_redraw()},add_drawable:function(ah){z.prototype.add_drawable.call(this,ah);ah.init();this.has_changes=true;this.update_intro_div()},add_label_track:function(ah){ah.view=this;ah.init();this.label_tracks.push(ah)},remove_drawable:function(aj,ai){z.prototype.remove_drawable.call(this,aj);if(ai){var ah=this;aj.container_div.fadeOut("slow",function(){$(this).remove();ah.update_intro_div()});this.has_changes=true}},reset:function(){this.low=this.max_low;this.high=this.max_high;this.viewport_container.find(".yaxislabel").remove()},request_redraw:function(ap,ah,ao,ai){var an=this,al=(ai?[ai]:an.drawables),aj;var ai;for(var am=0;am<al.length;am++){ai=al[am];aj=-1;for(var ak=0;ak<an.tracks_to_be_redrawn.length;ak++){if(an.tracks_to_be_redrawn[ak][0]===ai){aj=ak;break}}if(aj<0){an.tracks_to_be_redrawn.push([ai,ah,ao])}else{an.tracks_to_be_redrawn[am][1]=ah;an.tracks_to_be_redrawn[am][2]=ao}}requestAnimationFrame(function(){an._redraw(ap)})},_redraw:function(ar){var ao=this.low,ak=this.high;if(ao<this.max_low){ao=this.max_low}if(ak>this.max_high){ak=this.max_high}var aq=this.high-this.low;if(this.high!==0&&aq<this.min_separation){ak=ao+this.min_separation}this.low=Math.floor(ao);this.high=Math.ceil(ak);this.resolution=Math.pow(D,Math.ceil(Math.log((this.high-this.low)/S)/Math.log(D)));this.zoom_res=Math.pow(w,Math.max(0,Math.ceil(Math.log(this.resolution,w)/Math.log(w))));var ah=(this.low/(this.max_high-this.max_low)*this.overview_viewport.width())||0;var an=((this.high-this.low)/(this.max_high-this.max_low)*this.overview_viewport.width())||0;var at=13;this.overview_box.css({left:ah,width:Math.max(at,an)}).show();if(an<at){this.overview_box.css("left",ah-(at-an)/2)}if(this.overview_highlight){this.overview_highlight.css({left:ah,width:an})}this.update_location(this.low,this.high);if(!ar){var aj,ai,ap;for(var al=0,am=this.tracks_to_be_redrawn.length;al<am;al++){aj=this.tracks_to_be_redrawn[al][0];ai=this.tracks_to_be_redrawn[al][1];ap=this.tracks_to_be_redrawn[al][2];if(aj){aj._draw(ai,ap)}}this.tracks_to_be_redrawn=[];for(al=0,am=this.label_tracks.length;al<am;al++){this.label_tracks[al]._draw()}}},zoom_in:function(ai,aj){if(this.max_high===0||this.high-this.low<this.min_separation){return}var ak=this.high-this.low,al=ak/2+this.low,ah=(ak/this.zoom_factor)/2;if(ai){al=ai/this.viewport_container.width()*(this.high-this.low)+this.low}this.low=Math.round(al-ah);this.high=Math.round(al+ah);this.request_redraw()},zoom_out:function(){if(this.max_high===0){return}var 
ai=this.high-this.low,aj=ai/2+this.low,ah=(ai*this.zoom_factor)/2;this.low=Math.round(aj-ah);this.high=Math.round(aj+ah);this.request_redraw()},resize_window:function(){this.viewport_container.height(this.container.height()-this.top_container.height()-this.bottom_container.height());this.nav_container.width(this.container.width());this.request_redraw()},set_overview:function(ah){$.when(ah.get_overview_tile()).then(function(ai){view.overview_viewport.find(".track-tile").remove();view.overview_close.show();view.overview_viewport.append(ai.canvas);view.overview_highlight.show().height(ai.canvas.height());view.overview_viewport.height(ai.canvas.height()+view.overview_box.outerHeight());view.resize_window();if(view.overview_track){view.overview_track.set_is_overview(false)}view.overview_track=ah;ah.set_is_overview(true)});view.has_changes=true},reset_overview:function(){this.overview_viewport.find(".track-tile").remove();this.overview_viewport.height(this.default_overview_height);this.overview_box.height(this.default_overview_height);this.overview_close.hide();this.overview_highlight.hide();view.resize_window();if(view.overview_track){view.overview_track.set_is_overview(false)}view.overview_track=null}});var s=function(aj,an){this.track=aj;this.name=an.name;this.params=[];var av=an.params;for(var ak=0;ak<av.length;ak++){var ap=av[ak],ai=ap.name,au=ap.label,al=unescape(ap.html),aw=ap.value,ar=ap.type;if(ar==="number"){this.params[this.params.length]=new g(ai,au,al,aw,ap.min,ap.max)}else{if(ar=="select"){this.params[this.params.length]=new O(ai,au,al,aw)}else{console.log("WARNING: unrecognized tool parameter type:",ai,ar)}}}this.parent_div=$("<div/>").addClass("dynamic-tool").hide();this.parent_div.bind("drag",function(ay){ay.stopPropagation()}).click(function(ay){ay.stopPropagation()}).bind("dblclick",function(ay){ay.stopPropagation()});var at=$("<div class='tool-name'>").appendTo(this.parent_div).text(this.name);var aq=this.params;var ao=this;$.each(this.params,function(az,aC){var aB=$("<div>").addClass("param-row").appendTo(ao.parent_div);var ay=$("<div>").addClass("param-label").text(aC.label).appendTo(aB);var aA=$("<div/>").addClass("slider").html(aC.html).appendTo(aB);aA.find(":input").val(aC.value);$("<div style='clear: both;'/>").appendTo(aB)});this.parent_div.find("input").click(function(){$(this).select()});var ax=$("<div>").addClass("param-row").appendTo(this.parent_div);var am=$("<input type='submit'>").attr("value","Run on complete dataset").appendTo(ax);var ah=$("<input type='submit'>").attr("value","Run on visible region").css("margin-left","3em").appendTo(ax);var ao=this;ah.click(function(){ao.run_on_region()});am.click(function(){ao.run_on_dataset()})};q(s.prototype,{get_param_values_dict:function(){var ah={};this.parent_div.find(":input").each(function(){var ai=$(this).attr("name"),aj=$(this).val();ah[ai]=JSON.stringify(aj)});return ah},get_param_values:function(){var ai=[];var ah={};this.parent_div.find(":input").each(function(){var aj=$(this).attr("name"),ak=$(this).val();if(aj){ai[ai.length]=ak}});return ai},run_on_dataset:function(){var ah=this;ah.run({dataset_id:this.track.original_dataset_id,tool_id:ah.name},null,function(ai){show_modal(ah.name+" is Running",ah.name+" is running on the complete dataset. 
Tool outputs are in dataset's history.",{Close:hide_modal})})},run_on_region:function(){var ai={dataset_id:this.track.original_dataset_id,chrom:this.track.view.chrom,low:this.track.view.low,high:this.track.view.high,tool_id:this.name},al=this.track,aj=ai.tool_id+al.tool_region_and_parameters_str(ai.chrom,ai.low,ai.high),ah,am;if(al.container===view){var ak=new R(this.name,this.track.view,this.track.container);al.container.add_drawable(ak);al.container.remove_drawable(al);ak.add_drawable(al);al.container_div.appendTo(ak.content_div);ah=ak}else{ah=al.container}if(al instanceof e){am=new W(aj,view,ah,"hda");am.change_mode(al.mode);ah.add_drawable(am)}am.content_div.text("Starting job.");this.run(ai,am,function(an){am.dataset_id=an.dataset_id;am.content_div.text("Running job.");am.init()})},run:function(ai,aj,ak){$.extend(ai,this.get_param_values_dict());var ah=function(){$.getJSON(rerun_tool_url,ai,function(al){if(al==="no converter"){aj.container_div.addClass("error");aj.content_div.text(K)}else{if(al.error){aj.container_div.addClass("error");aj.content_div.text(y+al.message)}else{if(al==="pending"){aj.container_div.addClass("pending");aj.content_div.text("Converting input data so that it can be used quickly with tool.");setTimeout(ah,2000)}else{ak(al)}}}})};ah()}});var O=function(ai,ah,aj,ak){this.name=ai;this.label=ah;this.html=aj;this.value=ak};var g=function(aj,ai,al,am,ak,ah){O.call(this,aj,ai,al,am);this.min=ak;this.max=ah};var h=function(ai,ah,aj,ak){this.name=ai;this.index=ah;this.tool_id=aj;this.tool_exp_name=ak};var X=function(ai,ah,aj,ak){h.call(this,ai,ah,aj,ak);this.low=-Number.MAX_VALUE;this.high=Number.MAX_VALUE;this.min=Number.MAX_VALUE;this.max=-Number.MAX_VALUE;this.container=null;this.slider=null;this.slider_label=null};q(X.prototype,{applies_to:function(ah){if(ah.length>this.index){return true}return false},keep:function(ah){if(!this.applies_to(ah)){return true}var ai=ah[this.index];return(isNaN(ai)||(ai>=this.low&&ai<=this.high))},update_attrs:function(ai){var ah=false;if(!this.applies_to(ai)){return ah}if(ai[this.index]<this.min){this.min=Math.floor(ai[this.index]);ah=true}if(ai[this.index]>this.max){this.max=Math.ceil(ai[this.index]);ah=true}return ah},update_ui_elt:function(){if(this.min!=this.max){this.container.show()}else{this.container.hide()}var aj=function(am,ak){var al=ak-am;return(al<=2?0.01:1)};var ai=this.slider.slider("option","min"),ah=this.slider.slider("option","max");if(this.min<ai||this.max>ah){this.slider.slider("option","min",this.min);this.slider.slider("option","max",this.max);this.slider.slider("option","step",aj(this.min,this.max));this.slider.slider("option","values",[this.min,this.max])}}});var ac=function(ar,ay){this.track=ar;this.filters=[];for(var at=0;at<ay.length;at++){var au=ay[at],az=au.name,ah=au.type,ak=au.index,ax=au.tool_id,aw=au.tool_exp_name;if(ah==="int"||ah==="float"){this.filters[at]=new X(az,ak,ax,aw)}else{console.log("ERROR: unsupported filter: ",az,ah)}}var al=function(aA,aB,aC){aA.click(function(){var aD=aB.text();max=parseFloat(aC.slider("option","max")),input_size=(max<=1?4:max<=1000000?max.toString().length:6),multi_value=false;if(aC.slider("option","values")){input_size=2*input_size+1;multi_value=true}aB.text("");$("<input type='text'/>").attr("size",input_size).attr("maxlength",input_size).attr("value",aD).appendTo(aB).focus().select().click(function(aE){aE.stopPropagation()}).blur(function(){$(this).remove();aB.text(aD)}).keyup(function(aI){if(aI.keyCode===27){$(this).trigger("blur")}else{if(aI.keyCode===13){var 
aG=aC.slider("option","min"),aE=aC.slider("option","max"),aH=function(aJ){return(isNaN(aJ)||aJ>aE||aJ<aG)},aF=$(this).val();if(!multi_value){aF=parseFloat(aF);if(aH(aF)){alert("Parameter value must be in the range ["+aG+"-"+aE+"]");return $(this)}}else{aF=aF.split("-");aF=[parseFloat(aF[0]),parseFloat(aF[1])];if(aH(aF[0])||aH(aF[1])){alert("Parameter value must be in the range ["+aG+"-"+aE+"]");return $(this)}}aC.slider((multi_value?"values":"value"),aF)}}})})};this.parent_div=$("<div/>").addClass("filters").hide();this.parent_div.bind("drag",function(aA){aA.stopPropagation()}).click(function(aA){aA.stopPropagation()}).bind("dblclick",function(aA){aA.stopPropagation()}).bind("keydown",function(aA){aA.stopPropagation()});var av=$("<div/>").addClass("sliders").appendTo(this.parent_div);var ap=this;$.each(this.filters,function(aD,aF){aF.container=$("<div/>").addClass("slider-row").appendTo(av);var aE=$("<div/>").addClass("elt-label").appendTo(aF.container);var aC=$("<span/>").addClass("slider-name").text(aF.name+" ").appendTo(aE);var aB=$("<span/>");var aH=$("<span/>").addClass("slider-value").appendTo(aE).append("[").append(aB).append("]");var aA=$("<div/>").addClass("slider").appendTo(aF.container);aF.control_element=$("<div/>").attr("id",aF.name+"-filter-control").appendTo(aA);var aG=[0,0];aF.control_element.slider({range:true,min:Number.MAX_VALUE,max:-Number.MIN_VALUE,values:[0,0],slide:function(aJ,aK){var aI=aK.values;aB.text(aI[0]+"-"+aI[1]);aF.low=aI[0];aF.high=aI[1];ap.track.request_draw(true,true)},change:function(aI,aJ){aF.control_element.slider("option","slide").call(aF.control_element,aI,aJ)}});aF.slider=aF.control_element;aF.slider_label=aB;al(aH,aB,aF.control_element);$("<div style='clear: both;'/>").appendTo(aF.container)});if(this.filters.length!==0){var am=$("<div/>").addClass("param-row").appendTo(av);var ao=$("<input type='submit'/>").attr("value","Run on complete dataset").appendTo(am);var aj=this;ao.click(function(){aj.run_on_dataset()})}var aq=$("<div/>").addClass("display-controls").appendTo(this.parent_div),an=$("<span/>").addClass("elt-label").text("Transparency:").appendTo(aq),ai=$("<select/>").attr("name","alpha_dropdown").appendTo(aq);this.alpha_filter=null;$("<option/>").attr("value",-1).text("== None ==").appendTo(ai);for(var at=0;at<this.filters.length;at++){$("<option/>").attr("value",at).text(this.filters[at].name).appendTo(ai)}ai.change(function(){$(this).children("option:selected").each(function(){var aA=parseInt($(this).val());ap.alpha_filter=(aA>=0?ap.filters[aA]:null);ap.track.request_draw(true,true)})});$("<div style='clear: both;'/>").appendTo(this.parent_div)};q(ac.prototype,{reset_filters:function(){for(var ah=0;ah<this.filters.length;ah++){filter=this.filters[ah];filter.slider.slider("option","values",[filter.min,filter.max])}this.alpha_filter=null},run_on_dataset:function(){var ap=function(au,ar,at){if(!(ar in au)){au[ar]=at}return au[ar]};var aj={},ah,ai,ak;for(var al=0;al<this.filters.length;al++){ah=this.filters[al];if(ah.tool_id){if(ah.min!=ah.low){ai=ap(aj,ah.tool_id,[]);ai[ai.length]=ah.tool_exp_name+" >= "+ah.low}if(ah.max!=ah.high){ai=ap(aj,ah.tool_id,[]);ai[ai.length]=ah.tool_exp_name+" <= "+ah.high}}}var an=[];for(var aq in aj){an[an.length]=[aq,aj[aq]]}var ao=an.length;(function am(ay,av){var at=av[0],au=at[0],ax=at[1],aw="("+ax.join(") and (")+")",ar={cond:aw,input:ay,target_dataset_id:ay,tool_id:au},av=av.slice(1);$.getJSON(run_tool_url,ar,function(az){if(az.error){show_modal("Filter Dataset","Error running tool 
"+au,{Close:hide_modal})}else{if(av.length===0){show_modal("Filtering Dataset","Filter(s) are running on the complete dataset. Outputs are in dataset's history.",{Close:hide_modal})}else{am(az.dataset_id,av)}}})})(this.track.dataset_id,an)}});var P=function(ai,ah){M.AlphaGenerator.call(this,ah);this.filter=ai};P.prototype.gen_alpha=function(ah){if(this.filter.high===Number.MAX_VALUE||this.filter.low===-Number.MAX_VALUE||this.filter.low===this.filter.high){return this.default_alpha}return((parseFloat(ah[this.filter.index])-this.filter.low)/(this.filter.high-this.filter.low))};var G=function(ah){this.track=ah.track;this.params=ah.params;this.values={};this.restore_values((ah.saved_values?ah.saved_values:{}));this.onchange=ah.onchange};q(G.prototype,{restore_values:function(ah){var ai=this;$.each(this.params,function(aj,ak){if(ah[ak.key]!==undefined){ai.values[ak.key]=ah[ak.key]}else{ai.values[ak.key]=ak.default_value}})},build_form:function(){var ai=this;var ah=$("<div />");$.each(this.params,function(am,ak){if(!ak.hidden){var aj="param_"+am;var ao=ai.values[ak.key];var ar=$("<div class='form-row' />").appendTo(ah);ar.append($("<label />").attr("for",aj).text(ak.label+":"));if(ak.type==="bool"){ar.append($('<input type="checkbox" />').attr("id",aj).attr("name",aj).attr("checked",ao))}else{if(ak.type==="text"){ar.append($('<input type="text"/>').attr("id",aj).val(ao).click(function(){$(this).select()}))}else{if(ak.type==="color"){var an=$("<input />").attr("id",aj).attr("name",aj).val(ao);var ap=$("<div class='tipsy tipsy-north' style='position: absolute;' />").hide();var al=$("<div style='background-color: black; padding: 10px;'></div>").appendTo(ap);var aq=$("<div/>").appendTo(al).farbtastic({width:100,height:100,callback:an,color:ao});$("<div />").append(an).append(ap).appendTo(ar).bind("click",function(at){ap.css({left:$(this).position().left+($(an).width()/2)-60,top:$(this).position().top+$(this.height)}).show();$(document).bind("click.color-picker",function(){ap.hide();$(document).unbind("click.color-picker")});at.stopPropagation()})}else{ar.append($("<input />").attr("id",aj).attr("name",aj).val(ao))}}}}});return ah},update_from_form:function(ah){var aj=this;var ai=false;$.each(this.params,function(ak,am){if(!am.hidden){var an="param_"+ak;var al=ah.find("#"+an).val();if(am.type==="float"){al=parseFloat(al)}else{if(am.type==="int"){al=parseInt(al)}else{if(am.type==="bool"){al=ah.find("#"+an).is(":checked")}}}if(al!==aj.values[am.key]){aj.values[am.key]=al;ai=true}}});if(ai){this.onchange()}}});var b=function(ah,ak,aj,ai,al){this.track=ah;this.index=ak;this.low=ak*S*aj;this.high=(ak+1)*S*aj;this.resolution=aj;this.canvas=$("<div class='track-tile'/>").append(ai);this.data=al;this.stale=false};b.prototype.predisplay_actions=function(){};var l=function(ah,ak,aj,ai,al,am){b.call(this,ah,ak,aj,ai,al);this.max_val=am};q(l.prototype,b.prototype);var Q=function(ah,al,ak,aj,an,ao,am,ai){b.call(this,ah,al,ak,aj,an);this.mode=ao;this.message=am;this.feature_mapper=ai};q(Q.prototype,b.prototype);Q.prototype.predisplay_actions=function(){var ai=this,ah={};if(ai.mode!=="Pack"){return}$(this.canvas).mousemove(function(au){var ao=$(this).offset(),at=au.pageX-ao.left,ar=au.pageY-ao.top,ay=ai.feature_mapper.get_feature_data(at,ar),ap=(ay?ay[0]:null);$(this).siblings(".feature-popup").each(function(){if(!ap||$(this).attr("id")!==ap.toString()){$(this).remove()}});if(ay){var ak=ah[ap];if(!ak){var ap=ay[0],av={name:ay[3],start:ay[1],end:ay[2],strand:ay[4]},an=ai.track.filters_manager.filters,am;for(var 
aq=0;aq<an.length;aq++){am=an[aq];av[am.name]=ay[am.index]}var ak=$("<div/>").attr("id",ap).addClass("feature-popup"),ax,aw,az=$("<table/>").appendTo(ak),aA;for(ax in av){aw=av[ax];aA=$("<tr/>").appendTo(az);$("<th/>").appendTo(aA).text(ax);$("<td/>").attr("align","left").appendTo(aA).text(typeof(aw)=="number"?ab(aw,2):aw)}ah[ap]=ak}ak.appendTo($(ai.canvas).parent());var al=at+parseInt(ai.canvas.css("left"))+7,aj=ar+parseInt(ai.canvas.css("top"))+7;ak.css("left",al+"px").css("top",aj+"px")}else{if(!au.isPropagationStopped()){au.stopPropagation();$(this).siblings().each(function(){$(this).trigger(au)})}}}).mouseleave(function(){$(this).siblings(".feature-popup").remove()})};var j=function(ak,ai,ah,aj,al,am){r.call(this,ak,ai,ah,{},"draghandle");this.data_url=(al?al:default_data_url);this.data_url_extra_params={};this.data_query_wait=(am?am:L);this.dataset_check_url=converted_datasets_state_url;if(!j.id_counter){j.id_counter=0}this.container_div=$("<div />").addClass("track").attr("id","track_"+j.id_counter++).css("position","relative");if(!this.hidden){this.header_div=$("<div class='track-header' />").appendTo(this.container_div);if(this.view.editor){this.drag_div=$("<div/>").addClass(this.drag_handle_class).appendTo(this.header_div)}this.name_div=$("<div class='menubutton popup' />").appendTo(this.header_div);this.name_div.text(this.name);this.name_div.attr("id",this.name.replace(/\s+/g,"-").replace(/[^a-zA-Z0-9\-]/g,"").toLowerCase())}this.content_div=$("<div class='track-content'>").appendTo(this.container_div);this.container.content_div.append(this.container_div)};q(j.prototype,r.prototype,{get_type:function(){if(this instanceof ad){return"LabelTrack"}else{if(this instanceof B){return"ReferenceTrack"}else{if(this instanceof k){return"LineTrack"}else{if(this instanceof Y){return"ReadTrack"}else{if(this instanceof W){return"ToolDataFeatureTrack"}else{if(this instanceof V){return"VcfTrack"}else{if(this instanceof e){return"FeatureTrack"}}}}}}}return""},init:function(){var ah=this;ah.enabled=false;ah.tile_cache.clear();ah.data_manager.clear();ah.initial_canvas=undefined;ah.content_div.css("height","auto");ah.container_div.removeClass("nodata error pending");if(!ah.dataset_id){return}$.getJSON(converted_datasets_state_url,{hda_ldda:ah.hda_ldda,dataset_id:ah.dataset_id,chrom:ah.view.chrom},function(ai){if(!ai||ai==="error"||ai.kind==="error"){ah.container_div.addClass("error");ah.content_div.text(p);if(ai.message){var aj=$(" <a href='javascript:void(0);'></a>").text("View error").click(function(){show_modal("Trackster Error","<pre>"+ai.message+"</pre>",{Close:hide_modal})});ah.content_div.append(aj)}}else{if(ai==="no converter"){ah.container_div.addClass("error");ah.content_div.text(K)}else{if(ai==="no data"||(ai.data!==undefined&&(ai.data===null||ai.data.length===0))){ah.container_div.addClass("nodata");ah.content_div.text(F)}else{if(ai==="pending"){ah.container_div.addClass("pending");ah.content_div.text(u);setTimeout(function(){ah.init()},ah.data_query_wait)}else{if(ai.status==="data"){if(ai.valid_chroms){ah.valid_chroms=ai.valid_chroms;ah.make_name_popup_menu()}ah.content_div.text(aa);if(ah.view.chrom){ah.content_div.text("");ah.content_div.css("height",ah.height_px+"px");ah.enabled=true;$.when(ah.predraw_init()).done(function(){ah.container_div.removeClass("nodata error pending");ah.request_draw()})}}}}}}})},predraw_init:function(){},});var N=function(ao,am){var ai=this,ap=ai.view;n(ai.container_div,ai.drag_handle_class,".group",ai);this.filters_manager=new 
ac(this,(ao!==undefined?ao:{}));this.filters_available=false;this.filters_visible=false;this.tool=(am!==undefined&&obj_length(am)>0?new s(this,am):undefined);this.is_overview=false;if(ai.hidden){return}if(this.filters_manager){this.filters_div=this.filters_manager.parent_div;this.header_div.after(this.filters_div)}if(this.tool){this.dynamic_tool_div=this.tool.parent_div;this.header_div.after(this.dynamic_tool_div)}if(ai.display_modes!==undefined){if(ai.mode_div===undefined){ai.mode_div=$("<div class='right-float menubutton popup' />").appendTo(ai.header_div);var aj=(ai.config&&ai.config.values.mode?ai.config.values.mode:ai.display_modes[0]);ai.mode=aj;ai.mode_div.text(aj);var ah={};for(var ak=0,an=ai.display_modes.length;ak<an;ak++){var al=ai.display_modes[ak];ah[al]=function(aq){return function(){ai.change_mode(aq)}}(al)}make_popupmenu(ai.mode_div,ah)}else{ai.mode_div.hide()}}this.make_name_popup_menu()};q(N.prototype,r.prototype,j.prototype,{to_json:function(){return{track_type:this.get_type(),name:this.name,hda_ldda:this.hda_ldda,dataset_id:this.dataset_id,prefs:this.prefs,mode:this.mode,}},change_mode:function(ai){var ah=this;ah.mode_div.text(ai);ah.mode=ai;ah.config.values.mode=ai;ah.tile_cache.clear();ah.request_draw();return ah},make_name_popup_menu:function(){var ai=this;var ah={};ah[(this.is_overview?"Hide overview":"Set as overview")]=function(){if(ai.is_overview){ai.view.reset_overview()}else{ai.view.set_overview(ai)}};ah["Edit configuration"]=function(){var am=function(){hide_modal();$(window).unbind("keypress.check_enter_esc")},ak=function(){ai.config.update_from_form($(".dialog-box"));hide_modal();$(window).unbind("keypress.check_enter_esc")},al=function(an){if((an.keyCode||an.which)===27){am()}else{if((an.keyCode||an.which)===13){ak()}}};$(window).bind("keypress.check_enter_esc",al);show_modal("Configure Track",ai.config.build_form(),{Cancel:am,OK:ak})};if(ai.filters_available>0){var aj=(ai.filters_div.is(":visible")?"Hide filters":"Show filters");ah[aj]=function(){ai.filters_visible=(ai.filters_div.is(":visible"));if(ai.filters_visible){ai.filters_manager.reset_filters()}ai.filters_div.toggle();ai.make_name_popup_menu()}}if(ai.tool){var aj=(ai.dynamic_tool_div.is(":visible")?"Hide tool":"Show tool");ah[aj]=function(){if(!ai.dynamic_tool_div.is(":visible")){ai.set_name(ai.name+ai.tool_region_and_parameters_str())}else{menu_option_text="Show dynamic tool";ai.revert_name()}ai.dynamic_tool_div.toggle();ai.make_name_popup_menu()}}ah.Remove=function(){ai.remove()};make_popupmenu(ai.name_div,ah)},set_is_overview:function(ah){this.is_overview=ah;this.make_name_popup_menu()},get_overview_tile:function(){var ah=this;view=ah.view,resolution=Math.pow(D,Math.ceil(Math.log((view.max_high-view.max_low)/S)/Math.log(D))),view_width=view.container.width(),w_scale=view_width/(view.max_high-view.max_low),overview_tile=$.Deferred();$.when(ah.data_manager.get_data(view.max_low,view.max_high,"Auto",resolution,ah.data_url_extra_params)).then(function(ai){var ak=ah._gen_tile_cache_key(view_width,w_scale,0),am=ah.tile_cache.get(ak);if(!am){am=ah.draw_tile(ai,"Auto",resolution,0,w_scale);ah.tile_cache.set(ak,am)}var ap=$(am.canvas.find("canvas")),al=ap.clone(),ao=ap.get(0).getContext("2d"),aj=al.get(0).getContext("2d"),an=ao.getImageData(0,0,ao.canvas.width,ao.canvas.height);aj.putImageData(an,-ah.left_offset,(am.data.dataset_type==="summary_tree"?U:0));new_tile=new b(ah,-1,resolution,al);overview_tile.resolve(new_tile)});return overview_tile},_gen_tile_cache_key:function(ai,aj,ah){return 
ai+"_"+aj+"_"+ah},request_draw:function(ai,ah){this.view.request_redraw(false,ai,ah,this)},_draw:function(aj,ar){if(!this.enabled){return}if(!(this instanceof B)&&(!this.dataset_id)){return}var aq=this.view.low,an=this.view.high,ao=an-aq,ak=this.view.container.width(),av=ak/ao,am=this.view.resolution,au=$("<div style='position: relative;'></div>");if(!ar){this.content_div.children().remove()}this.content_div.append(au);this.max_height=0;var ai=Math.floor(aq/am/S);var ap=true;var at=[];var ah=0;while((ai*S*am)<an){tile=this.draw_helper(aj,ak,ai,am,au,av);if(tile){at.push(tile)}else{ap=false}ai+=1;ah++}var al=this;if(ap){al.postdraw_actions(at,ak,av,ar)}},postdraw_actions:function(al,am,an,ah){var aj=this;var ak=false;for(var ai=0;ai<al.length;ai++){if(al[ai].message){ak=true;break}}if(ak){for(var ai=0;ai<al.length;ai++){tile=al[ai];if(!tile.message){tile.canvas.css("padding-top",E)}}}},draw_helper:function(ai,aj,ak,an,au,ay,av,ao){var al=this,at=this._gen_tile_cache_key(aj,ay,ak),ap=ak*S*an,ax=ap+S*an;var aq=(ai?undefined:al.tile_cache.get(at));if(aq){al.show_tile(aq,au,ay);return aq}var ar=function(az){return("isResolved" in az)};var am=true;var ah=al.data_manager.get_data(ap,ax,al.mode,an,al.data_url_extra_params);if(ar(ah)){am=false}var aw;if(view.reference_track&&ay>view.canvas_manager.char_width_px){aw=view.reference_track.data_manager.get_data(ap,ax,al.mode,an,view.reference_track.data_url_extra_params);if(ar(aw)){am=false}}if(am){q(ah,ao);var aq=al.draw_tile(ah,al.mode,an,ak,ay,aw);if(aq!==undefined){al.tile_cache.set(at,aq);al.show_tile(aq,au,ay)}return aq}$.when(ah,aw).then(function(){view.request_redraw()});return null},show_tile:function(ao,aq,ar){var aj=this,ai=ao.canvas,an=ai;if(ao.message){var at=$("<div/>"),ap=$("<div/>").addClass("tile-message").text(ao.message).css({height:E-1,width:ao.canvas.width}).appendTo(at),al=$("<a href='javascript:void(0);'/>").addClass("icon more-down").appendTo(ap),ah=$("<a href='javascript:void(0);'/>").addClass("icon more-across").appendTo(ap);at.append(ai);an=at;al.click(function(){ao.stale=true;aj.data_manager.get_more_data(ao.low,ao.high,aj.mode,ao.resolution,{},aj.data_manager.DEEP_DATA_REQ);aj.request_draw()}).dblclick(function(au){au.stopPropagation()});ah.click(function(){ao.stale=true;aj.data_manager.get_more_data(ao.low,ao.high,aj.mode,ao.resolution,{},aj.data_manager.BROAD_DATA_REQ);aj.request_draw()}).dblclick(function(au){au.stopPropagation()})}ao.predisplay_actions();var am=this.view.high-this.view.low,ak=(ao.low-this.view.low)*ar;if(this.left_offset){ak-=this.left_offset}an.css({position:"absolute",top:0,left:ak,height:""});aq.append(an);aj.max_height=Math.max(aj.max_height,an.height());aj.content_div.css("height",aj.max_height+"px");aq.children().css("height",aj.max_height+"px")},_get_tile_bounds:function(ah,ai){var ak=ah*S*ai,al=S*ai,aj=(ak+al<=this.view.max_high?ak+al:this.view.max_high);return[ak,aj]},tool_region_and_parameters_str:function(aj,ah,ak){var ai=this,al=(aj!==undefined&&ah!==undefined&&ak!==undefined?aj+":"+ah+"-"+ak:"all");return" - region=["+al+"], parameters=["+ai.tool.get_param_values().join(", ")+"]"}});var ad=function(ai,ah){this.hidden=true;j.call(this,"label",ai,ah,{});this.container_div.addClass("label-track")};q(ad.prototype,j.prototype,{init:function(){this.enabled=true},_draw:function(){var aj=this.view,ak=aj.high-aj.low,an=Math.floor(Math.pow(10,Math.floor(Math.log(ak)/Math.log(10)))),ah=Math.floor(aj.low/an)*an,al=this.view.container.width(),ai=$("<div style='position: relative; height: 
1.3em;'></div>");while(ah<aj.high){var am=(ah-aj.low)/ak*al;ai.append($("<div class='label'>"+commatize(ah)+"</div>").css({position:"absolute",left:am-1}));ah+=an}this.content_div.children(":first").remove();this.content_div.append(ai)}});var B=function(ah){this.hidden=true;j.call(this,"reference",ah,{content_div:ah.top_labeltrack},{});N.call(this);ah.reference_track=this;this.left_offset=200;this.height_px=12;this.container_div.addClass("reference-track");this.content_div.css("background","none");this.content_div.css("min-height","0px");this.content_div.css("border","none");this.data_url=reference_url;this.data_url_extra_params={dbkey:ah.dbkey};this.data_manager=new I(C,this,false);this.tile_cache=new c(v)};q(B.prototype,r.prototype,N.prototype,{init:function(){this.enabled=true},draw_tile:function(ar,an,am,ai,at){var al=this,aj=S*am;if(at>this.view.canvas_manager.char_width_px){if(ar===null){al.content_div.css("height","0px");return}var ak=this.view.canvas_manager.new_canvas();var aq=ak.getContext("2d");ak.width=Math.ceil(aj*at+al.left_offset);ak.height=al.height_px;aq.font=aq.canvas.manager.default_font;aq.textAlign="center";ar=ar.data;for(var ao=0,ap=ar.length;ao<ap;ao++){var ah=Math.round(ao*at);aq.fillText(ar[ao],ah+al.left_offset,10)}return new b(al,ai,am,ak,ar)}this.content_div.css("height","0px")}});var k=function(am,ak,aj,an,ah,al){var ai=this;this.display_modes=["Histogram","Line","Filled","Intensity"];this.mode="Histogram";j.call(this,am,ak,aj,al);N.call(this);this.min_height_px=16;this.max_height_px=400;this.height_px=80;this.hda_ldda=an;this.dataset_id=ah;this.original_dataset_id=ah;this.data_manager=new T(C,this);this.tile_cache=new c(v);this.left_offset=0;this.config=new G({track:this,params:[{key:"name",label:"Name",type:"text",default_value:am},{key:"color",label:"Color",type:"color",default_value:"black"},{key:"min_value",label:"Min Value",type:"float",default_value:undefined},{key:"max_value",label:"Max Value",type:"float",default_value:undefined},{key:"mode",type:"string",default_value:this.mode,hidden:true},{key:"height",type:"int",default_value:this.height_px,hidden:true}],saved_values:al,onchange:function(){ai.set_name(ai.prefs.name);ai.vertical_range=ai.prefs.max_value-ai.prefs.min_value;$("#linetrack_"+ai.dataset_id+"_minval").text(ai.prefs.min_value);$("#linetrack_"+ai.dataset_id+"_maxval").text(ai.prefs.max_value);ai.tile_cache.clear();ai.request_draw()}});this.prefs=this.config.values;this.height_px=this.config.values.height;this.vertical_range=this.config.values.max_value-this.config.values.min_value;this.add_resize_handle()};q(k.prototype,r.prototype,N.prototype,{add_resize_handle:function(){var ah=this;var ak=false;var aj=false;var ai=$("<div class='track-resize'>");$(ah.container_div).hover(function(){ak=true;ai.show()},function(){ak=false;if(!aj){ai.hide()}});ai.hide().bind("dragstart",function(al,am){aj=true;am.original_height=$(ah.content_div).height()}).bind("drag",function(am,an){var al=Math.min(Math.max(an.original_height+an.deltaY,ah.min_height_px),ah.max_height_px);$(ah.content_div).css("height",al);ah.height_px=al;ah.request_draw(true)}).bind("dragend",function(al,am){ah.tile_cache.clear();aj=false;if(!ak){ai.hide()}ah.config.values.height=ah.height_px}).appendTo(ah.container_div)},predraw_init:function(){var ah=this;ah.vertical_range=undefined;return $.getJSON(ah.data_url,{stats:true,chrom:ah.view.chrom,low:null,high:null,hda_ldda:ah.hda_ldda,dataset_id:ah.dataset_id},function(ai){ah.container_div.addClass("line-track");var 
ak=ai.data;if(isNaN(parseFloat(ah.prefs.min_value))||isNaN(parseFloat(ah.prefs.max_value))){ah.prefs.min_value=ak.min;ah.prefs.max_value=ak.max;$("#track_"+ah.dataset_id+"_minval").val(ah.prefs.min_value);$("#track_"+ah.dataset_id+"_maxval").val(ah.prefs.max_value)}ah.vertical_range=ah.prefs.max_value-ah.prefs.min_value;ah.total_frequency=ak.total_frequency;ah.container_div.find(".yaxislabel").remove();var al=$("<div />").addClass("yaxislabel").attr("id","linetrack_"+ah.dataset_id+"_minval").text(ab(ah.prefs.min_value,3));var aj=$("<div />").addClass("yaxislabel").attr("id","linetrack_"+ah.dataset_id+"_maxval").text(ab(ah.prefs.max_value,3));aj.css({position:"absolute",top:"24px",left:"10px"});aj.prependTo(ah.container_div);al.css({position:"absolute",bottom:"2px",left:"10px"});al.prependTo(ah.container_div)})},draw_tile:function(au,am,al,aj,at){if(this.vertical_range===undefined){return}var ah=this._get_tile_bounds(aj,al),an=ah[0],ar=ah[1],ai=Math.ceil((ar-an)*at),ap=this.height_px;var ak=this.view.canvas_manager.new_canvas();ak.width=ai,ak.height=ap;var aq=ak.getContext("2d");var ao=new M.LinePainter(au.data,an,ar,this.prefs,am);ao.draw(aq,ai,ap);return new b(this.track,aj,al,ak,au.data)}});var e=function(ah,an,ai,am,ap,ao,ak,al){var aj=this;this.display_modes=["Auto","Histogram","Dense","Squish","Pack"];j.call(this,ah,an,ai,ao);N.call(this,ak,al);this.config=new G({track:this,params:[{key:"name",label:"Name",type:"text",default_value:ah},{key:"block_color",label:"Block color",type:"color",default_value:"#444"},{key:"label_color",label:"Label color",type:"color",default_value:"black"},{key:"show_counts",label:"Show summary counts",type:"bool",default_value:true},{key:"mode",type:"string",default_value:this.mode,hidden:true},],saved_values:ao,onchange:function(){aj.set_name(aj.prefs.name);aj.tile_cache.clear();aj.request_draw()}});this.prefs=this.config.values;this.height_px=0;this.container_div.addClass("feature-track");this.hda_ldda=am;this.dataset_id=ap;this.original_dataset_id=ap;this.show_labels_scale=0.001;this.showing_details=false;this.summary_draw_height=30;this.inc_slots={};this.start_end_dct={};this.tile_cache=new c(d);this.data_manager=new T(20,this);this.left_offset=200;this.painter=M.LinkedFeaturePainter};q(e.prototype,r.prototype,N.prototype,{postdraw_actions:function(ax,ah,ay,aw){N.prototype.postdraw_actions.call(this,ax,aw);var ak=this;if(aw){var am=ak.content_div.children();var an=false;for(var al=am.length-1,ar=0;al>=ar;al--){var aj=$(am[al]);if(an){aj.remove()}else{if(aj.children().length!==0){an=true}}}}if(ak.mode=="Histogram"){var aq=-1;for(var al=0;al<ax.length;al++){var av=ax[al].max_val;if(av>aq){aq=av}}for(var al=0;al<ax.length;al++){var au=ax[al];if(au.max_val!==aq){au.canvas.remove();ak.draw_helper(true,ah,au.index,au.resolution,au.canvas.parent(),ay,[],{max:aq})}}}if(ak.filters_manager){var ai=ak.filters_manager.filters;for(var ap=0;ap<ai.length;ap++){ai[ap].update_ui_elt()}var ao=false,at;for(var al=0;al<ax.length;al++){if(ax[al].data.length){at=ax[al].data[0];for(var ap=0;ap<ai.length;ap++){if(ai[ap].applies_to(at)){ao=true;break}}}}if(ak.filters_available!==ao){ak.filters_available=ao;if(!ak.filters_available){ak.filters_div.hide()}ak.make_name_popup_menu()}}},update_auto_mode:function(ah){if(this.mode=="Auto"){if(ah=="no_detail"){ah="feature spans"}else{if(ah=="summary_tree"){ah="coverage histogram"}}this.mode_div.text("Auto ("+ah+")")}},incremental_slots:function(al,ai,ak){var 
aj=this.view.canvas_manager.dummy_context,ah=this.inc_slots[al];if(!ah||(ah.mode!==ak)){ah=new (t.FeatureSlotter)(al,ak==="Pack",A,function(am){return aj.measureText(am)});ah.mode=ak;this.inc_slots[al]=ah}return ah.slot_features(ai)},get_summary_tree_data:function(al,ao,aj,ax){if(ax>aj-ao){ax=aj-ao}var at=Math.floor((aj-ao)/ax),aw=[],ak=0;var am=0,an=0,ar,av=0,ap=[],au,aq;var ai=function(aA,az,aB,ay){aA[0]=az+aB*ay;aA[1]=az+(aB+1)*ay};while(av<ax&&am!==al.length){var ah=false;for(;av<ax&&!ah;av++){ai(ap,ao,av,at);for(an=am;an<al.length;an++){ar=al[an].slice(1,3);if(is_overlap(ar,ap)){ah=true;break}}if(ah){break}}data_start_index=an;aw[aw.length]=au=[ap[0],0];for(;an<al.length;an++){ar=al[an].slice(1,3);if(is_overlap(ar,ap)){au[1]++}else{break}}if(au[1]>ak){ak=au[1]}av++}return{max:ak,delta:at,data:aw}},draw_tile:function(aw,az,aE,aI,ar,ak){var aB=this,am=aB._get_tile_bounds(aI,aE),aL=am[0],ai=am[1],ay=ai-aL,aC=Math.ceil(ay*ar),aP=25,al=this.left_offset,ax,an;if(az==="Auto"){if(aw.dataset_type==="summary_tree"){az=aw.dataset_type}else{if(aw.extra_info==="no_detail"){az="no_detail"}else{var aO=aw.data;if(this.view.high-this.view.low>J){az="Squish"}else{az="Pack"}}}this.update_auto_mode(az)}if(az==="summary_tree"||az==="Histogram"){an=this.summary_draw_height;this.container_div.find(".yaxislabel").remove();var ah=$("<div />").addClass("yaxislabel");ah.text(aw.max);ah.css({position:"absolute",top:"24px",left:"10px",color:this.prefs.label_color});ah.prependTo(this.container_div);var aj=this.view.canvas_manager.new_canvas();aj.width=aC+al;aj.height=an+U;if(aw.dataset_type!="summary_tree"){var at=this.get_summary_tree_data(aw.data,aL,ai,200);if(aw.max){at.max=aw.max}aw=at}var aM=new M.SummaryTreePainter(aw,aL,ai,this.prefs);var aD=aj.getContext("2d");aD.translate(al,U);aM.draw(aD,aC,an);return new l(aB,aI,aE,aj,aw.data,aw.max)}var ax,ap=1;if(az==="no_detail"||az==="Squish"||az==="Pack"){ap=this.incremental_slots(ar,aw.data,az);ax=this.inc_slots[ar].slots}var aq=[];if(aw.data){var au=this.filters_manager.filters;for(var aF=0,aH=aw.data.length;aF<aH;aF++){var ao=aw.data[aF];var aG=false;var av;for(var aK=0,aN=au.length;aK<aN;aK++){av=au[aK];av.update_attrs(ao);if(!av.keep(ao)){aG=true;break}}if(!aG){aq.push(ao)}}}var aA=(this.filters_manager.alpha_filter?new P(this.filters_manager.alpha_filter):null);var aM=new (this.painter)(aq,aL,ai,this.prefs,az,aA,ak);var an=Math.max(af,aM.get_required_height(ap));var aj=this.view.canvas_manager.new_canvas();var aJ=null;aj.width=aC+al;aj.height=an;var aD=aj.getContext("2d");aD.fillStyle=this.prefs.block_color;aD.font=aD.canvas.manager.default_font;aD.textAlign="right";this.container_div.find(".yaxislabel").remove();if(aw.data){aD.translate(al,0);aJ=aM.draw(aD,aC,an,ax);aJ.translation=-al}return new Q(aB,aI,aE,aj,aw.data,az,aw.message,aJ)}});var V=function(al,aj,ai,an,ah,ak,am){e.call(this,al,aj,ai,an,ah,ak,am);this.painter=M.VariantPainter};q(V.prototype,r.prototype,N.prototype,e.prototype);var Y=function(al,aj,ai,an,ah,ak,am){e.call(this,al,aj,ai,an,ah,ak,am);this.config=new G({track:this,params:[{key:"name",label:"Name",type:"text",default_value:al},{key:"block_color",label:"Block color",type:"color",default_value:"#444"},{key:"label_color",label:"Label color",type:"color",default_value:"black"},{key:"show_insertions",label:"Show insertions",type:"bool",default_value:false},{key:"show_differences",label:"Show differences only",type:"bool",default_value:true},{key:"show_counts",label:"Show summary 
counts",type:"bool",default_value:true},{key:"mode",type:"string",default_value:this.mode,hidden:true},],saved_values:ak,onchange:function(){this.track.set_name(this.track.prefs.name);this.track.tile_cache.clear();this.track.request_draw()}});this.prefs=this.config.values;this.painter=M.ReadPainter;this.make_name_popup_menu()};q(Y.prototype,r.prototype,N.prototype,e.prototype);var W=function(al,aj,ai,an,ah,ak,am){e.call(this,al,aj,ai,an,ah,ak,am,{});this.data_url=raw_data_url;this.data_query_wait=1000;this.dataset_check_url=dataset_state_url};q(W.prototype,r.prototype,N.prototype,e.prototype,{predraw_init:function(){var ai=this;var ah=function(){if(ai.data_manager.size()===0){setTimeout(ah,300)}else{ai.data_url=default_data_url;ai.data_query_wait=L;ai.dataset_state_url=converted_datasets_state_url;$.getJSON(ai.dataset_state_url,{dataset_id:ai.dataset_id,hda_ldda:ai.hda_ldda},function(aj){})}};ah()}});Z.View=ae;Z.DrawableGroup=R;Z.LineTrack=k;Z.FeatureTrack=e;Z.ReadTrack=Y};var slotting_module=function(c,b){var e=c("class").extend;var d=2,a=5;b.FeatureSlotter=function(j,h,f,g){this.slots={};this.start_end_dct={};this.w_scale=j;this.include_label=h;this.max_rows=f;this.measureText=g};e(b.FeatureSlotter.prototype,{slot_features:function(m){var p=this.w_scale,s=this.slots,h=this.start_end_dct,y=[],A=[],n=0,z=this.max_rows;for(var w=0,x=m.length;w<x;w++){var l=m[w],o=l[0];if(s[o]!==undefined){n=Math.max(n,s[o]);A.push(s[o])}else{y.push(w)}}var q=function(G,H){for(var F=0;F<=z;F++){var D=false,I=h[F];if(I!==undefined){for(var C=0,E=I.length;C<E;C++){var B=I[C];if(H>B[0]&&G<B[1]){D=true;break}}}if(!D){return F}}return -1};for(var w=0,x=y.length;w<x;w++){var l=m[y[w]],o=l[0],u=l[1],f=l[2],r=l[3],g=Math.floor(u*p),k=Math.ceil(f*p),v=this.measureText(r).width,j;if(r!==undefined&&this.include_label){v+=(d+a);if(g-v>=0){g-=v;j="left"}else{k+=v;j="right"}}var t=q(g,k);if(t>=0){if(h[t]===undefined){h[t]=[]}h[t].push([g,k]);s[o]=t;n=Math.max(n,t)}else{}}return n+1}})};var painters_module=function(k,y){var v=k("class").extend;var q=function(J,B,H,A,G,E){if(E===undefined){E=4}var D=A-B;var C=G-H;var F=Math.floor(Math.sqrt(D*D+C*C)/E);var K=D/F;var I=C/F;var z;for(z=0;z<F;z++,B+=K,H+=I){if(z%2!==0){continue}J.fillRect(B,H,E,1)}};var r=function(B,A,z,E){var D=A-E/2,C=A+E/2,F=z-Math.sqrt(E*3/2);B.beginPath();B.moveTo(D,F);B.lineTo(C,F);B.lineTo(A,z);B.lineTo(D,F);B.strokeStyle=this.fillStyle;B.fill();B.stroke();B.closePath()};var g=function(z){this.default_alpha=(z?z:1)};g.prototype.gen_alpha=function(z){return this.default_alpha};var n=function(B,D,z,A,C){this.data=B;this.view_start=D;this.view_end=z;this.prefs=v({},this.default_prefs,A);this.mode=C};n.prototype.default_prefs={};var w=function(B,D,z,A,C){n.call(this,B,D,z,A,C)};w.prototype.default_prefs={show_counts:false};w.prototype.draw=function(M,z,L){var E=this.view_start,O=this.view_end-this.view_start,N=z/O;var J=this.data.data,I=this.data.delta,G=this.data.max,B=L;delta_x_px=Math.ceil(I*N);M.save();for(var C=0,D=J.length;C<D;C++){var H=Math.floor((J[C][0]-E)*N);var F=J[C][1];if(!F){continue}var K=F/G*L;if(F!==0&&K<1){K=1}M.fillStyle=this.prefs.block_color;M.fillRect(H,B-K,delta_x_px,K);var A=4;if(this.prefs.show_counts&&(M.measureText(F).width+A)<delta_x_px){M.fillStyle=this.prefs.label_color;M.textAlign="center";M.fillText(F,H+(delta_x_px/2),10)}}M.restore()};var c=function(z,D,F,G,B){n.call(this,z,D,F,G,B);if(this.prefs.min_value===undefined){var H=Infinity;for(var 
A=0,C=this.data.length;A<C;A++){H=Math.min(H,this.data[A][1])}this.prefs.min_value=H}if(this.prefs.max_value===undefined){var E=-Infinity;for(var A=0,C=this.data.length;A<C;A++){E=Math.max(E,this.data[A][1])}this.prefs.max_value=E}};c.prototype.default_prefs={min_value:undefined,max_value:undefined,mode:"Histogram",color:"#000",overflow_color:"#F66"};c.prototype.draw=function(N,M,K){var F=false,H=this.prefs.min_value,D=this.prefs.max_value,J=D-H,z=K,A=this.view_start,L=this.view_end-this.view_start,B=M/L,I=this.mode,T=this.data;N.save();var U=Math.round(K+H/J*K);if(I!=="Intensity"){N.fillStyle="#aaa";N.fillRect(0,U,M,1)}N.beginPath();var R,E,C;if(T.length>1){C=Math.ceil((T[1][0]-T[0][0])*B)}else{C=10}for(var O=0,P=T.length;O<P;O++){N.fillStyle=this.prefs.color;R=Math.round((T[O][0]-A)*B);E=T[O][1];var Q=false,G=false;if(E===null){if(F&&I==="Filled"){N.lineTo(R,z)}F=false;continue}if(E<H){G=true;E=H}else{if(E>D){Q=true;E=D}}if(I==="Histogram"){E=Math.round(E/J*z);N.fillRect(R,U,C,-E)}else{if(I==="Intensity"){E=255-Math.floor((E-H)/J*255);N.fillStyle="rgb("+E+","+E+","+E+")";N.fillRect(R,0,C,z)}else{E=Math.round(z-(E-H)/J*z);if(F){N.lineTo(R,E)}else{F=true;if(I==="Filled"){N.moveTo(R,z);N.lineTo(R,E)}else{N.moveTo(R,E)}}}}N.fillStyle=this.prefs.overflow_color;if(Q||G){var S;if(I==="Histogram"||I==="Intensity"){S=C}else{R-=2;S=4}if(Q){N.fillRect(R,0,S,3)}if(G){N.fillRect(R,z-3,S,3)}}N.fillStyle=this.prefs.color}if(I==="Filled"){if(F){N.lineTo(R,U);N.lineTo(0,U)}N.fill()}else{N.stroke()}N.restore()};var o=function(z){this.feature_positions={};this.slot_height=z;this.translation=0};o.prototype.map_feature_data=function(A,C,z,B){if(!this.feature_positions[C]){this.feature_positions[C]=[]}this.feature_positions[C].push({data:A,x_start:z,x_end:B})};o.prototype.get_feature_data=function(z,D){var C=Math.floor(D/this.slot_height),B;if(!this.feature_positions[C]){return null}z+=this.translation;for(var A=0;A<this.feature_positions[C].length;A++){B=this.feature_positions[C][A];if(z>=B.x_start&&z<=B.x_end){return B.data}}};var p=function(C,E,z,B,D,A){n.call(this,C,E,z,B,D);this.alpha_generator=(A?A:new g())};p.prototype.default_prefs={block_color:"#FFF",connector_color:"#FFF"};v(p.prototype,{get_required_height:function(A){var z=y_scale=this.get_row_height(),B=this.mode;if(B==="no_detail"||B==="Squish"||B==="Pack"){z=A*y_scale}return z+Math.max(Math.round(y_scale/2),5)},draw:function(L,J,H,G){var R=this.data,E=this.view_start,N=this.view_end;L.save();L.fillStyle=this.prefs.block_color;L.textAlign="right";var I=this.view_end-this.view_start,F=J/I,M=this.get_row_height(),Q=new o(M),C;for(var O=0,P=R.length;O<P;O++){var B=R[O],D=B[0],K=B[1],z=B[2],A=(G&&G[D]!==undefined?G[D]:null);if((K<N&&z>E)&&(this.mode=="Dense"||A!==null)){C=this.draw_element(L,this.mode,B,A,E,N,F,M,J);Q.map_feature_data(B,A,C[0],C[1])}}L.restore();return Q},draw_element:function(F,B,H,D,C,E,G,A,z){console.log("WARNING: Unimplemented function.");return[0,0]}});var d=10,j=3,m=5,x=10,f=1,t=3,e=3,a=9,l=2,h="#ccc";var s=function(C,E,z,B,D,A){p.call(this,C,E,z,B,D,A)};v(s.prototype,p.prototype,{get_row_height:function(){var A=this.mode,z;if(A==="Dense"){z=d}else{if(A==="no_detail"){z=j}else{if(A==="Squish"){z=m}else{z=x}}}return z},draw_element:function(L,E,T,G,N,ae,ai,ak,z){var 
Q=T[0],ag=T[1],Y=T[2],O=T[3],Z=Math.floor(Math.max(0,(ag-N)*ai)),M=Math.ceil(Math.min(z,Math.max(0,(Y-N)*ai))),X=Z,aj=M,W=(E==="Dense"?0:(0+G))*ak,K,ac,P=null,al=null,C=this.prefs.block_color,ab=this.prefs.label_color;L.globalAlpha=this.alpha_generator.gen_alpha(T);if(E=="Dense"){G=1}if(E==="no_detail"){L.fillStyle=C;L.fillRect(Z,W+5,M-Z,f)}else{var J=T[4],V=T[5],aa=T[6],D=T[7];if(V&&aa){P=Math.floor(Math.max(0,(V-N)*ai));al=Math.ceil(Math.min(z,Math.max(0,(aa-N)*ai)))}var ah,R;if(E==="Squish"||E==="Dense"){ah=1;R=e}else{ah=5;R=a}if(!D){if(T.strand){if(T.strand==="+"){L.fillStyle=L.canvas.manager.get_pattern("right_strand_inv")}else{if(T.strand==="-"){L.fillStyle=L.canvas.manager.get_pattern("left_strand_inv")}}}else{L.fillStyle=C}L.fillRect(Z,W,M-Z,R)}else{var I,S;if(E==="Squish"||E==="Dense"){L.fillStyle=h;I=W+Math.floor(e/2)+1;S=1}else{if(J){var I=W;var S=R;if(J==="+"){L.fillStyle=L.canvas.manager.get_pattern("right_strand")}else{if(J==="-"){L.fillStyle=L.canvas.manager.get_pattern("left_strand")}}}else{L.fillStyle=h;I+=(e/2)+1;S=1}}L.fillRect(Z,I,M-Z,S);for(var af=0,B=D.length;af<B;af++){var F=D[af],A=Math.floor(Math.max(0,(F[0]-N)*ai)),U=Math.ceil(Math.min(z,Math.max((F[1]-N)*ai)));if(A>U){continue}L.fillStyle=C;L.fillRect(A,W+(R-ah)/2+1,U-A,ah);if(P!==undefined&&aa>V&&!(A>al||U<P)){var ad=Math.max(A,P),H=Math.min(U,al);L.fillRect(ad,W+1,H-ad,R);if(D.length==1&&E=="Pack"){if(J==="+"){L.fillStyle=L.canvas.manager.get_pattern("right_strand_inv")}else{if(J==="-"){L.fillStyle=L.canvas.manager.get_pattern("left_strand_inv")}}if(ad+14<H){ad+=2;H-=2}L.fillRect(ad,W+1,H-ad,R)}}}}if(E==="Pack"&&ag>N){L.fillStyle=ab;if(N===0&&Z-L.measureText(O).width<0){L.textAlign="left";L.fillText(O,M+l,W+8);aj+=L.measureText(O).width+l}else{L.textAlign="right";L.fillText(O,Z-l,W+8);X-=L.measureText(O).width+l}}}L.globalAlpha=1;return[X,aj]}});var b=function(C,E,z,B,D,A){p.call(this,C,E,z,B,D,A)};v(b.prototype,p.prototype,{draw_element:function(S,N,H,D,V,B,K,T,Q){var H=data[i],J=H[0],R=H[1],C=H[2],M=H[3],F=Math.floor(Math.max(0,(R-V)*K)),I=Math.ceil(Math.min(Q,Math.max(0,(C-V)*K))),E=(N==="Dense"?0:(0+D))*T,z,W,A=null,L=null;if(no_label){S.fillStyle=block_color;S.fillRect(F+left_offset,E+5,I-F,1)}else{var U=H[4],P=H[5],G=H[6];z=9;W=1;S.fillRect(F+left_offset,E,I-F,z);if(N!=="Dense"&&M!==undefined&&R>V){S.fillStyle=label_color;if(V===0&&F-S.measureText(M).width<0){S.textAlign="left";S.fillText(M,I+2+left_offset,E+8)}else{S.textAlign="right";S.fillText(M,F-2+left_offset,E+8)}S.fillStyle=block_color}var O=U+" / "+P;if(R>V&&S.measureText(O).width<(I-F)){S.fillStyle="white";S.textAlign="center";S.fillText(O,left_offset+F+(I-F)/2,E+8);S.fillStyle=block_color}}return[F,I]}});var u=function(D,F,z,C,E,A,B){p.call(this,D,F,z,C,E,A);this.ref_seq=B};u.prototype.default_prefs=v({},p.prototype.default_prefs,{show_insertions:false});v(u.prototype,p.prototype,{get_row_height:function(){var z,A=this.mode;if(A==="Dense"){z=d}else{if(A==="Squish"){z=m}else{z=x;if(this.prefs.show_insertions){z*=2}}}return z},draw_read:function(V,Q,M,aa,B,U,J,G,F){V.textAlign="center";var T=this,A=[aa,B],P=0,W=0,S=0;ref_seq=this.ref_seq,char_width_px=V.canvas.manager.char_width_px;var af=[];if((Q==="Pack"||this.mode==="Auto")&&G!==undefined&&M>char_width_px){S=Math.round(M/2)}if(!J){J=[[0,G.length]]}for(var N=0,Y=J.length;N<Y;N++){var K=J[N],C="MIDNSHP=X"[K[0]],O=K[1];if(C==="H"||C==="S"){P-=O}var 
H=U+P,ae=Math.floor(Math.max(0,(H-aa)*M)),I=Math.floor(Math.max(0,(H+O-aa)*M));if(ae===I){I+=1}switch(C){case"H":break;case"S":case"M":case"=":if(is_overlap([H,H+O],A)){var R=G.slice(W,W+O);if(S>0){V.fillStyle=this.prefs.block_color;V.fillRect(ae-S,F+1,I-ae,9);V.fillStyle=h;for(var ac=0,z=R.length;ac<z;ac++){if(this.prefs.show_differences&&ref_seq){var L=ref_seq[H-aa+ac];if(!L||L.toLowerCase()===R[ac].toLowerCase()){continue}}if(H+ac>=aa&&H+ac<=B){var ad=Math.floor(Math.max(0,(H+ac-aa)*M));V.fillText(R[ac],ad,F+9)}}}else{V.fillStyle=this.prefs.block_color;V.fillRect(ae,F+4,I-ae,e)}}W+=O;P+=O;break;case"N":V.fillStyle=h;V.fillRect(ae-S,F+5,I-ae,1);P+=O;break;case"D":V.fillStyle="red";V.fillRect(ae-S,F+4,I-ae,3);P+=O;break;case"P":break;case"I":var Z=ae-S;if(is_overlap([H,H+O],A)){var R=G.slice(W,W+O);if(this.prefs.show_insertions){var E=ae-(I-ae)/2;if((Q==="Pack"||this.mode==="Auto")&&G!==undefined&&M>char_width_px){V.fillStyle="yellow";V.fillRect(E-S,F-9,I-ae,9);af[af.length]={type:"triangle",data:[Z,F+4,5]};V.fillStyle=h;switch(seq_tile_overlap){case (OVERLAP_START):R=R.slice(aa-H);break;case (OVERLAP_END):R=R.slice(0,H-B);break;case (CONTAINED_BY):break;case (CONTAINS):R=R.slice(aa-H,H-B);break}for(var ac=0,z=R.length;ac<z;ac++){var ad=Math.floor(Math.max(0,(H+ac-aa)*M));V.fillText(R[ac],ad-(I-ae)/2,F)}}else{V.fillStyle="yellow";V.fillRect(E,F+(this.mode!=="Dense"?2:5),I-ae,(Q!=="Dense"?e:t))}}else{if((Q==="Pack"||this.mode==="Auto")&&G!==undefined&&M>char_width_px){af[af.length]={type:"text",data:[R.length,Z,F+9]}}else{}}}W+=O;break;case"X":W+=O;break}}V.fillStyle="yellow";var ab,D,ag;for(var X=0;X<af.length;X++){ab=af[X];D=ab.type;ag=ab.data;if(D==="text"){V.save();V.font="bold "+V.font;V.fillText(ag[0],ag[1],ag[2]);V.restore()}else{if(D=="triangle"){r(V,ag[0],ag[1],ag[2])}}}},draw_element:function(S,N,F,C,V,A,J,T,Q){var I=F[0],R=F[1],B=F[2],K=F[3],E=Math.floor(Math.max(0,(R-V)*J)),G=Math.ceil(Math.min(Q,Math.max(0,(B-V)*J))),D=(N==="Dense"?0:(0+C))*T,W=this.prefs.block_color,H=this.prefs.label_color,P=0;if((N==="Pack"||this.mode==="Auto")&&J>S.canvas.manager.char_width_px){var P=Math.round(J/2)}S.fillStyle=W;if(F[5] instanceof Array){var O=Math.floor(Math.max(0,(F[4][0]-V)*J)),M=Math.ceil(Math.min(Q,Math.max(0,(F[4][1]-V)*J))),L=Math.floor(Math.max(0,(F[5][0]-V)*J)),z=Math.ceil(Math.min(Q,Math.max(0,(F[5][1]-V)*J)));if(F[4][1]>=V&&F[4][0]<=A&&F[4][2]){this.draw_read(S,N,J,V,A,F[4][0],F[4][2],F[4][3],D)}if(F[5][1]>=V&&F[5][0]<=A&&F[5][2]){this.draw_read(S,N,J,V,A,F[5][0],F[5][2],F[5][3],D)}if(L>M){S.fillStyle=h;q(S,M-P,D+5,L-P,D+5)}}else{S.fillStyle=W;this.draw_read(S,N,J,V,A,R,F[4],F[5],D)}if(N==="Pack"&&R>V){S.fillStyle=this.prefs.label_color;var U=1;if(U===0&&E-S.measureText(K).width<0){S.textAlign="left";S.fillText(K,G+l-P,D+8)}else{S.textAlign="right";S.fillText(K,E-l-P,D+8)}S.fillStyle=W}return[0,0]}});y.AlphaGenerator=g;y.SummaryTreePainter=w;y.LinePainter=c;y.LinkedFeaturePainter=s;y.ReadPainter=u;y.VariantPainter=b};(function(d){var c={};var b=function(e){return c[e]};var a=function(f,g){var e={};g(b,e);c[f]=e};a("class",class_module);a("slotting",slotting_module);a("painters",painters_module);a("trackster",trackster_module);for(key in c.trackster){d[key]=c.trackster[key]}})(window); \ No newline at end of file --- a/static/scripts/packed/trackster_ui.js Wed Oct 12 11:19:42 2011 -0400 +++ b/static/scripts/packed/trackster_ui.js Wed Oct 12 11:19:58 2011 -0400 @@ -1,1 +1,1 @@ -var add_bookmark=function(b,a){var 
g=$("#bookmarks-container"),d=$("<div/>").addClass("bookmark").appendTo(g),c=$("<div/>").addClass("delete-icon-container").appendTo(d).click(function(){d.slideUp("fast");d.remove();view.has_changes=true;return false}),e=$("<a href=''/>").addClass("icon-button delete").appendTo(c),f=$("<div/>").addClass("position").appendTo(d),h=$("<a href=''/>").text(b).appendTo(f).click(function(){view.go_to(b);return false});annotation_div=get_editable_text_elt(a,true).addClass("annotation").appendTo(d);view.has_changes=true;return d};var addable_objects={LineTrack:LineTrack,FeatureTrack:FeatureTrack,ReadTrack:ReadTrack,DrawableGroup:DrawableGroup};var track_from_dict=function(c,b){var a=new addable_objects[c.track_type](c.name,view,b,c.hda_ldda,c.dataset_id,c.prefs,c.filters,c.tool);if(c.mode){a.change_mode(c.mode)}return a};var drawable_collection_from_dict=function(f,a){var e=new addable_objects[f.obj_type](f.name,view,a,f.prefs,view.viewport_container,view);for(var d=0;d<f.drawables.length;d++){var b=f.drawables[d],c;if(b.track_type){c=track_from_dict(b,e)}else{c=drawable_collection_from_dict(b)}e.add_drawable(c);e.content_div.append(c.container_div)}return e};var drawable_from_dict=function(b,a){return(b.track_type?track_from_dict(b,a):drawable_collection_from_dict(b,a))};var create_visualization=function(b,e,g,c,a,d,f){view=new View(b,e,g,c);view.editor=true;$.when(view.load_chroms_deferred).then(function(){if(a){var k=a.chrom,p=a.start,h=a.end,m=a.overview;if(k&&(p!==undefined)&&h){view.change_chrom(k,p,h)}}if(d){var o;for(var j=0;j<d.length;j++){o=d[j];view.add_drawable(drawable_from_dict(o,view))}}var n;for(var j=0;j<view.tracks.length;j++){if(view.tracks[j].name==m){view.set_overview(view.tracks[j]);break}}if(f){var l;for(var j=0;j<f.length;j++){l=f[j];add_bookmark(l.position,l.annotation)}}view.has_changes=false});return view};var init_keyboard_nav=function(a){$(document).keydown(function(b){if($(b.srcElement).is(":input")){return}switch(b.which){case 37:a.move_fraction(0.25);break;case 38:var c=Math.round(a.viewport_container.height()/15);a.viewport_container.scrollTo("-="+c+"px");break;case 39:a.move_fraction(-0.25);break;case 40:var c=Math.round(a.viewport_container.height()/15);a.viewport_container.scrollTo("+="+c+"px");break}})}; \ No newline at end of file +var add_bookmark=function(b,a){var g=$("#bookmarks-container"),d=$("<div/>").addClass("bookmark").appendTo(g),c=$("<div/>").addClass("delete-icon-container").appendTo(d).click(function(){d.slideUp("fast");d.remove();view.has_changes=true;return false}),e=$("<a href=''/>").addClass("icon-button delete").appendTo(c),f=$("<div/>").addClass("position").appendTo(d),h=$("<a href=''/>").text(b).appendTo(f).click(function(){view.go_to(b);return false});annotation_div=get_editable_text_elt(a,true).addClass("annotation").appendTo(d);view.has_changes=true;return d};var addable_objects={LineTrack:LineTrack,FeatureTrack:FeatureTrack,ReadTrack:ReadTrack,DrawableGroup:DrawableGroup};var track_from_dict=function(c,b){var a=new addable_objects[c.track_type](c.name,view,b,c.hda_ldda,c.dataset_id,c.prefs,c.filters,c.tool);if(c.mode){a.change_mode(c.mode)}return a};var drawable_collection_from_dict=function(f,a){var e=new addable_objects[f.obj_type](f.name,view,a,f.prefs,view.viewport_container,view);for(var d=0;d<f.drawables.length;d++){var b=f.drawables[d],c;if(b.track_type){c=track_from_dict(b,e)}else{c=drawable_collection_from_dict(b)}e.add_drawable(c);e.content_div.append(c.container_div)}return e};var 
drawable_from_dict=function(b,a){return(b.track_type?track_from_dict(b,a):drawable_collection_from_dict(b,a))};var create_visualization=function(b,e,g,c,a,d,f){view=new View(b,e,g,c);view.editor=true;$.when(view.load_chroms_deferred).then(function(){if(a){var k=a.chrom,p=a.start,h=a.end,m=a.overview;if(k&&(p!==undefined)&&h){view.change_chrom(k,p,h)}}if(d){var o;for(var j=0;j<d.length;j++){o=d[j];view.add_drawable(drawable_from_dict(o,view))}}var n;for(var j=0;j<view.drawables.length;j++){if(view.drawables[j].name==m){view.set_overview(view.drawables[j]);break}}if(f){var l;for(var j=0;j<f.length;j++){l=f[j];add_bookmark(l.position,l.annotation)}}view.has_changes=false});return view};var init_keyboard_nav=function(a){$(document).keydown(function(b){if($(b.srcElement).is(":input")){return}switch(b.which){case 37:a.move_fraction(0.25);break;case 38:var c=Math.round(a.viewport_container.height()/15);a.viewport_container.scrollTo("-="+c+"px");break;case 39:a.move_fraction(-0.25);break;case 40:var c=Math.round(a.viewport_container.height()/15);a.viewport_container.scrollTo("+="+c+"px");break}})};
\ No newline at end of file

--- a/static/scripts/trackster.js Wed Oct 12 11:19:42 2011 -0400
+++ b/static/scripts/trackster.js Wed Oct 12 11:19:58 2011 -0400
@@ -309,10 +309,19 @@
     CACHED_TILES_FEATURE = 10,
     CACHED_TILES_LINE = 5,
     CACHED_DATA = 5;
-
-function round_1000(num) {
-    return Math.round(num * 1000) / 1000;
-};
+
+/**
+ * Round a number to a given number of decimal places.
+ */
+function round(num, places) {
+    // Default rounding is to integer.
+    if (!places) {
+        places = 0;
+    }
+
+    var val = Math.pow(10, places);
+    return Math.round(num * val) / val;
+}
 
 /**
  * Generic cache that handles key/value pairs.
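For context, the old round_1000(num) is simply round(num, 3) under the new helper, so the round(value, 3) call sites later in this diff migrate mechanically. An illustration of the generalized behavior (values are made up, not from the changeset):

    round(3.14159);     // 3     -- places defaults to 0
    round(3.14159, 2);  // 3.14
    round(3.14159, 3);  // 3.142 -- what round_1000(3.14159) used to return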
@@ -799,8 +808,6 @@
     this.vis_id = vis_id;
     this.dbkey = dbkey;
     this.title = title;
-    // Alias tracks to point at drawables. TODO: changes tracks to 'drawables' or something similar.
-    this.tracks = this.drawables;
     this.label_tracks = [];
     this.tracks_to_be_redrawn = [];
     this.max_low = 0;
@@ -1097,10 +1104,10 @@
         view.reset();
         view.request_redraw(true);
 
-        for (var track_id = 0, len = view.tracks.length; track_id < len; track_id++) {
-            var track = view.tracks[track_id];
-            if (track.init) {
-                track.init();
+        for (var i = 0, len = view.drawables.length; i < len; i++) {
+            var drawable = view.drawables[i];
+            if (drawable.init) {
+                drawable.init();
             }
         }
     }
@@ -1192,7 +1199,7 @@
         var
             view = this,
             // Either redrawing a single track or all view's tracks.
-            track_list = (track ? [track] : view.tracks),
+            track_list = (track ? [track] : view.drawables),
             track_index;
 
         // Add/update tracks in track list to redraw list.
@@ -1629,7 +1636,7 @@
             // No element to filter on.
             return true;
         }
-        var val = parseFloat(element[this.index]);
+        var val = element[this.index];
         return (isNaN(val) || (val >= this.low && val <= this.high));
     },
     /**
@@ -2085,7 +2092,8 @@
 /**
  * Tiles drawn by tracks.
  */
-var Tile = function(index, resolution, canvas, data) {
+var Tile = function(track, index, resolution, canvas, data) {
+    this.track = track;
     this.index = index;
     this.low = index * DENSITY * resolution;
     this.high = (index + 1) * DENSITY * resolution;
@@ -2096,26 +2104,114 @@
     this.stale = false;
 };
 
-var SummaryTreeTile = function(index, resolution, canvas, data, max_val) {
-    Tile.call(this, index, resolution, canvas, data);
+/**
+ * Perform pre-display actions.
+ */
+Tile.prototype.predisplay_actions = function() {};
+
+var SummaryTreeTile = function(track, index, resolution, canvas, data, max_val) {
+    Tile.call(this, track, index, resolution, canvas, data);
     this.max_val = max_val;
 };
+extend(SummaryTreeTile.prototype, Tile.prototype);
 
-var FeatureTrackTile = function(index, resolution, canvas, data, message, feature_mapper) {
-    Tile.call(this, index, resolution, canvas, data);
+var FeatureTrackTile = function(track, index, resolution, canvas, data, mode, message, feature_mapper) {
+    Tile.call(this, track, index, resolution, canvas, data);
+    this.mode = mode;
     this.message = message;
     this.feature_mapper = feature_mapper;
+};
+extend(FeatureTrackTile.prototype, Tile.prototype);
+
+/**
+ * Sets up support for popups.
+ */
+FeatureTrackTile.prototype.predisplay_actions = function() {
+    //
+    // Add support for popups.
+    //
+    var tile = this,
+        popups = {};
+
+    // Only show popups in Pack mode.
+    if (tile.mode !== "Pack") { return; }
 
-    //
-    // Set up display of feature data on mouseover.
-    //
-    var tile = this;
     $(this.canvas).mousemove(function (e) {
-        var feature_data = tile.feature_mapper.get_feature_data(e.offsetX, e.offsetY);
-
-        // TODO: show popup with feature's information.
+        // Get feature data for position.
+        var
+            this_offset = $(this).offset(),
+            offsetX = e.pageX - this_offset.left,
+            offsetY = e.pageY - this_offset.top,
+            feature_data = tile.feature_mapper.get_feature_data(offsetX, offsetY),
+            feature_uid = (feature_data ? feature_data[0] : null);
+        // Hide visible popup if not over a feature or over a different feature.
+        $(this).siblings(".feature-popup").each(function() {
+            if ( !feature_uid ||
+                 $(this).attr("id") !== feature_uid.toString() ) {
+                $(this).remove();
+            }
+        });
+
+        if (feature_data) {
+            // Get or create popup.
+            var popup = popups[feature_uid];
+            if (!popup) {
+                // Create feature's popup element.
+                var
+                    feature_uid = feature_data[0],
+                    feature_dict = {
+                        name: feature_data[3],
+                        start: feature_data[1],
+                        end: feature_data[2],
+                        strand: feature_data[4]
+                    },
+                    filters = tile.track.filters_manager.filters,
+                    filter;
+
+                // Add filter values to feature dict.
+                for (var i = 0; i < filters.length; i++) {
+                    filter = filters[i];
+                    feature_dict[filter.name] = feature_data[filter.index];
+                }
+
+                // Build popup.
+
+                var popup = $("<div/>").attr("id", feature_uid).addClass("feature-popup"),
+                    key, value,
+                    table = $("<table/>").appendTo(popup), row;
+                for (key in feature_dict) {
+                    value = feature_dict[key];
+                    row = $("<tr/>").appendTo(table);
+                    $("<th/>").appendTo(row).text(key);
+                    $("<td/>").attr("align", "left").appendTo(row)
+                              .text(typeof(value) == 'number' ? round(value, 2) : value);
+                }
+                popups[feature_uid] = popup;
+            }
+
+            // Attach popup to canvas's parent.
+            popup.appendTo($(tile.canvas).parent());
+
+            // Offsets are within canvas, but popup must be positioned relative to parent element.
+            // parseInt strips "px" from left, top measurements. +7 so that mouse pointer does not
+            // overlap popup.
+            var
+                popupX = offsetX + parseInt( tile.canvas.css("left") ) + 7,
+                popupY = offsetY + parseInt( tile.canvas.css("top") ) + 7;
+            popup.css("left", popupX + "px").css("top", popupY + "px");
+        }
+        else if (!e.isPropagationStopped()) {
+            // Propagate event to other tiles because overlapping tiles prevent mousemove from being
+            // called on tiles under this tile.
+            e.stopPropagation();
+            $(this).siblings().each(function() {
+                $(this).trigger(e);
+            });
+        }
+    })
+    .mouseleave(function() {
+        $(this).siblings(".feature-popup").remove();
     });
-};
 
 /**
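The mousemove handler above ends with a forwarding step: overlapping tiles mean the topmost canvas swallows mousemove, so an event that hits no feature is marked with stopPropagation() and re-triggered on the siblings, and the isPropagationStopped() guard keeps an already-forwarded event from bouncing back. The pattern in isolation, as a minimal jQuery sketch (the selector and the is_over_feature hit test are illustrative stand-ins, not trackster code):

    function forward_unhandled_mousemove(selector, is_over_feature) {
        $(selector).mousemove(function(e) {
            if (is_over_feature(this, e)) { return; }   // handled here; done
            if (!e.isPropagationStopped()) {
                e.stopPropagation();                    // mark event as forwarded once
                $(this).siblings().each(function() {
                    $(this).trigger(e);                 // give overlapped siblings a chance
                });
            }
        });
    }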
@@ -2223,7 +2319,6 @@
             track.container_div.addClass("error");
             track.content_div.text(DATA_ERROR);
             if (result.message) {
-                var track_id = track.view.tracks.indexOf(track);
                 var error_link = $(" <a href='javascript:void(0);'></a>").text("View error").click(function() {
                     show_modal( "Trackster Error", "<pre>" + result.message + "</pre>", { "Close" : hide_modal } );
                 });
@@ -2492,7 +2587,7 @@
                 data = src_ctx.getImageData(0, 0, src_ctx.canvas.width, src_ctx.canvas.height);
                 // Need to undo offsets when placing image data.
                 tgt_ctx.putImageData(data, -track.left_offset, (tile.data.dataset_type === "summary_tree" ? SUMMARY_TREE_TOP_PADDING : 0));
-                new_tile = new Tile(-1, resolution, new_canvas);
+                new_tile = new Tile(track, -1, resolution, new_canvas);
                 overview_tile.resolve(new_tile);
             });
@@ -2697,6 +2792,8 @@
         //
         // Show tile element.
         //
+
+        tile.predisplay_actions();
 
         // Position tile element, recalculate left position at display time
         var range = this.view.high - this.view.low,
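The tile.predisplay_actions() call added here is the consumer side of the hook defined earlier in this diff: Tile supplies a no-op default, FeatureTrackTile overrides it to wire up popups, and the display code invokes it on every tile without knowing the concrete type. The skeleton of that template-method arrangement (plain prototype chaining stands in for trackster's extend() helper, and PopupTile is an illustrative name):

    // Base type: the pre-display hook defaults to doing nothing.
    var Tile = function(track, index) {
        this.track = track;
        this.index = index;
    };
    Tile.prototype.predisplay_actions = function() {};

    // Subtype: override the hook with type-specific setup.
    var PopupTile = function(track, index) {
        Tile.call(this, track, index);
    };
    PopupTile.prototype = Object.create(Tile.prototype);
    PopupTile.prototype.predisplay_actions = function() {
        // e.g. attach mouseover popups before the tile is shown
    };

    // Display code stays generic: tile.predisplay_actions() is safe
    // for any Tile subtype.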
@@ -2812,7 +2909,7 @@
                 var c_start = Math.round(c * w_scale);
                 ctx.fillText(seq[c], c_start + track.left_offset, 10);
             }
-            return new Tile(tile_index, resolution, canvas, seq);
+            return new Tile(track, tile_index, resolution, canvas, seq);
         }
         this.content_div.css("height", "0px");
     }
@@ -2851,8 +2948,8 @@
             track.set_name(track.prefs.name);
             track.vertical_range = track.prefs.max_value - track.prefs.min_value;
             // Update the y-axis
-            $('#linetrack_' + track.track_id + '_minval').text(track.prefs.min_value);
-            $('#linetrack_' + track.track_id + '_maxval').text(track.prefs.max_value);
+            $('#linetrack_' + track.dataset_id + '_minval').text(track.prefs.min_value);
+            $('#linetrack_' + track.dataset_id + '_maxval').text(track.prefs.max_value);
             track.tile_cache.clear();
             track.request_draw();
         }
@@ -2899,8 +2996,7 @@
         }).appendTo(track.container_div);
     },
     predraw_init: function() {
-        var track = this,
-            track_id = track.view.tracks.indexOf(track);
+        var track = this;
         track.vertical_range = undefined;
         return $.getJSON( track.data_url, {  stats: true, chrom: track.view.chrom, low: null, high: null,
@@ -2911,8 +3007,8 @@
                 track.prefs.min_value = data.min;
                 track.prefs.max_value = data.max;
                 // Update the config
-                $('#track_' + track_id + '_minval').val(track.prefs.min_value);
-                $('#track_' + track_id + '_maxval').val(track.prefs.max_value);
+                $('#track_' + track.dataset_id + '_minval').val(track.prefs.min_value);
+                $('#track_' + track.dataset_id + '_maxval').val(track.prefs.max_value);
             }
             track.vertical_range = track.prefs.max_value - track.prefs.min_value;
             track.total_frequency = data.total_frequency;
@@ -2920,8 +3016,8 @@
             // Draw y-axis labels if necessary
             track.container_div.find(".yaxislabel").remove();
 
-            var min_label = $("<div />").addClass('yaxislabel').attr("id", 'linetrack_' + track_id + '_minval').text(round_1000(track.prefs.min_value));
-            var max_label = $("<div />").addClass('yaxislabel').attr("id", 'linetrack_' + track_id + '_maxval').text(round_1000(track.prefs.max_value));
+            var min_label = $("<div />").addClass('yaxislabel').attr("id", 'linetrack_' + track.dataset_id + '_minval').text(round(track.prefs.min_value, 3));
+            var max_label = $("<div />").addClass('yaxislabel').attr("id", 'linetrack_' + track.dataset_id + '_maxval').text(round(track.prefs.max_value, 3));
 
             max_label.css({ position: "absolute", top: "24px", left: "10px" });
             max_label.prependTo(track.container_div);
@@ -2955,7 +3051,7 @@
             var painter = new painters.LinePainter(result.data, tile_low, tile_high, this.prefs, mode);
             painter.draw(ctx, width, height);
 
-            return new Tile(tile_index, resolution, canvas, result.data);
+            return new Tile(this.track, tile_index, resolution, canvas, result.data);
         }
     });
@@ -3288,7 +3384,7 @@
             // Deal with left_offset by translating.
             ctx.translate(left_offset, SUMMARY_TREE_TOP_PADDING);
             painter.draw(ctx, width, required_height);
-            return new SummaryTreeTile(tile_index, resolution, canvas, result.data, result.max);
+            return new SummaryTreeTile(track, tile_index, resolution, canvas, result.data, result.max);
         }
 
         // Start dealing with row-by-row tracks
@@ -3347,7 +3443,7 @@
             feature_mapper.translation = -left_offset;
         }
 
-        return new FeatureTrackTile(tile_index, resolution, canvas, result.data, result.message, feature_mapper);
+        return new FeatureTrackTile(track, tile_index, resolution, canvas, result.data, mode, result.message, feature_mapper);
     }
 });
@@ -3891,7 +3987,7 @@
  */
 FeaturePositionMapper.prototype.get_feature_data = function(x, y) {
     // Find slot using Y.
-    var slot = Math.floor(y/this.slot_height),
+    var slot = Math.floor( y/this.slot_height ),
         feature_dict;
 
     // May not be over a slot due to padding, margin, etc.
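This is the lookup half of FeaturePositionMapper: while a tile is drawn, each feature records the pixel span it occupies in its slot (row); on mouseover, the row is recovered from y and the x spans in that row are scanned linearly. Condensed to its core (field names follow the trackster code; the translation-offset handling is omitted):

    function FeaturePositionMapper(slot_height) {
        this.feature_positions = {};   // slot -> [ { data, x_start, x_end }, ... ]
        this.slot_height = slot_height;
    }

    // Drawing side: remember where a feature landed.
    FeaturePositionMapper.prototype.map_feature_data = function(data, slot, x_start, x_end) {
        if (!this.feature_positions[slot]) {
            this.feature_positions[slot] = [];
        }
        this.feature_positions[slot].push({ data: data, x_start: x_start, x_end: x_end });
    };

    // Lookup side: row from y, then scan the x spans in that row.
    FeaturePositionMapper.prototype.get_feature_data = function(x, y) {
        var slot = Math.floor( y / this.slot_height ),
            positions = this.feature_positions[slot];
        if (!positions) { return null; }
        for (var i = 0; i < positions.length; i++) {
            if (x >= positions[i].x_start && x <= positions[i].x_end) {
                return positions[i].data;
            }
        }
        return null;
    };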
--- a/static/scripts/trackster_ui.js Wed Oct 12 11:19:42 2011 -0400
+++ b/static/scripts/trackster_ui.js Wed Oct 12 11:19:58 2011 -0400
@@ -113,9 +113,9 @@
 
     // Set overview.
     var overview_track;
-    for (var i = 0; i < view.tracks.length; i++) {
-        if (view.tracks[i].name == overview_track_name) {
-            view.set_overview(view.tracks[i]);
+    for (var i = 0; i < view.drawables.length; i++) {
+        if (view.drawables[i].name == overview_track_name) {
+            view.set_overview(view.drawables[i]);
             break;
         }
     }

--- a/tool_conf.xml.main Wed Oct 12 11:19:42 2011 -0400
+++ b/tool_conf.xml.main Wed Oct 12 11:19:58 2011 -0400
@@ -45,6 +45,7 @@
     <tool file="filters/tailWrapper.xml" />
     <tool file="filters/trimmer.xml" />
     <tool file="filters/wc_gnu.xml" />
+    <tool file="filters/secure_hash_message_digest.xml" />
   </section>
   <section name="Convert Formats" id="convert">
     <tool file="filters/bed2gff.xml" />

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--

This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.