commit/galaxy-central: 8 new changesets
8 new commits in galaxy-central: https://bitbucket.org/galaxy/galaxy-central/commits/e4daf2910a99/ Changeset: e4daf2910a99 User: natefoo Date: 2014-09-15 17:28:18 Summary: Move samples configs to config subdirectory. Affected #: 44 files diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/data_manager_conf.xml.sample --- /dev/null +++ b/config/data_manager_conf.xml.sample @@ -0,0 +1,3 @@ +<?xml version="1.0"?> +<data_managers> +</data_managers> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/datatypes_conf.xml.sample --- /dev/null +++ b/config/datatypes_conf.xml.sample @@ -0,0 +1,333 @@ +<?xml version="1.0"?> +<datatypes> + <registration converters_path="lib/galaxy/datatypes/converters" display_path="display_applications"> + <datatype extension="ab1" type="galaxy.datatypes.binary:Ab1" mimetype="application/octet-stream" display_in_upload="true" description="A binary sequence file in 'ab1' format with a '.ab1' file extension. You must manually select this 'File Format' when uploading the file." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Ab1"/> + <datatype extension="afg" type="galaxy.datatypes.assembly:Amos" display_in_upload="false" /> + <datatype extension="asn1" type="galaxy.datatypes.data:GenericAsn1" mimetype="text/plain" display_in_upload="true" /> + <datatype extension="asn1-binary" type="galaxy.datatypes.binary:GenericAsn1Binary" mimetype="application/octet-stream" display_in_upload="true" /> + <datatype extension="axt" type="galaxy.datatypes.sequence:Axt" display_in_upload="true" description="blastz pairwise alignment format. Each alignment block in an axt file contains three lines: a summary line and 2 sequence lines. Blocks are separated from one another by blank lines. The summary line contains chromosomal position and size information about the alignment. It consists of 9 required fields." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Axt"/> + <datatype extension="fli" type="galaxy.datatypes.tabular:FeatureLocationIndex" display_in_upload="false"/> + <datatype extension="bam" type="galaxy.datatypes.binary:Bam" mimetype="application/octet-stream" display_in_upload="true" description="A binary file compressed in the BGZF format with a '.bam' file extension." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#BAM"> + <converter file="bam_to_bai.xml" target_datatype="bai"/> + <converter file="bam_to_bigwig_converter.xml" target_datatype="bigwig"/> + <display file="ucsc/bam.xml" /> + <display file="ensembl/ensembl_bam.xml" /> + <display file="igv/bam.xml" /> + <display file="igb/bam.xml" /> + </datatype> + <datatype extension="bed" type="galaxy.datatypes.interval:Bed" display_in_upload="true" description="BED format provides a flexible way to define the data lines that are displayed in an annotation track. BED lines have three required columns and nine additional optional columns. The three required columns are chrom, chromStart and chromEnd." 
description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Bed"> + <converter file="bed_to_gff_converter.xml" target_datatype="gff"/> + <converter file="bed_to_bgzip_converter.xml" target_datatype="bgzip"/> + <converter file="bed_to_tabix_converter.xml" target_datatype="tabix" depends_on="bgzip"/> + <converter file="bed_gff_or_vcf_to_bigwig_converter.xml" target_datatype="bigwig"/> + <converter file="bed_to_fli_converter.xml" target_datatype="fli"/> + <!-- <display file="ucsc/interval_as_bed.xml" /> --> + <display file="igb/bed.xml" /> + </datatype> + <datatype extension="bedgraph" type="galaxy.datatypes.interval:BedGraph" display_in_upload="true"> + <converter file="bedgraph_to_bigwig_converter.xml" target_datatype="bigwig"/> + <display file="igb/bedgraph.xml" /> + </datatype> + <datatype extension="bedstrict" type="galaxy.datatypes.interval:BedStrict" /> + <datatype extension="bed6" type="galaxy.datatypes.interval:Bed6"> + </datatype> + <datatype extension="bed12" type="galaxy.datatypes.interval:Bed12" /> + <datatype extension="len" type="galaxy.datatypes.chrominfo:ChromInfo" display_in_upload="true"> + <converter file="len_to_linecount.xml" target_datatype="linecount" /> + </datatype> + <datatype extension="bigbed" type="galaxy.datatypes.binary:BigBed" mimetype="application/octet-stream" display_in_upload="true"> + <display file="ucsc/bigbed.xml" /> + <display file="igb/bb.xml" /> + </datatype> + <datatype extension="bigwig" type="galaxy.datatypes.binary:BigWig" mimetype="application/octet-stream" display_in_upload="true"> + <display file="ucsc/bigwig.xml" /> + <display file="igb/bigwig.xml" /> + </datatype> + <datatype extension="chrint" type="galaxy.datatypes.interval:ChromatinInteractions" display_in_upload="True"> + <converter file="interval_to_bgzip_converter.xml" target_datatype="bgzip"/> + <converter file="interval_to_tabix_converter.xml" target_datatype="tabix" depends_on="bgzip"/> + <converter file="bed_gff_or_vcf_to_bigwig_converter.xml" target_datatype="bigwig"/> + </datatype> + <!-- MSI added Datatypes --> + <datatype extension="csv" type="galaxy.datatypes.tabular:Tabular" subclass="True" display_in_upload="true" /><!-- FIXME: csv is 'tabular'ized data, but not 'tab-delimited'; the class used here is intended for 'tab-delimited' --> + <!-- End MSI added Datatypes --> + <datatype extension="customtrack" type="galaxy.datatypes.interval:CustomTrack"/> + <datatype extension="bowtie_color_index" type="galaxy.datatypes.ngsindex:BowtieColorIndex" mimetype="text/html" display_in_upload="False"/> + <datatype extension="bowtie_base_index" type="galaxy.datatypes.ngsindex:BowtieBaseIndex" mimetype="text/html" display_in_upload="False"/> + <datatype extension="csfasta" type="galaxy.datatypes.sequence:csFasta" display_in_upload="true"/> + <datatype extension="data" type="galaxy.datatypes.data:Data" mimetype="application/octet-stream" max_optional_metadata_filesize="1048576" /> + <datatype extension="data_manager_json" type="galaxy.datatypes.data:Text" mimetype="application/json" subclass="True" display_in_upload="False"/> + <datatype extension="fasta" type="galaxy.datatypes.sequence:Fasta" display_in_upload="true" description="A sequence in FASTA format consists of a single-line description, followed by lines of sequence data. The first character of the description line is a greater-than ('>') symbol in the first column. All lines should be shorter than 80 characters." 
description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Fasta"> + <converter file="fasta_to_tabular_converter.xml" target_datatype="tabular"/> + <converter file="fasta_to_bowtie_base_index_converter.xml" target_datatype="bowtie_base_index"/> + <converter file="fasta_to_bowtie_color_index_converter.xml" target_datatype="bowtie_color_index"/> + <converter file="fasta_to_2bit.xml" target_datatype="twobit"/> + <converter file="fasta_to_len.xml" target_datatype="len"/> + </datatype> + <datatype extension="fastq" type="galaxy.datatypes.sequence:Fastq" display_in_upload="true" description="FASTQ format is a text-based format for storing both a biological sequence (usually nucleotide sequence) and its corresponding quality scores." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Fastq"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqsanger" type="galaxy.datatypes.sequence:FastqSanger" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqsolexa" type="galaxy.datatypes.sequence:FastqSolexa" display_in_upload="true" description="FastqSolexa is the Illumina (Solexa) variant of the Fastq format, which stores sequences and quality scores in a single file." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#FastqSolexa"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqcssanger" type="galaxy.datatypes.sequence:FastqCSSanger" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqillumina" type="galaxy.datatypes.sequence:FastqIllumina" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fqtoc" type="galaxy.datatypes.sequence:SequenceSplitLocations" display_in_upload="true"/> + <datatype extension="eland" type="galaxy.datatypes.tabular:Eland" display_in_upload="true"/> + <datatype extension="elandmulti" type="galaxy.datatypes.tabular:ElandMulti" display_in_upload="true"/> + <datatype extension="genetrack" type="galaxy.datatypes.tracks:GeneTrack"> + <!-- <display file="genetrack.xml" /> --> + </datatype> + <datatype extension="gff" type="galaxy.datatypes.interval:Gff" display_in_upload="true" description="GFF lines have nine required fields that must be tab-separated." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#GFF"> + <converter file="gff_to_bed_converter.xml" target_datatype="bed"/> + <converter file="gff_to_interval_index_converter.xml" target_datatype="interval_index"/> + <converter file="bed_gff_or_vcf_to_bigwig_converter.xml" target_datatype="bigwig"/> + <converter file="gff_to_fli_converter.xml" target_datatype="fli"/> + <display file="ensembl/ensembl_gff.xml" inherit="True"/> + <!-- <display file="gbrowse/gbrowse_gff.xml" inherit="True" /> --> + </datatype> + <datatype extension="gff3" type="galaxy.datatypes.interval:Gff3" display_in_upload="true" description="The GFF3 format addresses the most common extensions to GFF, while preserving backward compatibility with previous formats." 
description_url="https://wiki.galaxyproject.org/Learn/Datatypes#GFF3"/> + <datatype extension="gif" type="galaxy.datatypes.images:Gif" mimetype="image/gif"/> + <datatype extension="gmaj.zip" type="galaxy.datatypes.images:Gmaj" mimetype="application/zip"/> + <datatype extension="gtf" type="galaxy.datatypes.interval:Gtf" display_in_upload="true"> + <converter file="gff_to_interval_index_converter.xml" target_datatype="interval_index"/> + <converter file="bed_gff_or_vcf_to_bigwig_converter.xml" target_datatype="bigwig"/> + <display file="igb/gtf.xml" /> + </datatype> + <datatype extension="toolshed.gz" type="galaxy.datatypes.binary:Binary" mimetype="multipart/x-gzip" subclass="True" /> + <datatype extension="h5" type="galaxy.datatypes.binary:Binary" mimetype="application/octet-stream" subclass="True" /> + <datatype extension="html" type="galaxy.datatypes.images:Html" mimetype="text/html"/> + <datatype extension="interval" type="galaxy.datatypes.interval:Interval" display_in_upload="true" description="File must start with definition line in the following format (columns may be in any order)." > + <converter file="interval_to_bed_converter.xml" target_datatype="bed"/> + <converter file="interval_to_bedstrict_converter.xml" target_datatype="bedstrict"/> + <converter file="interval_to_bed6_converter.xml" target_datatype="bed6"/> + <converter file="interval_to_bed12_converter.xml" target_datatype="bed12"/> + <converter file="interval_to_bgzip_converter.xml" target_datatype="bgzip"/> + <converter file="interval_to_tabix_converter.xml" target_datatype="tabix" depends_on="bgzip"/> + <converter file="interval_to_bigwig_converter.xml" target_datatype="bigwig"/> + <!-- <display file="ucsc/interval_as_bed.xml" inherit="True" /> --> + <display file="ensembl/ensembl_interval_as_bed.xml" inherit="True"/> + <display file="gbrowse/gbrowse_interval_as_bed.xml" inherit="True"/> + <display file="rviewer/bed.xml" inherit="True"/> + </datatype> + <datatype extension="picard_interval_list" type="galaxy.datatypes.tabular:Tabular" subclass="True" display_in_upload="True"> + <converter file="picard_interval_list_to_bed6_converter.xml" target_datatype="bed6"/> + </datatype> + <datatype extension="gatk_interval" type="galaxy.datatypes.data:Text" subclass="True" display_in_upload="True"/> + <datatype extension="gatk_report" type="galaxy.datatypes.data:Text" subclass="True" display_in_upload="True"/> + <datatype extension="gatk_dbsnp" type="galaxy.datatypes.tabular:Tabular" subclass="True" display_in_upload="True"/> + <datatype extension="gatk_tranche" type="galaxy.datatypes.tabular:Tabular" subclass="True" display_in_upload="True"/> + <datatype extension="gatk_recal" type="galaxy.datatypes.tabular:Tabular" subclass="True" display_in_upload="True"/> + <datatype extension="jpg" type="galaxy.datatypes.images:Jpg" mimetype="image/jpeg"/> + <datatype extension="tiff" type="galaxy.datatypes.images:Tiff" mimetype="image/tiff"/> + <datatype extension="bmp" type="galaxy.datatypes.images:Bmp" mimetype="image/bmp"/> + <datatype extension="im" type="galaxy.datatypes.images:Im" mimetype="image/im"/> + <datatype extension="pcd" type="galaxy.datatypes.images:Pcd" mimetype="image/pcd"/> + <datatype extension="pcx" type="galaxy.datatypes.images:Pcx" mimetype="image/pcx"/> + <datatype extension="ppm" type="galaxy.datatypes.images:Ppm" mimetype="image/ppm"/> + <datatype extension="psd" type="galaxy.datatypes.images:Psd" mimetype="image/psd"/> + <datatype extension="xbm" type="galaxy.datatypes.images:Xbm" mimetype="image/xbm"/> + <datatype 
extension="xpm" type="galaxy.datatypes.images:Xpm" mimetype="image/xpm"/> + <datatype extension="rgb" type="galaxy.datatypes.images:Rgb" mimetype="image/rgb"/> + <datatype extension="pbm" type="galaxy.datatypes.images:Pbm" mimetype="image/pbm"/> + <datatype extension="pgm" type="galaxy.datatypes.images:Pgm" mimetype="image/pgm"/> + <datatype extension="eps" type="galaxy.datatypes.images:Eps" mimetype="image/eps"/> + <datatype extension="rast" type="galaxy.datatypes.images:Rast" mimetype="image/rast"/> + <datatype extension="laj" type="galaxy.datatypes.images:Laj"/> + <datatype extension="lav" type="galaxy.datatypes.sequence:Lav" display_in_upload="true" description="Lav is the primary output format for BLASTZ. The first line of a .lav file begins with #:lav.." /> + <datatype extension="maf" type="galaxy.datatypes.sequence:Maf" display_in_upload="true" description="TBA and multiz multiple alignment format. The first line of a .maf file begins with ##maf. This word is followed by white-space-separated 'variable=value' pairs. There should be no white space surrounding the '='." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#MAF"> + <converter file="maf_to_fasta_converter.xml" target_datatype="fasta"/> + <converter file="maf_to_interval_converter.xml" target_datatype="interval"/> + </datatype> + <datatype extension="mafcustomtrack" type="galaxy.datatypes.sequence:MafCustomTrack"> + <display file="ucsc/maf_customtrack.xml" /> + </datatype> + <datatype extension="encodepeak" type="galaxy.datatypes.interval:ENCODEPeak" display_in_upload="True"> + <converter file="encodepeak_to_tabix_converter.xml" target_datatype="tabix" depends_on="bgzip"/> + <converter file="encodepeak_to_bgzip_converter.xml" target_datatype="bgzip"/> + <converter file="bed_gff_or_vcf_to_bigwig_converter.xml" target_datatype="bigwig"/> + </datatype> + <datatype extension="pdf" type="galaxy.datatypes.images:Pdf" mimetype="application/pdf"/> + <datatype extension="pileup" type="galaxy.datatypes.tabular:Pileup" display_in_upload="true"> + <converter file="interval_to_bgzip_converter.xml" target_datatype="bgzip"/> + <converter file="interval_to_tabix_converter.xml" target_datatype="tabix" depends_on="bgzip"/> + </datatype> + <datatype extension="png" type="galaxy.datatypes.images:Png" mimetype="image/png"/> + <datatype extension="qual" type="galaxy.datatypes.qualityscore:QualityScore" /> + <datatype extension="qualsolexa" type="galaxy.datatypes.qualityscore:QualityScoreSolexa" display_in_upload="true"/> + <datatype extension="qualillumina" type="galaxy.datatypes.qualityscore:QualityScoreIllumina" display_in_upload="true"/> + <datatype extension="qualsolid" type="galaxy.datatypes.qualityscore:QualityScoreSOLiD" display_in_upload="true"/> + <datatype extension="qual454" type="galaxy.datatypes.qualityscore:QualityScore454" display_in_upload="true"/> + <datatype extension="Roadmaps" type="galaxy.datatypes.assembly:Roadmaps" display_in_upload="false"/> + <datatype extension="sam" type="galaxy.datatypes.tabular:Sam" display_in_upload="true"> + <converter file="sam_to_bam.xml" target_datatype="bam"/> + <converter file="sam_to_bigwig_converter.xml" target_datatype="bigwig"/> + </datatype> + <datatype extension="scf" type="galaxy.datatypes.binary:Scf" mimetype="application/octet-stream" display_in_upload="true" description="A binary sequence file in 'scf' format with a '.scf' file extension. You must manually select this 'File Format' when uploading the file." 
description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Scf"/> + <datatype extension="Sequences" type="galaxy.datatypes.assembly:Sequences" display_in_upload="false"/> + <datatype extension="sff" type="galaxy.datatypes.binary:Sff" mimetype="application/octet-stream" display_in_upload="true" description="A binary file in 'Standard Flowgram Format' with a '.sff' file extension." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Sff"/> + <datatype extension="svg" type="galaxy.datatypes.images:Image" mimetype="image/svg+xml"/> + <datatype extension="taxonomy" type="galaxy.datatypes.tabular:Taxonomy" display_in_upload="true"/> + <datatype extension="tabular" type="galaxy.datatypes.tabular:Tabular" display_in_upload="true" description="Any data in tab delimited format (tabular)." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Tabular_.28tab_delimited.29"/> + <datatype extension="twobit" type="galaxy.datatypes.binary:TwoBit" mimetype="application/octet-stream" display_in_upload="true"/> + <datatype extension="sqlite" type="galaxy.datatypes.binary:SQlite" mimetype="application/octet-stream" display_in_upload="true"/> + <datatype extension="txt" type="galaxy.datatypes.data:Text" display_in_upload="true" description="Any text file." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Plain_text"/> + <datatype extension="linecount" type="galaxy.datatypes.data:LineCount" display_in_upload="false"/> + <datatype extension="memexml" type="galaxy.datatypes.xml:MEMEXml" mimetype="application/xml" display_in_upload="true"/> + <datatype extension="cisml" type="galaxy.datatypes.xml:CisML" mimetype="application/xml" display_in_upload="true"/> + <datatype extension="xml" type="galaxy.datatypes.xml:GenericXml" mimetype="application/xml" display_in_upload="true"/> + <datatype extension="vcf" type="galaxy.datatypes.tabular:Vcf" display_in_upload="true"> + <converter file="vcf_to_bgzip_converter.xml" target_datatype="bgzip"/> + <converter file="vcf_to_vcf_bgzip_converter.xml" target_datatype="vcf_bgzip"/> + <converter file="vcf_to_tabix_converter.xml" target_datatype="tabix" depends_on="bgzip"/> + <converter file="bed_gff_or_vcf_to_bigwig_converter.xml" target_datatype="bigwig"/> + <display file="ucsc/vcf.xml" /> + <display file="igv/vcf.xml" /> + <display file="rviewer/vcf.xml" inherit="True"/> + </datatype> + <datatype extension="bcf" type="galaxy.datatypes.binary:Bcf" mimetype="application/octet-stream" display_in_upload="True"/> + <datatype extension="velvet" type="galaxy.datatypes.assembly:Velvet" display_in_upload="false"/> + <datatype extension="wig" type="galaxy.datatypes.interval:Wiggle" display_in_upload="true" description="The wiggle format is line-oriented. Wiggle data is preceded by a track definition line, which adds a number of options for controlling the default display of this track." 
description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Wig"> + <converter file="wig_to_bigwig_converter.xml" target_datatype="bigwig"/> + <converter file="wiggle_to_simple_converter.xml" target_datatype="interval"/> + <!-- <display file="gbrowse/gbrowse_wig.xml" /> --> + <display file="igb/wig.xml" /> + </datatype> + <datatype extension="interval_index" type="galaxy.datatypes.binary:Binary" subclass="True" /> + <datatype extension="tabix" type="galaxy.datatypes.binary:Binary" subclass="True" /> + <datatype extension="bgzip" type="galaxy.datatypes.binary:Binary" subclass="True" /> + <datatype extension="vcf_bgzip" type_extension="bgzip" subclass="True" > + <display file="igv/vcf.xml" /> + <converter file="vcf_bgzip_to_tabix_converter.xml" target_datatype="tabix"/> + </datatype> + <!-- Phylogenetic tree datatypes --> + <datatype extension="phyloxml" type="galaxy.datatypes.xml:Phyloxml" display_in_upload="true" /> + <datatype extension="nhx" type="galaxy.datatypes.data:Newick" display_in_upload="true" /> + <datatype extension="nex" type="galaxy.datatypes.data:Nexus" display_in_upload="true" /> + <!-- Start RGenetics Datatypes --> + <datatype extension="affybatch" type="galaxy.datatypes.genetics:Affybatch" display_in_upload="true"/> + <!-- eigenstrat pedigree input file --> + <datatype extension="eigenstratgeno" type="galaxy.datatypes.genetics:Eigenstratgeno"/> + <!-- eigenstrat pca output file for adjusted eigenQTL eg --> + <datatype extension="eigenstratpca" type="galaxy.datatypes.genetics:Eigenstratpca"/> + <datatype extension="eset" type="galaxy.datatypes.genetics:Eset" display_in_upload="true" /> + <!-- fbat/pbat format pedigree (header row of marker names) --> + <datatype extension="fped" type="galaxy.datatypes.genetics:Fped" display_in_upload="true"/> + <!-- phenotype file - fbat format --> + <datatype extension="fphe" type="galaxy.datatypes.genetics:Fphe" display_in_upload="true" mimetype="text/html"/> + <!-- genome graphs ucsc file - first col is always marker then numeric values to plot --> + <datatype extension="gg" type="galaxy.datatypes.genetics:GenomeGraphs"/> + <!-- part of linkage format pedigree --> + <!-- information redundancy (LD) filtered plink pbed --> + <datatype extension="ldindep" type="galaxy.datatypes.genetics:ldIndep" display_in_upload="true"> + </datatype> + <datatype extension="malist" type="galaxy.datatypes.genetics:MAlist" display_in_upload="true"/> + <!-- linkage format pedigree (separate .map file) --> + <datatype extension="lped" type="galaxy.datatypes.genetics:Lped" display_in_upload="true"> + <converter file="lped_to_fped_converter.xml" target_datatype="fped"/> + <converter file="lped_to_pbed_converter.xml" target_datatype="pbed"/> + </datatype> + <!-- plink compressed file - has bed extension unfortunately --> + <datatype extension="pbed" type="galaxy.datatypes.genetics:Pbed" display_in_upload="true"> + <converter file="pbed_to_lped_converter.xml" target_datatype="lped"/> + <converter file="pbed_ldreduced_converter.xml" target_datatype="ldindep"/> + </datatype> + <datatype extension="pheno" type="galaxy.datatypes.genetics:Pheno"/> + <!-- phenotype file - plink format --> + <datatype extension="pphe" type="galaxy.datatypes.genetics:Pphe" display_in_upload="true" mimetype="text/html"/> + <datatype extension="rexpbase" type="galaxy.datatypes.genetics:RexpBase"/> + <datatype extension="rgenetics" type="galaxy.datatypes.genetics:Rgenetics"/> + <datatype extension="snptest" type="galaxy.datatypes.genetics:Snptest" display_in_upload="true"/> + <datatype 
extension="snpmatrix" type="galaxy.datatypes.genetics:SNPMatrix" display_in_upload="true"/> + <datatype extension="xls" type="galaxy.datatypes.tabular:Tabular"/> + <!-- End RGenetics Datatypes --> + <!-- graph datatypes --> + <datatype extension="xgmml" type="galaxy.datatypes.graph:Xgmml" display_in_upload="true"/> + <datatype extension="sif" type="galaxy.datatypes.graph:Sif" display_in_upload="true"/> + <datatype extension="rdf" type="galaxy.datatypes.graph:Rdf" display_in_upload="true"/> + </registration> + <sniffers> + <!-- + The order in which Galaxy attempts to determine data types is + important because some formats are much more loosely defined + than others. The following list should be the most rigidly + defined format first, followed by next-most rigidly defined, + and so on. + --> + <sniffer type="galaxy.datatypes.tabular:Vcf"/> + <sniffer type="galaxy.datatypes.binary:TwoBit"/> + <sniffer type="galaxy.datatypes.binary:SQlite"/> + <sniffer type="galaxy.datatypes.binary:Bam"/> + <sniffer type="galaxy.datatypes.binary:Sff"/> + <sniffer type="galaxy.datatypes.xml:Phyloxml"/> + <sniffer type="galaxy.datatypes.xml:GenericXml"/> + <sniffer type="galaxy.datatypes.sequence:Maf"/> + <sniffer type="galaxy.datatypes.sequence:Lav"/> + <sniffer type="galaxy.datatypes.sequence:csFasta"/> + <sniffer type="galaxy.datatypes.qualityscore:QualityScoreSOLiD"/> + <sniffer type="galaxy.datatypes.qualityscore:QualityScore454"/> + <sniffer type="galaxy.datatypes.sequence:Fasta"/> + <sniffer type="galaxy.datatypes.sequence:Fastq"/> + <sniffer type="galaxy.datatypes.interval:Wiggle"/> + <sniffer type="galaxy.datatypes.images:Html"/> + <sniffer type="galaxy.datatypes.images:Pdf"/> + <sniffer type="galaxy.datatypes.sequence:Axt"/> + <sniffer type="galaxy.datatypes.interval:Bed"/> + <sniffer type="galaxy.datatypes.interval:CustomTrack"/> + <sniffer type="galaxy.datatypes.interval:Gtf"/> + <sniffer type="galaxy.datatypes.interval:Gff"/> + <sniffer type="galaxy.datatypes.interval:Gff3"/> + <sniffer type="galaxy.datatypes.tabular:Pileup"/> + <sniffer type="galaxy.datatypes.interval:Interval"/> + <sniffer type="galaxy.datatypes.tabular:Sam"/> + <sniffer type="galaxy.datatypes.data:Newick"/> + <sniffer type="galaxy.datatypes.data:Nexus"/> + <sniffer type="galaxy.datatypes.images:Jpg"/> + <sniffer type="galaxy.datatypes.images:Png"/> + <sniffer type="galaxy.datatypes.images:Tiff"/> + <sniffer type="galaxy.datatypes.images:Bmp"/> + <sniffer type="galaxy.datatypes.images:Gif"/> + <sniffer type="galaxy.datatypes.images:Im"/> + <sniffer type="galaxy.datatypes.images:Pcd"/> + <sniffer type="galaxy.datatypes.images:Pcx"/> + <sniffer type="galaxy.datatypes.images:Ppm"/> + <sniffer type="galaxy.datatypes.images:Psd"/> + <sniffer type="galaxy.datatypes.images:Xbm"/> + <sniffer type="galaxy.datatypes.images:Rgb"/> + <sniffer type="galaxy.datatypes.images:Pbm"/> + <sniffer type="galaxy.datatypes.images:Pgm"/> + <sniffer type="galaxy.datatypes.images:Xpm"/> + <sniffer type="galaxy.datatypes.images:Eps"/> + <sniffer type="galaxy.datatypes.images:Rast"/> + <!-- + Keep this commented until the sniff method in the assembly.py + module is fixed to not read the entire file. + <sniffer type="galaxy.datatypes.assembly:Amos"/> + --> + </sniffers> + <build_sites> + <!-- + Build sites define the builds (dbkeys) available at sites used by display + applications and the URL to those sites. 
+ + The `display` attributes on the `ucsc` and `gbrowse` sites replace the + `ucsc_display_sites` and `gbrowse_display_sites` options in + universe_wsgi.ini. Because these are used by "old-style" display + applications, their types cannot change if you want the old-style display + links for these sites to work. + --> + <site type="ucsc" file="tool-data/shared/ucsc/ucsc_build_sites.txt" display="main,test,archaea,ucla"/> + <site type="gbrowse" file="tool-data/shared/gbrowse/gbrowse_build_sites.txt" display="modencode,sgd_yeast,tair,wormbase,wormbase_ws120,wormbase_ws140,wormbase_ws170,wormbase_ws180,wormbase_ws190,wormbase_ws200,wormbase_ws204,wormbase_ws210,wormbase_ws220,wormbase_ws225"/> + <site type="ensembl" file="tool-data/shared/ensembl/ensembl_sites.txt"/> + <site type="ensembl_data_url" file="tool-data/shared/ensembl/ensembl_sites_data_URL.txt"/> + <site type="igv" file="tool-data/shared/igv/igv_build_sites.txt"/> + <site type="rviewer" file="tool-data/shared/rviewer/rviewer_build_sites.txt"/> + </build_sites> +</datatypes> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/demo_sequencer_wsgi.ini.sample --- /dev/null +++ b/config/demo_sequencer_wsgi.ini.sample @@ -0,0 +1,66 @@ +# ---- HTTP Server ---------------------------------------------------------- + +[server:main] + +use = egg:Paste#http +port = 9011 +host = 0.0.0.0 +use_threadpool = true +threadpool_workers = 10 + +# ---- Galaxy Demo Sequencer Emulator Interface ------------------------------------------------- + +[app:main] + +# Specifies the factory for the universe WSGI application +paste.app_factory = galaxy.webapps.demo_sequencer.buildapp:app_factory +log_level = DEBUG + +# Where dataset files are saved +file_path = database/demo_sequencer_files +# Temporary storage for additional datasets, this should be shared through the cluster +new_file_path = database/tmp + +# Sequencer emulator actions +sequencer_actions_config_file = %(here)s/lib/galaxy/webapps/demo_sequencer/sequencer_actions.xml + +# Session support (beaker) +use_beaker_session = True +session_type = memory +session_data_dir = %(here)s/database/beaker_sessions +session_key = galaxysessions +session_secret = changethisinproduction + +# Galaxy session security +id_secret = changethisinproductiontoo + +# Configuration for debugging middleware +debug = true +use_lint = false + +# NEVER enable this on a public site (even test or QA) +# use_interactive = true + +# this should be a comma-separated list of valid Galaxy users +#admin_users = test@bx.psu.edu + +# Force everyone to log in (disable anonymous access) +require_login = False + +# Write thread status periodically to 'heartbeat.log' (careful, uses disk space rapidly!) 
+## use_heartbeat = True + +# Profiling middleware (cProfile based) +## use_profile = True + +# Use the new iframe / javascript based layout +use_new_layout = true + +# Serving static files (needed if running standalone) +static_enabled = True +static_cache_time = 360 +static_dir = %(here)s/static/ +static_images_dir = %(here)s/static/images +static_favicon_dir = %(here)s/static/favicon.ico +static_scripts_dir = %(here)s/static/scripts/ +static_style_dir = %(here)s/static/june_2007_style/blue diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/external_service_types_conf.xml.sample --- /dev/null +++ b/config/external_service_types_conf.xml.sample @@ -0,0 +1,5 @@ +<?xml version="1.0"?> +<external_service_types> + <external_service_type file="simple_unknown_sequencer.xml" visible="False"/> + <external_service_type file="applied_biosystems_solid.xml" visible="True"/> +</external_service_types> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/job_conf.xml.sample_advanced --- /dev/null +++ b/config/job_conf.xml.sample_advanced @@ -0,0 +1,482 @@ +<?xml version="1.0"?> +<job_conf> + <plugins workers="4"> + <!-- "workers" is the number of threads for the runner's work queue. + The default from <plugins> is used if not defined for a <plugin>. + --> + <plugin id="local" type="runner" load="galaxy.jobs.runners.local:LocalJobRunner"/> + <plugin id="pbs" type="runner" load="galaxy.jobs.runners.pbs:PBSJobRunner" workers="2"/> + <plugin id="drmaa" type="runner" load="galaxy.jobs.runners.drmaa:DRMAAJobRunner"> + <!-- Different DRMs handle successfully completed jobs differently; + these options can be changed to handle such differences and + are explained in detail on the Galaxy wiki. Defaults are shown. --> + <param id="invalidjobexception_state">ok</param> + <param id="invalidjobexception_retries">0</param> + <param id="internalexception_state">ok</param> + <param id="internalexception_retries">0</param> + </plugin> + <plugin id="sge" type="runner" load="galaxy.jobs.runners.drmaa:DRMAAJobRunner"> + <!-- Override the $DRMAA_LIBRARY_PATH environment variable --> + <param id="drmaa_library_path">/sge/lib/libdrmaa.so</param> + </plugin> + <plugin id="cli" type="runner" load="galaxy.jobs.runners.cli:ShellJobRunner" /> + <plugin id="condor" type="runner" load="galaxy.jobs.runners.condor:CondorJobRunner" /> + <plugin id="slurm" type="runner" load="galaxy.jobs.runners.slurm:SlurmJobRunner" /> + <plugin id="dynamic" type="runner"> + <!-- The dynamic runner is not a real job running plugin and is + always loaded, so it does not need to be explicitly stated in + <plugins>. However, if you wish to change the base module + containing your dynamic rules, you can do so. + + The `load` attribute is not required (and is ignored if + included). + --> + <param id="rules_module">galaxy.jobs.rules</param> + </plugin> + <!-- Pulsar runners (see more at https://pulsar.readthedocs.org) --> + <plugin id="pulsar_rest" type="runner" load="galaxy.jobs.runners.pulsar:PulsarRESTJobRunner"> + <!-- Allow optimized HTTP calls with libcurl (defaults to urllib) --> + <!-- <param id="transport">curl</param> --> + + <!-- *Experimental Caching*: The next parameter enables caching. + It likely will not work with newer features such as MQ support. + + If this is enabled, be sure to specify a `file_cache_dir` in + the remote Pulsar server's main configuration file.
+ --> + <!-- <param id="cache">True</param> --> + </plugin> + <plugin id="pulsar_mq" type="runner" load="galaxy.jobs.runners.pulsar:PulsarMQJobRunner"> + <!-- AMQP URL to connect to. --> + <param id="amqp_url">amqp://guest:guest@localhost:5672//</param> + <!-- URL that remote Pulsar apps should use to transfer files + to/from this Galaxy instance. --> + <param id="galaxy_url">http://localhost:8080</param> + <!-- Pulsar job manager to communicate with (see Pulsar + docs for information on job managers). --> + <!-- <param id="manager">_default_</param> --> + <!-- The AMQP client can provide an SSL client certificate (e.g. for + validation); the following options configure that certificate + (see for reference: + http://kombu.readthedocs.org/en/latest/reference/kombu.connection.html + ). If you simply want to use SSL but not use/validate a client + cert, just use the ?ssl=1 query on the amqp URL instead. --> + <!-- <param id="amqp_connect_ssl_ca_certs">/path/to/cacert.pem</param> --> + <!-- <param id="amqp_connect_ssl_keyfile">/path/to/key.pem</param> --> + <!-- <param id="amqp_connect_ssl_certfile">/path/to/cert.pem</param> --> + <!-- <param id="amqp_connect_ssl_cert_reqs">cert_required</param> --> + <!-- By default, the AMQP consumer uses a nonblocking connection with + a 0.2 second timeout. In testing, this works fine for + unencrypted AMQP connections, but with SSL it will cause the + client to reconnect to the server after each timeout. Set to a + higher value (in seconds), or `None` to use blocking connections. --> + <!-- <param id="amqp_consumer_timeout">None</param> --> + </plugin> + <plugin id="pulsar_legacy" type="runner" load="galaxy.jobs.runners.pulsar:PulsarLegacyJobRunner"> + <!-- Pulsar job runner with default parameters matching those + of the old LWR job runner. If your Pulsar server is running on a + Windows machine, for instance, this runner should still be used. + + These destinations still need to target a Pulsar server; + older LWR plugins and destinations that still work in Galaxy can + target LWR servers, but this support should be considered + deprecated and will disappear with a future release of Galaxy. + --> + </plugin> + </plugins> + <handlers default="handlers"> + <!-- Additional job handlers - the id should match the name of a + [server:<id>] in universe_wsgi.ini. + --> + <handler id="handler0" tags="handlers"/> + <handler id="handler1" tags="handlers"/> + <!-- Handlers will load all plugins defined in the <plugins> collection + above by default, but can be limited to a subset using <plugin> + tags. This is useful for heterogeneous environments where the DRMAA + plugin would need to be loaded more than once with different + configs. + --> + <handler id="sge_handler"> + <plugin id="sge"/> + </handler> + <handler id="special_handler0" tags="special_handlers"/> + <handler id="special_handler1" tags="special_handlers"/> + <handler id="trackster_handler"/> + </handlers> + <destinations default="local"> + <!-- Destinations define details about remote resources and how jobs + should be executed on those remote resources. + --> + <destination id="local" runner="local"/> + <destination id="multicore_local" runner="local"> + <param id="local_slots">4</param><!-- Specify GALAXY_SLOTS for local jobs. --> + <!-- Warning: Local slot count doesn't tie up additional worker threads; to prevent over-allocating + the machine, define a second local runner with a different name and fewer workers + to run this destination, as sketched below.
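For instance, a minimal sketch of that approach (the "local_limited" plugin id and its worker count are illustrative assumptions, not part of this sample):

    <plugin id="local_limited" type="runner" load="galaxy.jobs.runners.local:LocalJobRunner" workers="1"/>

    <destination id="multicore_local" runner="local_limited">
        <param id="local_slots">4</param>
    </destination>

Jobs routed to this destination then queue through the smaller worker pool while still receiving four local slots each.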
--> + <param id="embed_metadata_in_job">True</param> + <!-- The above parameter will be the default (with no option to set + it to False) in an upcoming release of Galaxy, but you can + try it early - it will slightly speed up local jobs by + embedding the metadata calculation in the job script itself. + --> + <job_metrics /> + <!-- The above element demonstrates an embedded job metrics definition - see + job_metrics_conf.xml.sample for full documentation on possible nested + elements. This empty block will simply disable job metrics for the + corresponding destination. --> + </destination> + <destination id="docker_local" runner="local"> + <param id="docker_enabled">true</param> + <!-- docker_volumes can be used to configure volumes to expose to docker. + For added isolation append :ro to the path to mount it read only. + Galaxy will attempt to infer a reasonable set of defaults for which + volumes should be exposed (and how) based on Galaxy's settings and the + destination - but be sure to add any library paths or data indices + that may be needed read-only. + --> + <!-- + <param id="docker_volumes">$defaults,/mnt/galaxyData/libraries:ro,/mnt/galaxyData/indices:ro</param> + --> + <!-- For a stock Galaxy instance and traditional job runner $defaults will + expand out as: + + $galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$default_file_path:rw + + This assumes most of what is needed is available under Galaxy's root directory, + the tool directory, and Galaxy's file_path (if using the object store creatively + you will definitely need to expand defaults). + + This configuration allows any docker instance to write to any Galaxy + file - for greater isolation set outputs_to_working_directory in + universe_wsgi.ini. This will cause $defaults to allow writing to much + less. It will then expand as follows: + + $galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$default_file_path:ro + + If using Pulsar, defaults will be even further restricted because + Pulsar will (by default) stage all needed inputs into the job's job_directory + (so there is no need to allow the docker container to read all the + files - let alone write over them). Defaults in this case become: + + $job_directory:ro,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw + + Python string.Template is used to expand volumes; the values $defaults, + $galaxy_root, $default_file_path, $tool_directory, and $working_directory + are available to all jobs, and $job_directory is also available for + Pulsar jobs. + --> + <!-- One can run docker using the volumes-from tag by setting the following + parameter. For more information on volumes-from see the following + docker tutorial: https://docs.docker.com/userguide/dockervolumes/ + --> + <!-- <param id="docker_volumes_from">parent_container_name</param> --> + <!-- Control the memory allocatable by the docker container with the following option: + --> + <!-- <param id="docker_memory">24G</param> --> + <!-- By default Docker will need to be runnable by Galaxy using + password-less sudo - this can be configured by adding the + following line to the sudoers file of all compute nodes + with docker enabled: + + galaxy ALL = (root) NOPASSWD: SETENV: /usr/bin/docker + + The following option can be set to false to disable sudo (docker + must likewise be configured to allow this). + --> + <!-- <param id="docker_sudo">false</param> --> + <!-- The following option can be used to tweak the sudo command used by + default.
--> + <!-- <param id="docker_sudo_cmd">/usr/bin/sudo -extra_param</param> --> + <!-- By default, the docker container will not have any networking + enabled. Host networking can be bridged by uncommenting the next option: + http://docs.docker.io/reference/run/#network-settings + --> + <!-- <param id="docker_net">bridge</param> --> + <!-- The following option can be used to tweak the docker command. --> + <!-- <param id="docker_cmd">/usr/local/custom_docker/docker</param> --> + <!-- The following can be used to connect to the docker server in different + ways (translated as the -H argument to the docker client). --> + <!-- <param id="docker_host">unix:///var/run/docker.sock</param> --> + <!-- <param id="docker_host">:5555</param> --> + <!-- <param id="docker_host">tcp://127.0.0.1:4243</param> --> + + <!-- If the deployer wants to use docker for isolation, but does not + trust a tool's specified container - a destination-wide override + can be set. This will cause all jobs on this destination to use + that docker image. --> + <!-- <param id="docker_container_id_override">busybox:ubuntu-14.04</param> --> + + <!-- Likewise, if the deployer wants to use docker for isolation and + does trust a tool's specified container - but also wants tools not + configured to run in a container to have one - the following option + can provide a fallback. --> + <!-- <param id="docker_default_container_id">busybox:ubuntu-14.04</param> --> + + </destination> + <destination id="pbs" runner="pbs" tags="mycluster"/> + <destination id="pbs_longjobs" runner="pbs" tags="mycluster,longjobs"> + <!-- Define parameters that are native to the job runner plugin. --> + <param id="Resource_List">walltime=72:00:00</param> + </destination> + <destination id="remote_cluster" runner="drmaa" tags="longjobs"/> + <destination id="java_cluster" runner="drmaa"> + <!-- Set arbitrary environment variables at runtime. General + dependencies for tools should be configured via + tool_dependency_dir and package options, and these + options should be reserved for defining cluster-specific + options. + --> + <env id="_JAVA_OPTIONS">-Xmx=6GB</env> + <env id="ANOTHER_OPTION" raw="true">'5'</env><!-- raw disables auto quoting --> + <env file="/mnt/java_cluster/environment_setup.sh" /><!-- will be sourced --> + <env exec="module load javastuff/2.10" /><!-- will be sourced --> + <!-- Files to source and exec statements will be handled on remote + clusters. These don't need to be available on the Galaxy server + itself. + --> + </destination> + <destination id="real_user_cluster" runner="drmaa"> + <!-- TODO: The real user options should maybe not be considered runner params. --> + <param id="galaxy_external_runjob_script">scripts/drmaa_external_runner.py</param> + <param id="galaxy_external_killjob_script">scripts/drmaa_external_killer.py</param> + <param id="galaxy_external_chown_script">scripts/external_chown_script.py</param> + </destination> + <destination id="dynamic" runner="dynamic"> + <!-- A destination that represents a method in the dynamic runner.
--> + <param id="function">foo</param> + </destination> + <destination id="load_balance" runner="dynamic"> + <param id="type">choose_one</param> + <!-- Randomly assign jobs to various static destination ids --> + <param id="destination_ids">cluster1,cluster2,cluster3</param> + </destination> + <destination id="load_balance_with_data_locality" runner="dynamic"> + <!-- Randomly assign jobs to various static destination ids, + but keep jobs in the same workflow invocation together, and + for those jobs run outside of workflows keep jobs in the same + history together. + --> + <param id="type">choose_one</param> + <param id="destination_ids">cluster1,cluster2,cluster3</param> + <param id="hash_by">workflow_invocation,history</param> + </destination> + <destination id="burst_out" runner="dynamic"> + <!-- Burst out from static destination local_cluster_8_core to + static destination shared_cluster_8_core when there are about + 50 Galaxy jobs assigned to any of the local_cluster_XXX + destinations (either running or queued). If there are fewer + than 50 jobs, just use the local_cluster_8_core destination. + + Uncomment the job_states parameter to make this bursting happen when + roughly 50 jobs are queued instead. + --> + <param id="type">burst</param> + <param id="from_destination_ids">local_cluster_8_core,local_cluster_1_core,local_cluster_16_core</param> + <param id="to_destination_id">shared_cluster_8_core</param> + <param id="num_jobs">50</param> + <!-- <param id="job_states">queued</param> --> + </destination> + <destination id="docker_dispatch" runner="dynamic"> + <!-- The following dynamic destination type will send all tools that + support docker to the static destination defined by + docker_destination_id (docker_cluster in this example) and all + other tools to default_destination_id (normal_cluster in this + example). + --> + <param id="type">docker_dispatch</param> + <param id="docker_destination_id">docker_cluster</param> + <param id="default_destination_id">normal_cluster</param> + </destination> + <destination id="secure_pulsar_rest_dest" runner="pulsar_rest"> + <param id="url">https://example.com:8913/</param> + <!-- If set, private_token must match the token in the remote Pulsar's + configuration. --> + <param id="private_token">123456789changeme</param> + <!-- Uncomment the following statement to disable file staging (e.g. + if there is a shared file system between Galaxy and the Pulsar + server). Alternatively, action can be set to 'copy' - to replace + http transfers with file system copies, 'remote_transfer' to cause + the Pulsar to initiate HTTP transfers instead of Galaxy, or + 'remote_copy' to cause Pulsar to initiate file system copies. + If setting this to 'remote_transfer', be sure to specify a + 'galaxy_url' attribute on the runner plugin above. --> + <!-- <param id="default_file_action">none</param> --> + <!-- The above option is just the default; the transfer behavior + none|copy|http can be configured on a per-path basis via the + following file. See the Pulsar documentation for more details and + examples. + --> + <!-- <param id="file_action_config">file_actions.yaml</param> --> + <!-- The non-legacy Pulsar runners will attempt to resolve Galaxy + dependencies remotely - to enable this set a tool_dependency_dir + in Pulsar's configuration (this can work with all the same dependency + resolution mechanisms as Galaxy - Tool Shed installs, Galaxy + packages, etc...). To disable this behavior, set the following parameter + to none.
To generate the dependency resolution command locally, + set the following parameter to local. + --> + <!-- <param id="dependency_resolution">none</param> --> + <!-- Uncomment the following option to enable setting metadata on the remote + Pulsar server. The 'use_remote_datatypes' option is available for + determining whether to use remotely configured datatypes or local + ones (both alternatives are a little brittle). --> + <!-- <param id="remote_metadata">true</param> --> + <!-- <param id="use_remote_datatypes">false</param> --> + <!-- <param id="remote_property_galaxy_home">/path/to/remote/galaxy-central</param> --> + <!-- If the remote Pulsar server is configured to run jobs as the real user, + uncomment the following line to pass the current Galaxy user + along. --> + <!-- <param id="submit_user">$__user_name__</param> --> + <!-- Various other submission parameters can be passed along to the Pulsar + whose use will depend on the remote Pulsar's configured job manager. + For instance: + --> + <!-- <param id="submit_native_specification">-P bignodes -R y -pe threads 8</param> --> + <!-- Disable parameter rewriting and rewrite generated commands + instead. This may be required if the remote host is a Windows machine, + but probably not otherwise. + --> + <!-- <param id="rewrite_parameters">false</param> --> + </destination> + <destination id="pulsar_mq_dest" runner="pulsar_mq" > + <!-- The RESTful Pulsar client sends a request to Pulsar + to populate various system properties. This + extra step can be disabled, and these properties calculated here + on the client instead, by uncommenting jobs_directory and + specifying any additional remote_property_ of + interest; this is not optional when using message + queues. + --> + <param id="jobs_directory">/path/to/remote/pulsar/files/staging/</param> + <!-- Otherwise, MQ and Legacy Pulsar destinations can be supplied + all the same destination parameters as the RESTful client documented + above (though url and private_token are ignored when using an MQ). + --> + </destination> + <destination id="ssh_torque" runner="cli"> + <param id="shell_plugin">SecureShell</param> + <param id="job_plugin">Torque</param> + <param id="shell_username">foo</param> + <param id="shell_hostname">foo.example.org</param> + <param id="job_Resource_List">walltime=24:00:00,ncpus=4</param> + </destination> + + <!-- Example CLI Slurm runner. --> + <destination id="ssh_slurm" runner="cli"> + <param id="shell_plugin">SecureShell</param> + <param id="job_plugin">Slurm</param> + <param id="shell_username">foo</param> + <param id="shell_hostname">my_host</param> + <param id="job_time">2:00:00</param> + <param id="job_ncpus">4</param> + <param id="job_partition">my_partition</param> + </destination> + + <destination id="condor" runner="condor"> + <!-- With no params, jobs are submitted to the 'vanilla' universe with: + notification = NEVER + getenv = true + Additional/override query ClassAd params can be specified with + <param> tags. + --> + <param id="request_cpus">8</param> + </destination> + + <!-- Jobs that hit the walltime on one destination can be automatically + resubmitted to another destination. Walltime detection is + currently only implemented in the slurm runner. + + Multiple resubmit tags can be defined; the first resubmit matching + the terminal condition of a job will be used. + + The 'condition' attribute is optional; if not present, the + resubmit destination will be used for all conditions. Currently, + only the "walltime_reached" condition is implemented.
+ + The 'handler' tag is optional; if not present, the job's original + handler will be reused for the resubmitted job. + --> + <destination id="short_fast" runner="slurm"> + <param id="nativeSpecification">--time=00:05:00 --nodes=1</param> + <resubmit condition="walltime_reached" destination="long_slow" handler="sge_handler" /> + </destination> + <destination id="long_slow" runner="sge"> + <!-- The destination that you resubmit jobs to can be any runner type --> + <param id="nativeSpecification">-l h_rt=96:00:00</param> + </destination> + + </destinations> + <resources default="default"> + <!-- Group different parameters defined in job_resource_params_conf.xml + together and assign these groups ids. The tool section below can map + tools to different groups. This is experimental functionality! + --> + <group id="default"></group> + <group id="memoryonly">memory</group> + <group id="all">processors,memory,time,project</group> + </resources> + <tools> + <!-- Tools can be configured to use specific destinations or handlers, + identified by either the "id" or "tags" attribute. If assigned to + a tag, a handler or destination that matches that tag will be + chosen at random. + --> + <tool id="foo" handler="trackster_handler"> + <param id="source">trackster</param> + </tool> + <tool id="bar" destination="dynamic"/> + <!-- The next example defines a resource group to insert into the tool interface + and pass to the dynamic destination (as the resource_params argument). --> + <tool id="longbar" destination="dynamic" resources="all" /> + <tool id="baz" handler="special_handlers" destination="bigmem"/> + </tools> + <limits> + <!-- Certain limits can be defined. The 'concurrent_jobs' limits all + control the number of jobs that can be "active" at a time, that + is, dispatched to a runner and in the 'queued' or 'running' + states. + + A race condition exists that will allow destination_* concurrency + limits to be surpassed when multiple handlers are allowed to + handle jobs for the same destination. To prevent this, assign all + jobs for a specific destination to a single handler. + --> + <!-- registered_user_concurrent_jobs: + Limit on the number of jobs a user with a registered Galaxy + account can have active across all destinations. + --> + <limit type="registered_user_concurrent_jobs">2</limit> + <!-- anonymous_user_concurrent_jobs: + Likewise, but for unregistered/anonymous users. + --> + <limit type="anonymous_user_concurrent_jobs">1</limit> + <!-- destination_user_concurrent_jobs: + The number of jobs a user can have active in the specified + destination, or across all destinations identified by the + specified tag. (formerly: concurrent_jobs) + --> + <limit type="destination_user_concurrent_jobs" id="local">1</limit> + <limit type="destination_user_concurrent_jobs" tag="mycluster">2</limit> + <limit type="destination_user_concurrent_jobs" tag="longjobs">1</limit> + <!-- destination_total_concurrent_jobs: + The number of jobs that can be active in the specified + destination (or across all destinations identified by the + specified tag) by any/all users. + --> + <limit type="destination_total_concurrent_jobs" id="local">16</limit> + <limit type="destination_total_concurrent_jobs" tag="longjobs">100</limit> + <!-- walltime: + Amount of time a job can run (in any destination) before it + will be terminated by Galaxy. + --> + <limit type="walltime">24:00:00</limit> + <!-- output_size: + Size that any defined tool output can grow to before the job + will be terminated.
This does not include temporary files + created by the job. The format is flexible, e.g.: + '10GB' = '10g' = '10240 Mb' = '10737418240' + --> + <limit type="output_size">10GB</limit> + </limits> +</job_conf> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/job_conf.xml.sample_basic --- /dev/null +++ b/config/job_conf.xml.sample_basic @@ -0,0 +1,13 @@ +<?xml version="1.0"?> +<!-- A sample job config that explicitly configures job running the way it is configured by default (if there is no explicit config). --> +<job_conf> + <plugins> + <plugin id="local" type="runner" load="galaxy.jobs.runners.local:LocalJobRunner" workers="4"/> + </plugins> + <handlers> + <handler id="main"/> + </handlers> + <destinations> + <destination id="local" runner="local"/> + </destinations> +</job_conf> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/job_metrics_conf.xml.sample --- /dev/null +++ b/config/job_metrics_conf.xml.sample @@ -0,0 +1,124 @@ +<?xml version="1.0"?> +<!-- If job_metrics.xml exists, this file will define the default job metric + plugin used for all jobs. Individual job_conf.xml destinations can + disable metric collection by setting metrics="off" on that destination. + The metrics attribute on destination definition elements can also be + a path - in which case that XML metrics file will be loaded and used for + that destination. Finally, the destination element may contain a job_metrics + child element (with all options defined below) to define job metrics in an + embedded manner directly in the job_conf.xml file. +--> +<job_metrics> + <!-- Each element in this file corresponds to a job instrumentation plugin + used to generate metrics in lib/galaxy/jobs/metrics/instrumenters. --> + + <!-- The core plugin captures Galaxy slots, start and end of job (in seconds + since epoch) and computes runtime in seconds. --> + <core /> + + <!-- Uncomment to dump the processor count for each job - linux only. --> + <!-- <cpuinfo /> --> + <!-- Uncomment to dump information about all processors for each + job - this is likely too much data. Linux only. --> + <!-- <cpuinfo verbose="true" /> --> + + <!-- Uncomment to dump system memory information for each job - linux + only. --> + <!-- <meminfo /> --> + + <!-- Uncomment to record the operating system each job is executed on - linux + only. --> + <!-- <uname /> --> + + <!-- Uncomment the following to enable a plugin dumping the complete environment + for each job, potentially useful for debugging --> + <!-- <env /> --> + <!-- The env plugin can also record more targeted, obviously useful variables + as well. --> + <!-- <env variables="HOSTNAME,SLURM_CPUS_ON_NODE,SLURM_JOBID" /> --> + + <!-- <collectl /> --> + <!-- Collectl (http://collectl.sourceforge.net/) is a powerful monitoring + utility capable of gathering numerous system- and process-level + statistics of running applications. The Galaxy collectl job metrics + plugin will by default grab a variety of process-level metrics + aggregated across all processes corresponding to a job; this behavior + is highly customizable - both using the attributes documented below + and by simply hacking up the code in lib/galaxy/jobs/metrics. + + Warning: In order to use this plugin collectl must be available on the + compute server the job runs on and on the local Galaxy server as well + (unless in this latter case summarize_process_data is set to False).
+ + Attributes (the following describes attributes that can be used with + the collectl job metrics element above to modify its behavior). + + 'summarize_process_data': Boolean indicating whether to run collectl + in playback mode after jobs complete and gather process-level + statistics for the job run. These statistics can be customized + with the 'process_statistics' attribute. (defaults to True) + + 'saved_logs_path': If set (it is off by default), all collectl logs + will be saved to the specified path after jobs complete. These + logs can later be replayed using collectl offline to generate + full time-series data corresponding to a job run. + + 'subsystems': Comma-separated list of collectl subsystems to collect + data for. The plugin doesn't currently expose all of them or offer + summary data for any of them except 'process', but extensions + would be welcome. It may seem pointless to include subsystems + besides process since they won't be processed online by Galaxy - + but if 'saved_logs_path' is set these files can be played back at any time. + + Available subsystems - 'process', 'cpu', 'memory', 'network', + 'disk'. (Default 'process'.) + + Warning: If you override this - be sure to include 'process' + unless 'summarize_process_data' is set to false. + + 'process_statistics': If 'summarize_process_data' is set, this attribute can be + specified as a comma-separated list to override the statistics + that are gathered. Each statistic is of the form X_Y where X + is one of 'min', 'max', 'count', 'avg', or 'sum' and Y is a + value from 'S', 'VmSize', 'VmLck', 'VmRSS', 'VmData', 'VmStk', + 'VmExe', 'VmLib', 'CPU', 'SysT', 'UsrT', 'PCT', 'AccumT', 'WKB', + 'RKBC', 'WKBC', 'RSYS', 'WSYS', 'CNCL', 'MajF', 'MinF'. Consult + lib/galaxy/jobs/metrics/collectl/processes.py for more details + on what each of these resource types means. + + Defaults to 'max_VmSize,avg_VmSize,max_VmRSS,avg_VmRSS,sum_SysT,sum_UsrT,max_PCT avg_PCT,max_AccumT,sum_RSYS,sum_WSYS' + as a variety of statistics roughly describing CPU and memory + usage of the program and VERY ROUGHLY describing I/O consumption. + + 'procfilt_on': By default Galaxy will tell collectl to only collect + 'process'-level data for the current user (as identified + by 'username', the default) - this can be disabled by setting this + to 'none' - the plugin will still only aggregate process-level + statistics for the job's process tree - but the additional + information can still be used offline with 'saved_logs_path' + if set. Obscurely, this can also be set to 'uid' to identify + the current user to filter on by UID instead of username - + this may be needed on some clusters(?). + + 'interval': The time (in seconds) between data collection points. + Collectl uses a variety of different defaults for different + subsystems if this is not set, but process information (likely + the most pertinent for Galaxy jobs) will be collected every + 60 seconds. + + 'flush': Interval (in seconds, I think) between when collectl will + flush its buffer to disk. Galaxy overrides this to disable + flushing by default if not set. + + 'local_collectl_path', 'remote_collectl_path', 'collectl_path': + By default, jobs will just assume collectl is on the PATH, but + it can be overridden with 'local_collectl_path' and + 'remote_collectl_path' (or simply 'collectl_path' if it is not + on the path but installed in the same location both locally and + remotely). + + There are more, increasingly obscure, options including + log_collectl_program_output, interval2, and interval3.
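As a concrete illustration, a collectl element exercising a few of the attributes above might look like the following sketch (the attribute values here are illustrative assumptions, not shipped defaults):

    <collectl subsystems="process,memory" interval="10" saved_logs_path="database/collectl_logs" procfilt_on="username" />

With 'saved_logs_path' set as above, the raw collectl logs are retained for offline replay in addition to the summarized process statistics.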
Consult + source code for more details. + --> +</job_metrics> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/job_resource_params_conf.xml.sample --- /dev/null +++ b/config/job_resource_params_conf.xml.sample @@ -0,0 +1,6 @@ +<parameters> + <param label="Processors" name="processors" type="integer" size="2" min="1" max="64" value="" help="Number of processing cores, 'ppn' value (1-64). Leave blank to use default value." /> + <param label="Memory" name="memory" type="integer" size="3" min="1" max="256" value="" help="Memory size in gigabytes, 'pmem' value (1-256). Leave blank to use default value." /> + <param label="Time" name="time" type="integer" size="3" min="1" max="744" value="" help="Maximum job time in hours, 'walltime' value (1-744). Leave blank to use default value." /> + <param label="Project" name="project" type="text" value="" help="Project to assign resource allocation to. Leave blank to use default value." /> +</parameters> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/migrated_tools_conf.xml.sample --- /dev/null +++ b/config/migrated_tools_conf.xml.sample @@ -0,0 +1,3 @@ +<?xml version="1.0"?> +<toolbox tool_path="../shed_tools"> +</toolbox> \ No newline at end of file diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/object_store_conf.xml.sample --- /dev/null +++ b/config/object_store_conf.xml.sample @@ -0,0 +1,35 @@ +<?xml version="1.0"?> +<object_store type="hierarchical"> + <backends> + <object_store type="distributed" id="primary" order="0"> + <backends> + <backend id="files1" type="disk" weight="1"> + <files_dir path="database/files1"/> + <extra_dir type="temp" path="database/tmp1"/> + <extra_dir type="job_work" path="database/job_working_directory1"/> + </backend> + <backend id="files2" type="disk" weight="1"> + <files_dir path="database/files2"/> + <extra_dir type="temp" path="database/tmp2"/> + <extra_dir type="job_work" path="database/job_working_directory2"/> + </backend> + </backends> + </object_store> + <object_store type="disk" id="secondary" order="1"> + <files_dir path="database/files3"/> + <extra_dir type="temp" path="database/tmp3"/> + <extra_dir type="job_work" path="database/job_working_directory3"/> + </object_store> + <!-- Sample S3 Object Store + + <object_store type="s3"> + <auth access_key="...." secret_key="....." 
/> + <bucket name="unique_bucket_name" use_reduced_redundancy="False" /> + <connection host="" port="" is_secure="" conn_path="" /> + <cache path="database/files/" size="100" /> + </object_store> + + --> + </backends> +</object_store> + diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/openid_conf.xml.sample --- /dev/null +++ b/config/openid_conf.xml.sample @@ -0,0 +1,8 @@ +<?xml version="1.0"?> +<openid> + <provider file="google.xml" /> + <provider file="yahoo.xml" /> + <provider file="aol.xml" /> + <provider file="launchpad.xml" /> + <provider file="genomespace.xml" /> +</openid> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/reports_wsgi.ini.sample --- /dev/null +++ b/config/reports_wsgi.ini.sample @@ -0,0 +1,69 @@ +# ---- HTTP Server ---------------------------------------------------------- + +[server:main] + +use = egg:Paste#http +port = 9001 +host = 127.0.0.1 +use_threadpool = true +threadpool_workers = 10 + +# ---- Galaxy Webapps Report Interface ------------------------------------------------- + +[app:main] + +# Specifies the factory for the universe WSGI application +paste.app_factory = galaxy.webapps.reports.buildapp:app_factory +log_level = DEBUG + +# Database connection +# Galaxy reports are intended for production Galaxy instances, so sqlite is not supported. +# You may use a SQLAlchemy connection string to specify an external database. +# database_connection = postgres:///galaxy_test?user=postgres&password=postgres + +# Where dataset files are saved +file_path = database/files +# Temporary storage for additional datasets, this should be shared through the cluster +new_file_path = database/tmp + +# Session support (beaker) +use_beaker_session = True +session_type = memory +session_data_dir = %(here)s/database/beaker_sessions +session_key = galaxysessions +session_secret = changethisinproduction + +# Configuration for debugging middleware +# debug = true +use_lint = false + +# NEVER enable this on a public site (even test or QA) +# use_interactive = true + +# path to sendmail +sendmail_path = /usr/sbin/sendmail + +# Address to join mailing list +mailing_join_addr = galaxy-user-join@bx.psu.edu + +# Write thread status periodically to 'heartbeat.log' (careful, uses disk space rapidly!) 
+## use_heartbeat = True + +# Profiling middleware (cProfile based) +## use_profile = True + +# Mail +# smtp_server = yourserver@yourfacility.edu +# error_email_to = your_bugs@bx.psu.edu + +# Use the new iframe / javascript based layout +use_new_layout = true + +# Serving static files (needed if running standalone) +static_enabled = True +static_cache_time = 360 +static_dir = %(here)s/static/ +static_images_dir = %(here)s/static/images +static_favicon_dir = %(here)s/static/favicon.ico +static_scripts_dir = %(here)s/static/scripts/ +static_style_dir = %(here)s/static/june_2007_style/blue diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/shed_data_manager_conf.xml.sample --- /dev/null +++ b/config/shed_data_manager_conf.xml.sample @@ -0,0 +1,3 @@ +<?xml version="1.0"?> +<data_managers> +</data_managers> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/shed_tool_conf.xml.sample --- /dev/null +++ b/config/shed_tool_conf.xml.sample @@ -0,0 +1,3 @@ +<?xml version="1.0"?> +<toolbox tool_path="../shed_tools"> +</toolbox> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/shed_tool_data_table_conf.xml.sample --- /dev/null +++ b/config/shed_tool_data_table_conf.xml.sample @@ -0,0 +1,3 @@ +<?xml version="1.0"?> +<tables> +</tables> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/tool_conf.xml.main --- /dev/null +++ b/config/tool_conf.xml.main @@ -0,0 +1,134 @@ +<?xml version='1.0' encoding='utf-8'?> +<toolbox> + <section id="getext" name="Get Data"> + <tool file="data_source/upload.xml" /> + <tool file="data_source/ucsc_tablebrowser.xml" /> + <tool file="data_source/ucsc_tablebrowser_archaea.xml" /> + <tool file="data_source/ebi_sra.xml" /> + <tool file="data_source/biomart.xml" /> + <tool file="data_source/gramene_mart.xml" /> + <tool file="data_source/flymine.xml" /> + <tool file="data_source/fly_modencode.xml" /> + <tool file="data_source/modmine.xml" /> + <tool file="data_source/mousemine.xml" /> + <tool file="data_source/ratmine.xml" /> + <tool file="data_source/yeastmine.xml" /> + <tool file="data_source/worm_modencode.xml" /> + <tool file="data_source/wormbase.xml" /> + <tool file="data_source/eupathdb.xml" /> + <tool file="genomespace/genomespace_file_browser_prod.xml" /> + <tool file="genomespace/genomespace_importer.xml" /> + </section> + <section id="send" name="Send Data"> + <tool file="genomespace/genomespace_exporter.xml" /> + </section> + <section id="liftOver" name="Lift-Over"> + <tool file="extract/liftOver_wrapper.xml" /> + </section> + <section id="textutil" name="Text Manipulation"> + <tool file="filters/fixedValueColumn.xml" /> + <tool file="filters/catWrapper.xml" /> + <tool file="filters/condense_characters.xml" /> + <tool file="filters/convert_characters.xml" /> + <tool file="filters/mergeCols.xml" /> + <tool file="filters/CreateInterval.xml" /> + <tool file="filters/cutWrapper.xml" /> + <tool file="filters/changeCase.xml" /> + <tool file="filters/pasteWrapper.xml" /> + <tool file="filters/remove_beginning.xml" /> + <tool file="filters/randomlines.xml" /> + <tool file="filters/headWrapper.xml" /> + <tool file="filters/tailWrapper.xml" /> + <tool file="filters/trimmer.xml" /> + <tool file="filters/wc_gnu.xml" /> + <tool file="filters/secure_hash_message_digest.xml" /> + </section> + <section id="convert" name="Convert Formats"> + <tool file="filters/bed2gff.xml" /> + <tool 
file="filters/gff2bed.xml" /> + <tool file="maf/maf_to_bed.xml" /> + <tool file="maf/maf_to_interval.xml" /> + <tool file="maf/maf_to_fasta.xml" /> + <tool file="filters/sff_extractor.xml" /> + <tool file="filters/wig_to_bigwig.xml" /> + <tool file="filters/bed_to_bigbed.xml" /> + </section> + <section id="filter" name="Filter and Sort"> + <tool file="stats/filtering.xml" /> + <tool file="filters/sorter.xml" /> + <tool file="filters/grep.xml" /> + <label id="gff" text="GFF" /> + <tool file="filters/gff/extract_GFF_Features.xml" /> + <tool file="filters/gff/gff_filter_by_attribute.xml" /> + <tool file="filters/gff/gff_filter_by_feature_count.xml" /> + <tool file="filters/gff/gtf_filter_by_attribute_values_list.xml" /> + </section> + <section id="group" name="Join, Subtract and Group"> + <tool file="filters/joiner.xml" /> + <tool file="filters/compare.xml" /> + <tool file="stats/grouping.xml" /> + </section> + <section id="features" name="Extract Features"> + <tool file="filters/ucsc_gene_bed_to_exon_bed.xml" /> + </section> + <section id="fetchSeq" name="Fetch Sequences"> + <tool file="extract/extract_genomic_dna.xml" /> + </section> + <section id="fetchAlign" name="Fetch Alignments"> + <tool file="maf/interval2maf_pairwise.xml" /> + <tool file="maf/interval2maf.xml" /> + <tool file="maf/interval_maf_to_merged_fasta.xml" /> + <tool file="maf/genebed_maf_to_fasta.xml" /> + <tool file="maf/maf_stats.xml" /> + <tool file="maf/maf_thread_for_species.xml" /> + <tool file="maf/maf_limit_to_species.xml" /> + <tool file="maf/maf_limit_size.xml" /> + <tool file="maf/maf_by_block_number.xml" /> + <tool file="maf/maf_filter.xml" /> + <tool file="maf/maf_reverse_complement.xml" /> + </section> + <section id="scores" name="Get Genomic Scores"> + <tool file="filters/wiggle_to_simple.xml" /> + <tool file="stats/aggregate_binned_scores_in_intervals.xml" /> + </section> + <section id="stats" name="Statistics"> + <tool file="stats/gsummary.xml" /> + <tool file="filters/uniq.xml" /> + </section> + <section id="plots" name="Graph/Display Data"> + <tool file="plotting/boxplot.xml" /> + <tool file="maf/vcf_to_maf_customtrack.xml" /> + <tool file="mutation/visualize.xml" /> + </section> + <section id="hgv" name="Phenotype Association"> + <tool file="evolution/codingSnps.xml" /> + <tool file="evolution/add_scores.xml" /> + <tool file="phenotype_association/sift.xml" /> + <tool file="phenotype_association/linkToGProfile.xml" /> + <tool file="phenotype_association/linkToDavid.xml" /> + <tool file="phenotype_association/ldtools.xml" /> + <tool file="phenotype_association/pass.xml" /> + <tool file="phenotype_association/gpass.xml" /> + <tool file="phenotype_association/beam.xml" /> + <tool file="phenotype_association/lps.xml" /> + <tool file="phenotype_association/master2pg.xml" /> + </section> + <label id="ngs" text="NGS Toolbox Beta" /> + <section id="cshl_library_information" name="NGS: QC and manipulation"> + <label id="illumina" text="Illumina data" /> + <label id="454" text="Roche-454 data" /> + <label id="solid" text="AB-SOLiD data" /> + <tool file="next_gen_conversion/solid2fastq.xml" /> + <tool file="solid_tools/solid_qual_stats.xml" /> + <tool file="solid_tools/solid_qual_boxplot.xml" /> + <label id="generic_fastq" text="Generic FASTQ manipulation" /> + <label id="fastx_toolkit_fastq" text="FASTX-Toolkit for FASTQ data" /> + </section> + <section id="ngs_mapping" name="NGS: Mapping"> + <label id="illumina" text="Illumina" /> + <label id="roche_454" text="Roche-454" /> + <label id="ab_solid" text="AB-SOLiD" 
/> + </section> + <section id="samtools" name="NGS: SAM Tools"> + </section> +</toolbox> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/tool_conf.xml.sample --- /dev/null +++ b/config/tool_conf.xml.sample @@ -0,0 +1,186 @@ +<?xml version='1.0' encoding='utf-8'?> +<toolbox> + <section id="getext" name="Get Data"> + <tool file="data_source/upload.xml" /> + <tool file="data_source/ucsc_tablebrowser.xml" /> + <tool file="data_source/ucsc_tablebrowser_test.xml" /> + <tool file="data_source/ucsc_tablebrowser_archaea.xml" /> + <tool file="data_source/ebi_sra.xml" /> + <tool file="data_source/microbial_import.xml" /> + <tool file="data_source/biomart.xml" /> + <tool file="data_source/biomart_test.xml" /> + <tool file="data_source/cbi_rice_mart.xml" /> + <tool file="data_source/gramene_mart.xml" /> + <tool file="data_source/fly_modencode.xml" /> + <tool file="data_source/flymine.xml" /> + <tool file="data_source/flymine_test.xml" /> + <tool file="data_source/modmine.xml" /> + <tool file="data_source/mousemine.xml" /> + <tool file="data_source/ratmine.xml" /> + <tool file="data_source/yeastmine.xml" /> + <tool file="data_source/metabolicmine.xml" /> + <tool file="data_source/worm_modencode.xml" /> + <tool file="data_source/wormbase.xml" /> + <tool file="data_source/wormbase_test.xml" /> + <tool file="data_source/eupathdb.xml" /> + <tool file="data_source/hbvar.xml" /> + <tool file="genomespace/genomespace_file_browser_prod.xml" /> + <tool file="genomespace/genomespace_importer.xml" /> + <tool file="validation/fix_errors.xml" /> + </section> + <section id="send" name="Send Data"> + <tool file="genomespace/genomespace_exporter.xml" /> + </section> + <section id="liftOver" name="Lift-Over"> + <tool file="extract/liftOver_wrapper.xml" /> + </section> + <section id="textutil" name="Text Manipulation"> + <tool file="filters/fixedValueColumn.xml" /> + <tool file="filters/catWrapper.xml" /> + <tool file="filters/cutWrapper.xml" /> + <tool file="filters/mergeCols.xml" /> + <tool file="filters/convert_characters.xml" /> + <tool file="filters/CreateInterval.xml" /> + <tool file="filters/cutWrapper.xml" /> + <tool file="filters/changeCase.xml" /> + <tool file="filters/pasteWrapper.xml" /> + <tool file="filters/remove_beginning.xml" /> + <tool file="filters/randomlines.xml" /> + <tool file="filters/headWrapper.xml" /> + <tool file="filters/tailWrapper.xml" /> + <tool file="filters/trimmer.xml" /> + <tool file="filters/wc_gnu.xml" /> + <tool file="filters/secure_hash_message_digest.xml" /> + </section> + <section id="filter" name="Filter and Sort"> + <tool file="stats/filtering.xml" /> + <tool file="filters/sorter.xml" /> + <tool file="filters/grep.xml" /> + + <label id="gff" text="GFF" /> + <tool file="filters/gff/extract_GFF_Features.xml" /> + <tool file="filters/gff/gff_filter_by_attribute.xml" /> + <tool file="filters/gff/gff_filter_by_feature_count.xml" /> + <tool file="filters/gff/gtf_filter_by_attribute_values_list.xml" /> + </section> + <section id="group" name="Join, Subtract and Group"> + <tool file="filters/joiner.xml" /> + <tool file="filters/compare.xml" /> + <tool file="stats/grouping.xml" /> + </section> + <section id="convert" name="Convert Formats"> + <tool file="filters/axt_to_concat_fasta.xml" /> + <tool file="filters/axt_to_fasta.xml" /> + <tool file="filters/axt_to_lav.xml" /> + <tool file="filters/bed2gff.xml" /> + <tool file="filters/gff2bed.xml" /> + <tool file="filters/lav_to_bed.xml" /> + <tool file="maf/maf_to_bed.xml" /> + <tool 
file="maf/maf_to_interval.xml" /> + <tool file="maf/maf_to_fasta.xml" /> + <tool file="filters/wiggle_to_simple.xml" /> + <tool file="filters/sff_extractor.xml" /> + <tool file="filters/gtf2bedgraph.xml" /> + <tool file="filters/wig_to_bigwig.xml" /> + <tool file="filters/bed_to_bigbed.xml" /> + </section> + <section id="features" name="Extract Features"> + <tool file="filters/ucsc_gene_bed_to_exon_bed.xml" /> + </section> + <section id="fetchSeq" name="Fetch Sequences"> + <tool file="extract/extract_genomic_dna.xml" /> + </section> + <section id="fetchAlign" name="Fetch Alignments"> + <tool file="maf/interval2maf_pairwise.xml" /> + <tool file="maf/interval2maf.xml" /> + <tool file="maf/maf_split_by_species.xml" /> + <tool file="maf/interval_maf_to_merged_fasta.xml" /> + <tool file="maf/genebed_maf_to_fasta.xml" /> + <tool file="maf/maf_stats.xml" /> + <tool file="maf/maf_thread_for_species.xml" /> + <tool file="maf/maf_limit_to_species.xml" /> + <tool file="maf/maf_limit_size.xml" /> + <tool file="maf/maf_by_block_number.xml" /> + <tool file="maf/maf_reverse_complement.xml" /> + <tool file="maf/maf_filter.xml" /> + </section> + <section id="scores" name="Get Genomic Scores"> + <tool file="filters/wiggle_to_simple.xml" /> + <tool file="stats/aggregate_binned_scores_in_intervals.xml" /> + </section> + <section id="stats" name="Statistics"> + <tool file="stats/gsummary.xml" /> + <tool file="filters/uniq.xml" /> + </section> + <section id="plots" name="Graph/Display Data"> + <tool file="plotting/bar_chart.xml" /> + <tool file="plotting/boxplot.xml" /> + <tool file="visualization/LAJ.xml" /> + <tool file="maf/vcf_to_maf_customtrack.xml" /> + <tool file="mutation/visualize.xml" /> + </section> + <section id="hyphy" name="Evolution"> + <tool file="evolution/codingSnps.xml" /> + <tool file="evolution/add_scores.xml" /> + </section> + <section id="motifs" name="Motif Tools"> + <tool file="meme/meme.xml" /> + <tool file="meme/fimo.xml" /> + </section> + <section id="NGS_QC" name="NGS: QC and manipulation"> + + <label id="fastqcsambam" text="FastQC: fastq/sam/bam" /> + + <label id="illumina" text="Illumina fastq" /> + + <label id="454" text="Roche-454 data" /> + <label id="solid" text="AB-SOLiD data" /> + <tool file="next_gen_conversion/solid2fastq.xml" /> + <tool file="solid_tools/solid_qual_stats.xml" /> + <tool file="solid_tools/solid_qual_boxplot.xml" /> + + <label id="generic_fastq" text="Generic FASTQ manipulation" /> + + <label id="fastx_toolkit" text="FASTX-Toolkit for FASTQ data" /> + </section> + <!-- + Keep this section commented until it includes tools that + will be hosted on test/main. The velvet wrappers have been + included in the distribution but will not be hosted on our + public servers for the current time. 
+ <section name="NGS: Assembly" id="ngs_assembly"> + <label text="Velvet" id="velvet"/> + <tool file="sr_assembly/velvetg.xml" /> + <tool file="sr_assembly/velveth.xml" /> + </section> + --> + <section id="solexa_tools" name="NGS: Mapping"> + <tool file="sr_mapping/bfast_wrapper.xml" /> + <tool file="sr_mapping/PerM.xml" /> + <tool file="sr_mapping/srma_wrapper.xml" /> + <tool file="sr_mapping/mosaik.xml" /> + </section> + <section id="ngs-rna-tools" name="NGS: RNA Analysis"> + + <label id="rna_seq" text="RNA-seq" /> + <label id="filtering" text="Filtering" /> + </section> + <section id="samtools" name="NGS: SAM Tools"> + </section> + <section id="ngs-simulation" name="NGS: Simulation"> + <tool file="ngs_simulation/ngs_simulation.xml" /> + </section> + <section id="hgv" name="Phenotype Association"> + <tool file="evolution/codingSnps.xml" /> + <tool file="evolution/add_scores.xml" /> + <tool file="phenotype_association/sift.xml" /> + <tool file="phenotype_association/linkToGProfile.xml" /> + <tool file="phenotype_association/linkToDavid.xml" /> + <tool file="phenotype_association/ldtools.xml" /> + <tool file="phenotype_association/pass.xml" /> + <tool file="phenotype_association/gpass.xml" /> + <tool file="phenotype_association/beam.xml" /> + <tool file="phenotype_association/lps.xml" /> + <tool file="phenotype_association/master2pg.xml" /> + </section> +</toolbox> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/tool_data_table_conf.xml.sample --- /dev/null +++ b/config/tool_data_table_conf.xml.sample @@ -0,0 +1,63 @@ +<!-- Use the file tool_data_table_conf.xml.oldlocstyle if you don't want to update your loc files as changed in revision 4550:535d276c92bc--> +<tables> + <!-- Locations of all fasta files under genome directory --> + <table name="all_fasta" comment_char="#"> + <columns>value, dbkey, name, path</columns> + <file path="tool-data/all_fasta.loc" /> + </table> + <!-- Locations of indexes in the BFAST mapper format --> + <table name="bfast_indexes" comment_char="#"> + <columns>value, dbkey, formats, name, path</columns> + <file path="tool-data/bfast_indexes.loc" /> + </table> + <!-- Locations of protein (mega)blast databases --> + <table name="blastdb_p" comment_char="#"> + <columns>value, name, path</columns> + <file path="tool-data/blastdb_p.loc" /> + </table> + <!-- Locations of indexes in the BWA mapper format --> + <table name="bwa_indexes" comment_char="#"> + <columns>value, dbkey, name, path</columns> + <file path="tool-data/bwa_index.loc" /> + </table> + <!-- Locations of indexes in the BWA color-space mapper format --> + <table name="bwa_indexes_color" comment_char="#"> + <columns>value, dbkey, name, path</columns> + <file path="tool-data/bwa_index_color.loc" /> + </table> + <!-- Locations of MAF files that have been indexed with bx-python --> + <table name="indexed_maf_files"> + <columns>name, value, dbkey, species</columns> + <file path="tool-data/maf_index.loc" /> + </table> + <!-- Locations of fasta files appropriate for NGS simulation --> + <table name="ngs_sim_fasta" comment_char="#"> + <columns>value, dbkey, name, path</columns> + <file path="tool-data/ngs_sim_fasta.loc" /> + </table> + <!-- Locations of PerM base index files --> + <table name="perm_base_indexes" comment_char="#"> + <columns>value, name, path</columns> + <file path="tool-data/perm_base_index.loc" /> + </table> + <!-- Locations of PerM color-space index files --> + <table name="perm_color_indexes" comment_char="#"> + <columns>value, name, 
path</columns> + <file path="tool-data/perm_color_index.loc" /> + </table> + <!-- Location of Picard dict file and other files --> + <table name="picard_indexes" comment_char="#"> + <columns>value, dbkey, name, path</columns> + <file path="tool-data/picard_index.loc" /> + </table> + <!-- Location of SRMA dict file and other files --> + <table name="srma_indexes" comment_char="#"> + <columns>value, dbkey, name, path</columns> + <file path="tool-data/picard_index.loc" /> + </table> + <!-- Location of Mosaik files --> + <table name="mosaik_indexes" comment_char="#"> + <columns>value, dbkey, name, path</columns> + <file path="tool-data/mosaik_index.loc" /> + </table> +</tables> diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/tool_shed_wsgi.ini.sample --- /dev/null +++ b/config/tool_shed_wsgi.ini.sample @@ -0,0 +1,114 @@ +# ---- HTTP Server ---------------------------------------------------------- + +[server:main] + +use = egg:Paste#http +port = 9009 + +# The address on which to listen. By default, only listen to localhost (the tool shed will not +# be accessible over the network). Use '0.0.0.0' to listen on all available network interfaces. +#host = 0.0.0.0 +host = 127.0.0.1 + +use_threadpool = true +threadpool_workers = 10 +# Set the number of seconds a thread can work before you should kill it (assuming it will never finish) to 3 hours. +threadpool_kill_thread_limit = 10800 + +# ---- Galaxy Webapps Community Interface ------------------------------------------------- + +[app:main] + +# Specifies the factory for the universe WSGI application +paste.app_factory = galaxy.webapps.tool_shed.buildapp:app_factory +log_level = DEBUG + +# Database connection +database_file = database/community.sqlite +# You may use a SQLAlchemy connection string to specify an external database instead +#database_connection = postgres:///community_test?host=/var/run/postgresql + +# Where the hgweb.config file is stored. The default is the Galaxy installation directory. +#hgweb_config_dir = None + +# Where tool shed repositories are stored. +file_path = database/community_files +# Temporary storage for additional datasets, this should be shared through the cluster +new_file_path = database/tmp + +# File containing old-style genome builds +#builds_file_path = tool-data/shared/ucsc/builds.txt + +# Session support (beaker) +use_beaker_session = True +session_type = memory +session_data_dir = %(here)s/database/beaker_sessions +session_key = galaxysessions +session_secret = changethisinproduction + +# -- Users and Security + +# Galaxy encodes various internal values when these values will be output in +# some format (for example, in a URL or cookie). You should set a key to be +# used by the algorithm that encodes and decodes these values. It can be any +# string. If left unchanged, anyone could construct a cookie that would grant +# them access to others' sessions. +id_secret = changethisinproductiontoo + +# User authentication can be delegated to an upstream proxy server (usually +# Apache). The upstream proxy should set a REMOTE_USER header in the request. +# Enabling remote user disables regular logins. 
For more information, see: +# https://wiki.galaxyproject.org/Admin/Config/ApacheProxy +#use_remote_user = False + +# Configuration for debugging middleware +debug = true +use_lint = false + +# NEVER enable this on a public site (even test or QA) +# use_interactive = true + +# this should be a comma-separated list of valid Galaxy users +#admin_users = user1@example.org,user2@example.org + +# Force everyone to log in (disable anonymous access) +require_login = False + +# path to sendmail +sendmail_path = /usr/sbin/sendmail + +# Number of saved tool test results produced by the install and test framework for each repository. +#num_tool_test_results_saved = 5 + +# For use by email messages sent from the tool shed +#smtp_server = smtp.your_tool_shed_server +#email_from = your_tool_shed_email@server + +# The URL linked by the "Support" link in the "Help" menu. +#support_url = https://wiki.galaxyproject.org/Support + +# Write thread status periodically to 'heartbeat.log' (careful, uses disk space rapidly!) +## use_heartbeat = True + +# Profiling middleware (cProfile based) +## use_profile = True + +# Enable creation of Galaxy flavor Docker Image +#enable_galaxy_flavor_docker_image = False + +# Use the new iframe / javascript based layout +use_new_layout = true + +# Show a message box under the masthead. +#message_box_visible = False +#message_box_content = None +#message_box_class = info + +# Serving static files (needed if running standalone) +static_enabled = True +static_cache_time = 360 +static_dir = %(here)s/static/ +static_images_dir = %(here)s/static/images +static_favicon_dir = %(here)s/static/favicon.ico +static_scripts_dir = %(here)s/static/scripts/ +static_style_dir = %(here)s/static/june_2007_style/blue diff -r 344edd250cc9bd38b3370e4be932daa9085e7b6d -r e4daf2910a9989eda0efe2b532adb860999532c3 config/tool_sheds_conf.xml.sample --- /dev/null +++ b/config/tool_sheds_conf.xml.sample @@ -0,0 +1,5 @@ +<?xml version="1.0"?> +<tool_sheds> + <tool_shed name="Galaxy main tool shed" url="http://toolshed.g2.bx.psu.edu/"/> + <tool_shed name="Galaxy test tool shed" url="http://testtoolshed.g2.bx.psu.edu/"/> +</tool_sheds> This diff is so big that we needed to truncate the remainder. https://bitbucket.org/galaxy/galaxy-central/commits/21fb6c2f4349/ Changeset: 21fb6c2f4349 User: natefoo Date: 2014-09-15 20:34:40 Summary: Rename universe_wsgi.ini to galaxy.ini Affected #: 2 files diff -r e4daf2910a9989eda0efe2b532adb860999532c3 -r 21fb6c2f4349c358321b9344b9dbda20264b78fb config/galaxy.ini.sample --- /dev/null +++ b/config/galaxy.ini.sample @@ -0,0 +1,882 @@ +# +# Galaxy is configured by default to be useable in a single-user development +# environment. To tune the application for a multi-user production +# environment, see the documentation at: +# +# http://usegalaxy.org/production +# + +# Throughout this sample configuration file, except where stated otherwise, +# uncommented values override the default if left unset, whereas commented +# values are set to the default value. +# Examples of many of these options are explained in more detail in the wiki: +# +# https://wiki.galaxyproject.org/Admin/Config +# +# Config hackers are encouraged to check there before asking for help. + +# ---- HTTP Server ---------------------------------------------------------- + +# Configuration of the internal HTTP server. + +[server:main] + +# The internal HTTP server to use. Currently only Paste is provided. This +# option is required. +use = egg:Paste#http + +# The port on which to listen. 
+#port = 8080 + +# The address on which to listen. By default, only listen to localhost (Galaxy +# will not be accessible over the network). Use '0.0.0.0' to listen on all +# available network interfaces. +#host = 127.0.0.1 + +# Use a threadpool for the web server instead of creating a thread for each +# request. +use_threadpool = True + +# Number of threads in the web server thread pool. +#threadpool_workers = 10 + +# Set the number of seconds a thread can work before you should kill it (assuming it will never finish) to 3 hours. +threadpool_kill_thread_limit = 10800 + +# ---- Filters -------------------------------------------------------------- + +# Filters sit between Galaxy and the HTTP server. + +# These filters are disabled by default. They can be enabled with +# 'filter-with' in the [app:main] section below. + +# Define the gzip filter. +[filter:gzip] +use = egg:Paste#gzip + +# Define the proxy-prefix filter. +[filter:proxy-prefix] +use = egg:PasteDeploy#prefix +prefix = /galaxy + +# ---- Galaxy --------------------------------------------------------------- + +# Configuration of the Galaxy application. + +[app:main] + +# -- Application and filtering + +# The factory for the WSGI application. This should not be changed. +paste.app_factory = galaxy.web.buildapp:app_factory + +# If not running behind a proxy server, you may want to enable gzip compression +# to decrease the size of data transferred over the network. If using a proxy +# server, please enable gzip compression there instead. +#filter-with = gzip + +# If running behind a proxy server and Galaxy is served from a subdirectory, +# enable the proxy-prefix filter and set the prefix in the +# [filter:proxy-prefix] section above. +#filter-with = proxy-prefix + +# If proxy-prefix is enabled and you're running more than one Galaxy instance +# behind one hostname, you will want to set this to the same path as the prefix +# in the filter above. This value becomes the "path" attribute set in the +# cookie so the cookies from each instance will not clobber each other. +#cookie_path = None + +# -- Database + +# By default, Galaxy uses a SQLite database at 'database/universe.sqlite'. You +# may use a SQLAlchemy connection string to specify an external database +# instead. This string takes many options which are explained in detail in the +# config file documentation. +#database_connection = sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE + +# If the server logs errors about not having enough database pool connections, +# you will want to increase these values, or consider running more Galaxy +# processes. +#database_engine_option_pool_size = 5 +#database_engine_option_max_overflow = 10 + +# If using MySQL and the server logs the error "MySQL server has gone away", +# you will want to set this to some positive value (7200 should work). +#database_engine_option_pool_recycle = -1 + +# If large database query results are causing memory or response time issues in +# the Galaxy process, leave the result on the server instead. This option is +# only available for PostgreSQL and is highly recommended. +#database_engine_option_server_side_cursors = False + +# Log all database transactions, can be useful for debugging and performance +# profiling. Logging is done via Python's 'logging' module under the qualname +# 'galaxy.model.orm.logging_connection_proxy' +#database_query_profiling_proxy = False + +# By default, Galaxy will use the same database to track user data and +# tool shed install data. 
There are many situations in which it is +# valuable to separate these - for instance, bootstrapping fresh Galaxy +# instances with pretested installs. The following option can be used to +# separate the tool shed install database (all other options listed above +# but prefixed with install_ are also available). +#install_database_connection = sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE + +# -- Files and directories + +# Dataset files are stored in this directory. +#file_path = database/files + +# Temporary files are stored in this directory. +#new_file_path = database/tmp + +# Tool config files define what tools are available in Galaxy. +# Tools can be locally developed or installed from Galaxy tool sheds. +#tool_config_file = tool_conf.xml,shed_tool_conf.xml + +# Enable / disable checking if any tools defined in the above non-shed tool_config_files +# (i.e., tool_conf.xml) have been migrated from the Galaxy code distribution to the Tool +# Shed. This setting should generally be set to False only for development Galaxy environments +# that are often rebuilt from scratch, where migrated tools do not need to be available in the +# Galaxy tool panel. If the following setting remains commented, the default setting will be True. +#check_migrate_tools = True + +# Tool config maintained by tool migration scripts. If you use the migration +# scripts to install tools that have been migrated to the tool shed upon a new +# release, they will be added to this tool config file. +#migrated_tools_config = migrated_tools_conf.xml + +# File that contains the XML section and tool tags from all tool panel config +# files integrated into a single file that defines the tool panel layout. This +# file can be changed by the Galaxy administrator to alter the layout of the +# tool panel. If not present, Galaxy will create it. +#integrated_tool_panel_config = integrated_tool_panel.xml + +# Default path to the directory containing the tools defined in tool_conf.xml. +# Other tool config files must include the tool_path as an attribute in the <toolbox> tag. +#tool_path = tools + +# Path to the directory in which tool dependencies are placed. This is used by +# the tool shed to install dependencies and can also be used by administrators +# to manually install or link to dependencies. For details, see: +# https://wiki.galaxyproject.org/Admin/Config/ToolDependencies +# If this option is not set to a valid path, installing tools with dependencies +# from the Tool Shed will fail. +#tool_dependency_dir = None + +# File containing the Galaxy Tool Sheds that should be made available to +# install from in the admin interface. +#tool_sheds_config_file = tool_sheds_conf.xml + +# Enable automatic polling of relative tool sheds to see if any updates +# are available for installed repositories. Ideally only one Galaxy +# server process should be able to check for repository updates. The +# setting for hours_between_check should be an integer between 1 and 24. +#enable_tool_shed_check = False +#hours_between_check = 12 + +# Enable use of an in-memory registry with bi-directional relationships +# between repositories (i.e., in addition to lists of dependencies for a +# repository, keep an in-memory registry of dependent items for each repository). +#manage_dependency_relationships = False + +# XML config file that contains data table entries for the ToolDataTableManager. This file is manually +# maintained by the Galaxy administrator. +#tool_data_table_config_path = tool_data_table_conf.xml
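(Editor's illustration, not part of this changeset: a table definition from the tool_data_table_conf.xml.sample shown earlier in this changeset,

    <table name="all_fasta" comment_char="#">
        <columns>value, dbkey, name, path</columns>
        <file path="tool-data/all_fasta.loc" />
    </table>

is populated by tab-separated rows in the referenced tool-data/all_fasta.loc; a hypothetical row, with tabs between the four fields and an invented path, would be:

    hg19    hg19    Human (hg19)    /data/genomes/hg19/hg19.fa
)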
+ +# XML config file that contains additional data table entries for the ToolDataTableManager. This file +# is automatically generated based on the currently installed tool shed repositories that contain valid +# tool_data_table_conf.xml.sample files. At the time of installation, these entries are automatically +# added to the following file, which is parsed and applied to the ToolDataTableManager at server startup. +#shed_tool_data_table_config = shed_tool_data_table_conf.xml + +# Directory where data used by tools is located; see the samples in that +# directory and the wiki for help: +# https://wiki.galaxyproject.org/Admin/DataIntegration +#tool_data_path = tool-data + +# File containing old-style genome builds. +#builds_file_path = tool-data/shared/ucsc/builds.txt + +# Directory where chrom len files are kept, currently mainly used by trackster. +#len_file_path = tool-data/shared/ucsc/chrom + +# Datatypes config file, which defines what data (file) types are available in +# Galaxy. +#datatypes_config_file = config/datatypes_conf.xml + +# Disable the 'Auto-detect' option for file uploads. +#datatypes_disable_auto = False + +# Visualizations config directory: where to look for individual visualization plugins. +# The path is relative to the Galaxy root dir. To use an absolute path, begin the path +# with '/'. Defaults to "config/plugins/visualizations". +#visualization_plugins_directory = config/plugins/visualizations + +# Each job is given a unique empty directory as its current working directory. +# This option defines in what parent directory those directories will be +# created. +#job_working_directory = database/job_working_directory + +# If using a cluster, Galaxy will write job scripts and stdout/stderr to this +# directory. +#cluster_files_directory = database/pbs + +# Mako templates are compiled as needed and cached for reuse; this directory is +# used for the cache. +#template_cache_path = database/compiled_templates + +# Citation-related caching. Tool citation information may be fetched from +# external sources such as http://dx.doi.org/ by Galaxy - the following parameters +# can be used to control the caching used to store this information. +#citation_cache_type = file +#citation_cache_data_dir = database/citations/data +#citation_cache_lock_dir = database/citations/lock + +# External service types config file, which defines what types of external_services configurations +# are available in Galaxy. +#external_service_type_config_file = config/external_service_types_conf.xml + +# Path to the directory containing the external_service_types defined in the config. +#external_service_type_path = external_service_types + +# Tools with a number of outputs not known until runtime can write these +# outputs to a directory for collection by Galaxy when the job is done. +# Previously, this directory was new_file_path, but using one global directory +# can cause performance problems, so using job_working_directory ('.' or cwd +# when a job is run) is encouraged. By default, both are checked to avoid +# breaking existing tools. +#collect_outputs_from = new_file_path,job_working_directory + +# -- Data Storage (Object Store) +# +# Configuration file for the object store. +# If this is set and exists, it overrides any other objectstore settings.
+# object_store_config_file = object_store_conf.xml + +# Object store backend module (valid options are: disk, s3, swift, irods, +# distributed, hierarchical). +#object_store = disk + +# *Extremely* old Galaxy instances created datasets at the root of the +# `file_path` defined above. If your Galaxy instance has datasets at the root +# (instead of in directories composed by hashing the dataset id), you should +# enable this option to allow Galaxy to find them. +#object_store_check_old_style = False + +# Credentials used by certain (s3, swift) object store backends. +#os_access_key = <your cloud object store access key> +#os_secret_key = <your cloud object store secret key> +#os_bucket_name = <name of an existing object store bucket or container> + +# If using the 'swift' object store, you must specify the following connection +# properties. +#os_host = swift.rc.nectar.org.au +#os_port = 8888 +#os_is_secure = False +#os_conn_path = / + +# Reduced redundancy can be used only with the 's3' object store. +#os_use_reduced_redundancy = False + +# Path to the cache directory for object store backends that utilize a cache (s3, +# swift, irods). +#object_store_cache_path = database/files/ + +# Size (in GB) that the cache used by the object store should be limited to. +# If the value is not specified, the cache size will be limited only by the +# file system size. +#object_store_cache_size = 100 + +# Configuration file for the distributed object store, if object_store = +# distributed. See the sample at distributed_object_store_conf.xml.sample. +#distributed_object_store_config_file = None + + +# -- Mail and notification + +# Galaxy sends mail for various things: subscribing users to the mailing list +# if they request it, emailing password resets, notifications from the Galaxy +# Sample Tracking system, reporting dataset errors, and sending activation emails. +# To do this, it needs to send mail through an SMTP server, which you may define here (host:port). +# Galaxy will automatically try STARTTLS but will continue upon failure. +#smtp_server = None + +# If your SMTP server requires a username and password, you can provide them +# here (password in cleartext here, but if your server supports STARTTLS it +# will be sent over the network encrypted). +#smtp_username = None +#smtp_password = None + +# If your SMTP server requires SSL from the beginning of the connection: +# smtp_ssl = False + +# On the user registration form, users may choose to join the mailing list. +# This is the address of the list they'll be subscribed to. +#mailing_join_addr = galaxy-announce-join@bx.psu.edu + +# Datasets in an error state include a link to report the error. Those reports +# will be sent to this address. Error reports are disabled if no address is set. +# This email is also shown as a contact to the user in case of Galaxy misconfiguration and other events the user may encounter. +#error_email_to = None + +# The activation email is used as the sender ('from' field) for the account activation mail. +# We recommend using a string in the following format: Galaxy Project <galaxy-no-reply@example.com> +#activation_email = None + +# URL of the support resource for the Galaxy instance. Used in activation emails. +#instance_resource_url = http://wiki.galaxyproject.org/ + +# The e-mail domain blacklist is used to filter out users registering with disposable email +# addresses. If their address domain matches any domain on the blacklist, registration is refused. +#blacklist_file = config/disposable_email_blacklist.conf
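(Editor's illustration, not part of this changeset: the blacklist file is assumed to list one blocked domain per line, e.g.

    mailinator.com
    trashmail.example

Both entries here are arbitrary examples.)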
+ +# The registration warning message is used to discourage people from registering multiple accounts. Applies mostly to the main Galaxy instance. +# If no message is specified, the warning box will not be shown. +#registration_warning_message = Please register only one account - we provide this service free of charge and have limited computational resources. Multi-accounts are tracked and will be subjected to account termination and data deletion. + +# When users opt to reset passwords, new ones are created; this option +# specifies the length of these passwords. +#reset_password_length = 15 + + +# -- Account activation + +# This is the global flag for the user account activation feature. If set to "False", the rest of the account +# activation configuration is ignored and user activation is disabled (i.e., accounts are active upon registration). +# Note that activation also will not work if no SMTP server is defined. +#user_activation_on = False + +# Activation grace period. Activation is not forced (login is not disabled) until +# the grace period has passed. Users under the grace period can't run jobs (see inactivity_box_content). +# In hours. Default is 3. Enter 0 to disable the grace period. +# Users with OpenID logins have a grace period forever. +#activation_grace_period = 0 + +# Used for the warning box for inactive accounts (unable to run jobs). +# In use only if activation_grace_period is set. +#inactivity_box_content = Your account has not been activated yet. Feel free to browse around and see what's available, but you won't be able to upload data or run jobs until you have verified your email address. + + +# -- Display sites + +# Galaxy can display data in various external browsers. These options specify +# which browsers should be available. URLs and builds available at these +# browsers are defined in the specified files. + +# If use_remote_user = True, display application servers will be denied access +# to Galaxy and so displaying datasets in these sites will fail. +# display_servers contains a list of hostnames which should be allowed to +# bypass security to display datasets. Please be aware that there are security +# implications if this is allowed. More details (including required changes to +# the proxy server config) are available in the Apache proxy documentation on +# the wiki. +# +# The list of servers in this sample config is for the UCSC Main, Test and +# Archaea browsers, but the default if left commented is to not allow any +# display sites to bypass security (you must uncomment the line below to allow +# them). +#display_servers = hgw1.cse.ucsc.edu,hgw2.cse.ucsc.edu,hgw3.cse.ucsc.edu,hgw4.cse.ucsc.edu,hgw5.cse.ucsc.edu,hgw6.cse.ucsc.edu,hgw7.cse.ucsc.edu,hgw8.cse.ucsc.edu,lowepub.cse.ucsc.edu + +# To disable the old-style display applications that are hardcoded into datatype classes, +# set enable_old_display_applications = False. +# This may be desirable due to using the new-style, XML-defined, display applications that +# have been defined for many of the datatypes that have the old-style. +# There is also a potential security concern with the old-style applications, +# where a malicious party could provide a link that appears to reference the Galaxy server, +# but contains a redirect to a third-party server, tricking a Galaxy user into accessing said +# site. +#enable_old_display_applications = True + +# -- Next gen LIMS interface on top of existing Galaxy Sample/Request management code.
+ +use_nglims = False +nglims_config_file = tool-data/nglims.yaml + +# -- UI Localization + +# Show a message box under the masthead. +#message_box_visible = False +#message_box_content = None +#message_box_class = info + +# Append "/{brand}" to the "Galaxy" text in the masthead. +#brand = None + +# Format string used when showing date and time information. +# The string may contain: +# - the directives used by Python time.strftime() function (see http://docs.python.org/2/library/time.html#time.strftime ), +# - $locale (complete format string for the server locale), +# - $iso8601 (complete format string as specified by ISO 8601 international standard). +# pretty_datetime_format = $locale (UTC) + +# The URL of the page to display in Galaxy's middle pane when loaded. This can be +# an absolute or relative URL. +#welcome_url = /static/welcome.html + +# The URL linked by the "Galaxy/brand" text. +#logo_url = / + +# The URL linked by the "Wiki" link in the "Help" menu. +#wiki_url = http://wiki.galaxyproject.org/ + +# The URL linked by the "Support" link in the "Help" menu. +#support_url = http://wiki.galaxyproject.org/Support + +# The URL linked by the "How to Cite Galaxy" link in the "Help" menu. +#citation_url = http://wiki.galaxyproject.org/CitingGalaxy + +#The URL linked by the "Search" link in the "Help" menu. +#search_url = http://galaxyproject.org/search/usegalaxy/ + +#The URL linked by the "Mailing Lists" link in the "Help" menu. +#mailing_lists_url = http://wiki.galaxyproject.org/MailingLists + +#The URL linked by the "Videos" link in the "Help" menu. +#videos_url = http://vimeo.com/galaxyproject + +# The URL linked by the "Terms and Conditions" link in the "Help" menu, as well +# as on the user registration and login forms and in the activation emails. +#terms_url = None + +# The URL linked by the "Galaxy Q&A" link in the "Help" menu +# The Galaxy Q&A site is under development; when the site is done, this URL +# will be set and uncommented. +#qa_url = + +# Serve static content, which must be enabled if you're not serving it via a +# proxy server. These options should be self explanatory and so are not +# documented individually. You can use these paths (or ones in the proxy +# server) to point to your own styles. +static_enabled = True +static_cache_time = 360 +static_dir = %(here)s/static/ +static_images_dir = %(here)s/static/images +static_favicon_dir = %(here)s/static/favicon.ico +static_scripts_dir = %(here)s/static/scripts/ +static_style_dir = %(here)s/static/june_2007_style/blue +static_robots_txt = %(here)s/static/robots.txt + +# Pack javascript at launch (/static/scripts/*.js) +# This only happens if the modified timestamp of the source .js is newer +# than the version (if it exists) in /static/scripts/packed/ +# Note that this requires java > 1.4 for executing yuicompressor.jar +#pack_scripts = False + +# Cloud Launch + +#enable_cloud_launch = False +#cloudlaunch_default_ami = ami-a7dbf6ce + +# Incremental Display Options + +#display_chunk_size = 65536 + +# -- Advanced proxy features + +# For help on configuring the Advanced proxy features, see: +# http://usegalaxy.org/production + +# Apache can handle file downloads (Galaxy-to-user) via mod_xsendfile. Set +# this to True to inform Galaxy that mod_xsendfile is enabled upstream. +#apache_xsendfile = False + +# The same download handling can be done by nginx using X-Accel-Redirect. 
This +# should be set to the path defined in the nginx config as an internal redirect +# with access to Galaxy's data files (see documentation linked above). +#nginx_x_accel_redirect_base = False + +# nginx can make use of mod_zip to create zip files containing multiple library +# files. If using X-Accel-Redirect, this can be the same value as that option. +#nginx_x_archive_files_base = False + +# If using compression in the upstream proxy server, use this option to disable +# gzipping of library .tar.gz and .zip archives, since the proxy server will do +# it faster on the fly. +#upstream_gzip = False + +# nginx can also handle file uploads (user-to-Galaxy) via nginx_upload_module. +# Configuration for this is complex and explained in detail in the +# documentation linked above. The upload store is a temporary directory in +# which files uploaded by the upload module will be placed. +#nginx_upload_store = False + +# This value overrides the action set on the file upload form, e.g. the web +# path where the nginx_upload_module has been configured to intercept upload +# requests. +#nginx_upload_path = False + +# -- Logging and Debugging + +# Verbosity of console log messages. Acceptable values can be found here: +# http://docs.python.org/library/logging.html#logging-levels +#log_level = DEBUG + +# Print database operations to the server log (warning, quite verbose!). +#database_engine_option_echo = False + +# Print database pool operations to the server log (warning, quite verbose!). +#database_engine_option_echo_pool = False + +# Turn on logging of application events and some user events to the database. +#log_events = True + +# Turn on logging of user actions to the database. Actions currently logged are +# grid views, tool searches, and use of "recently" used tools menu. The +# log_events and log_actions functionality will eventually be merged. +#log_actions = True + +# Sanitize All HTML Tool Output +# By default, all tool output served as 'text/html' will be sanitized +# thoroughly. This can be disabled if you have special tools that require +# unaltered output. +#sanitize_all_html = True + +# By default Galaxy will serve non-HTML tool output that may potentially +# contain browser executable JavaScript content as plain text. This will for +# instance cause SVG datasets to not render properly and so may be disabled +# by setting the following option to True. +#serve_xss_vulnerable_mimetypes = False + +# Debug enables access to various config options useful for development and +# debugging: use_lint, use_profile, use_printdebug and use_interactive. It +# also causes the files used by PBS/SGE (submission script, output, and error) +# to remain on disk after the job is complete. Debug mode is disabled if +# commented, but is uncommented by default in the sample config. +debug = True + +# Check for WSGI compliance. +#use_lint = False + +# Run the Python profiler on each request. +#use_profile = False + +# Intercept print statements and show them on the returned page. +#use_printdebug = True + +# Enable live debugging in your browser. This should NEVER be enabled on a +# public site. Enabled in the sample config for development. +use_interactive = True + +# Write thread status periodically to 'heartbeat.log', (careful, uses disk +# space rapidly!). Useful to determine why your processes may be consuming a +# lot of CPU. +#use_heartbeat = False + +# Enable the memory debugging interface (careful, negatively impacts server +# performance). 
+#use_memdump = False + +# -- Data Libraries + +# These library upload options are described in much more detail in the wiki: +# https://wiki.galaxyproject.org/Admin/DataLibraries/UploadingLibraryFiles + +# Add an option to the library upload form which allows administrators to +# upload a directory of files. +#library_import_dir = None + +# Add an option to the library upload form which allows authorized +# non-administrators to upload a directory of files. The configured directory +# must contain sub-directories named the same as the non-admin user's Galaxy +# login ( email ). The non-admin user is restricted to uploading files or +# sub-directories of files contained in their directory. +#user_library_import_dir = None + +# Add an option to the admin library upload tool allowing admins to paste +# filesystem paths to files and directories in a box, and these paths will be +# added to a library. Set to True to enable. Please note the security +# implication that this will give Galaxy Admins access to anything your Galaxy +# user has access to. +#allow_library_path_paste = False + +# Users may choose to download multiple files from a library in an archive. By +# default, Galaxy allows users to select from a few different archive formats +# if testing shows that Galaxy is able to create files using these formats. +# Specific formats can be disabled with this option; separate more than one +# format with commas. Available formats are currently 'zip', 'gz', and 'bz2'. +#disable_library_comptypes = + +# Some sequencer integration features in beta allow you to automatically +# transfer datasets. This is done using a lightweight transfer manager which +# runs outside of Galaxy (but is spawned by it automatically). Galaxy will +# communicate with this manager over the port specified here. +#transfer_manager_port = 8163 + +# Search data libraries with whoosh. +#enable_whoosh_library_search = True +# Whoosh indexes are stored in this directory. +#whoosh_index_dir = database/whoosh_indexes + +# Search data libraries with lucene. +#enable_lucene_library_search = False +# Maximum file size to index for searching, in MB. +#fulltext_max_size = 500 +#fulltext_noindex_filetypes = bam,sam,wig,bigwig,fasta,fastq,fastqsolexa,fastqillumina,fastqsanger +# Base URL of the server providing search functionality using lucene. +#fulltext_url = http://localhost:8081 + +# -- Users and Security + +# Galaxy encodes various internal values when these values will be output in +# some format (for example, in a URL or cookie). You should set a key to be +# used by the algorithm that encodes and decodes these values. It can be any +# string. If left unchanged, anyone could construct a cookie that would grant +# them access to others' sessions. +#id_secret = USING THE DEFAULT IS NOT SECURE! + +# User authentication can be delegated to an upstream proxy server (usually +# Apache). The upstream proxy should set a REMOTE_USER header in the request. +# Enabling remote user disables regular logins. For more information, see: +# https://wiki.galaxyproject.org/Admin/Config/ApacheProxy +#use_remote_user = False + +# If use_remote_user is enabled and your external authentication +# method just returns bare usernames, set a default mail domain to be appended +# to usernames, to become your Galaxy usernames (email addresses). +#remote_user_maildomain = None + +# If use_remote_user is enabled, the header that the upstream proxy provides +# the remote username in defaults to HTTP_REMOTE_USER (the 'HTTP_' is prepended +# by WSGI). This option allows you to change the header. Note that you still need +# to prepend 'HTTP_' to the header in this option, but your proxy server should +# *not* include 'HTTP_' at the beginning of the header name. +#remote_user_header = HTTP_REMOTE_USER
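(Editor's illustration, not part of this changeset: if your proxy instead sent the username in a header named X-Remote-User, WSGI would expose it as HTTP_X_REMOTE_USER, so you would set:

    remote_user_header = HTTP_X_REMOTE_USER

The header name here is a hypothetical example.)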
+ +# If use_remote_user is enabled, you can set this to a URL that will log your +# users out. +#remote_user_logout_href = None + +# If your proxy and/or authentication source does not normalize e-mail +# addresses or user names being passed to Galaxy, set the following option +# to True to force these to lower case. +#normalize_remote_user_email = False + +# Administrative users - set this to a comma-separated list of valid Galaxy +# users (email addresses). These users will have access to the Admin section +# of the server, and will have access to create users, groups, roles, +# libraries, and more. For more information, see: +# https://wiki.galaxyproject.org/Admin/Interface +#admin_users = None + +# Force everyone to log in (disable anonymous access). +#require_login = False + +# Allow unregistered users to create new accounts (otherwise, they will have to +# be created by an admin). +#allow_user_creation = True + +# Allow administrators to delete accounts. +#allow_user_deletion = False + +# Allow administrators to log in as other users (useful for debugging). +#allow_user_impersonation = False + +# Allow users to remove their datasets from disk immediately (otherwise, +# datasets will be removed after a time period specified by an administrator in +# the cleanup scripts run via cron). +#allow_user_dataset_purge = False + +# By default, users' data will be public, but setting this to True will cause +# it to be private. Does not affect existing users and data, only ones created +# after this option is set. Users may still change their default back to +# public. +#new_user_dataset_access_role_default_private = False + +# -- Beta features + +# Enable Galaxy to communicate directly with a sequencer. +#enable_sequencer_communication = False + +# Enable authentication via OpenID. Allows users to log in to their Galaxy +# account by authenticating with an OpenID provider. +#enable_openid = False +#openid_config_file = openid_conf.xml +#openid_consumer_cache_path = database/openid_consumer_cache + +# Optional list of email addresses of API users who can make calls on behalf of +# other users. +#api_allow_run_as = None + +# Master key that allows many API admin actions to be used without actually +# having a defined admin user in the database/config. Only set this if you need +# to bootstrap Galaxy; you probably do not want to set this on public servers. +#master_api_key = changethis + +# Enable tool tags (associating tools with tags). This has its own option +# since its implementation has a few performance implications on startup for +# large servers. +#enable_tool_tags = False + +# Enable a feature when running workflows. When enabled, default datasets +# are selected for "Set at Runtime" inputs from the history such that the +# same input will not be selected twice, unless there are more inputs than +# compatible datasets in the history. +# When False, the most recently added compatible item in the history will +# be used for each "Set at Runtime" input, independent of others in the workflow. +#enable_unique_workflow_defaults = False + +# The URL to the myExperiment instance being used (omit scheme but include port). +#myexperiment_url = www.myexperiment.org:80 + +# Enable Galaxy's "Upload via FTP" interface.
You'll need to install and +# configure an FTP server (we've used ProFTPd since it can use Galaxy's +# database for authentication) and set the following two options. + +# This should point to a directory containing subdirectories matching users' +# email addresses, where Galaxy will look for files. +#ftp_upload_dir = None + +# This should be the hostname of your FTP server, which will be provided to +# users in the help text. +#ftp_upload_site = None + +# Enable enforcement of quotas. Quotas can be set from the Admin interface. +#enable_quotas = False + +# This option allows users to see the full path of datasets via the "View +# Details" option in the history. Administrators can always see this. +#expose_dataset_path = False + +# Data manager configuration options +# Allow non-admin users to view available Data Manager options +#enable_data_manager_user_view = False +# File where Data Managers are configured +#data_manager_config_file = config/data_manager_conf.xml +# File where Tool Shed based Data Managers are configured +#shed_data_manager_config_file = shed_data_manager_conf.xml +# Directory to store Data Manager based tool-data; defaults to tool_data_path +#galaxy_data_manager_data_path = tool-data + +# -- Job Execution + +# To increase performance of job execution and the web interface, you can +# separate Galaxy into multiple processes. There is more than one way to do +# this, and they are explained in detail in the documentation: + +# https://wiki.galaxyproject.org/Admin/Config/Performance/Scaling + +# By default, Galaxy manages and executes jobs from within a single process and +# notifies itself of new jobs via in-memory queues. Jobs are run locally on +# the system on which Galaxy is started. Advanced job running capabilities can +# be configured through the job configuration file. +#job_config_file = job_conf.xml + +# In multiprocess configurations, notification between processes about new jobs +# is done via the database. In single process configurations, this is done in +# memory, which is a bit quicker. Galaxy tries to automatically determine +# which method it should use based on your handler configuration in the job +# config file, but you can explicitly override this behavior by setting the +# following option to True or False. +#track_jobs_in_database = None + +# This enables splitting of jobs into tasks, if specified by the particular tool config. +# This is a new feature and not recommended for production servers yet. +#use_tasked_jobs = False +#local_task_queue_workers = 2 + +# Enable job recovery (if Galaxy is restarted while cluster jobs are running, +# it can "recover" them when it starts). This is not safe to use if you are +# running more than one Galaxy server using the same database. +#enable_job_recovery = True + +# Although it is fairly reliable, setting metadata can occasionally fail. In +# these instances, you can choose to retry setting it internally or leave it in +# a failed state (since retrying internally may cause the Galaxy process to be +# unresponsive). If this option is set to False, the user will be given the +# option to retry externally, or set metadata manually (when possible).
+#retry_metadata_internally = True + +# If (for example) you run on a cluster and your datasets (by default, +# database/files/) are mounted read-only, this option will override tool output +# paths to write outputs to the working directory instead, and the job manager +# will move the outputs to their proper place in the dataset directory on the +# Galaxy server after the job completes. +#outputs_to_working_directory = False + +# If your network filesystem's caching prevents the Galaxy server from seeing +# the job's stdout and stderr files when it completes, you can retry reading +# these files. The job runner will retry the number of times specified below, +# waiting 1 second between tries. For NFS, you may want to try the -noac mount +# option (Linux) or -actimeo=0 (Solaris). +#retry_job_output_collection = 0 + +# Clean up various bits of jobs left on the filesystem after completion. These +# bits include the job working directory, external metadata temporary files, +# and DRM stdout and stderr files (if using a DRM). Possible values are: +# always, onsuccess, never +#cleanup_job = always + +# File to source to set up the environment when running jobs. By default, the +# environment in which the Galaxy server starts is used when running jobs +# locally, and the environment set up per the DRM's submission method and +# policy is used when running jobs on a cluster (try testing with `qsub` on the +# command line). environment_setup_file can be set to the path of a file on +# the cluster that should be sourced by the user to set up the environment +# prior to running tools. This can be especially useful for running jobs as +# the actual user, to remove the need to configure each user's environment +# individually. +#environment_setup_file = None + + +# Optional file containing the definition of job resource data entry fields. +# These fields will be presented to users in the tool forms and allow them to +# override default job resources such as number of processors, memory, and walltime. +#job_resource_params_file = job_resource_params_conf.xml + +# If using job concurrency limits (configured in job_config_file), several +# extra database queries must be performed to determine the number of jobs a +# user has dispatched to a given destination. By default, these queries will +# happen for every job that is waiting to run, but if cache_user_job_count is +# set to True, it will only happen once per iteration of the handler queue. +# Although better for performance due to reduced queries, the tradeoff is a +# greater possibility that jobs will be dispatched past the configured limits +# if running many handlers. +#cache_user_job_count = False + +# ToolBox filtering +# Modules from lib/galaxy/tools/filters/ can be specified in the following lines. +# tool_* filters will be applied for all users and cannot be changed by them. +# user_tool_* filters will be shown under user preferences and can be toggled +# on and off at runtime. +#tool_filters +#tool_label_filters +#tool_section_filters +#user_tool_filters = examples:restrict_upload_to_admins, examples:restrict_encode +#user_tool_section_filters = examples:restrict_text +#user_tool_label_filters = examples:restrict_upload_to_admins, examples:restrict_encode + +# Galaxy Application Internal Message Queue + +# Galaxy uses AMQP internally (TODO: more documentation on what for).
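As a concrete note on the internal queue: the commented default below is a kombu connection string, so opening it looks roughly like this (a sketch assuming the kombu and SQLAlchemy libraries and Galaxy's database/ directory; the queue name is made up):

    # kombu can back a queue with a database via its 'sqlalchemy' transport,
    # which is what the amqp_internal_connection default below uses.
    from kombu import Connection

    conn = Connection("sqlalchemy+sqlite:///./database/control.sqlite")
    queue = conn.SimpleQueue("demo-internal-queue")  # hypothetical queue name
    queue.put({"event": "reload"})
    print(queue.get(block=True, timeout=1).payload)  # -> {'event': 'reload'}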
+# For examples, see http://ask.github.io/kombu/userguide/connections.html +# +# Without specifying anything here, Galaxy will first attempt to use your +# specified database_connection above. If that's not specified either, Galaxy +# will automatically create and use a separate sqlite database located in your +# <galaxy>/database folder (indicated in the commented out line below). + +#amqp_internal_connection = sqlalchemy+sqlite:///./database/control.sqlite?isolation_level=IMMEDIATE + + + +# ---- Galaxy External Message Queue ------------------------------------------------- + +# Galaxy uses the AMQP protocol to receive messages from external sources like +# bar code scanners. Galaxy has been tested against the RabbitMQ AMQP implementation. +# For Galaxy to receive messages from a message queue, the RabbitMQ server has +# to be set up with a user account and other parameters listed below. The 'host' +# and 'port' fields should point to where the RabbitMQ server is running. + +[galaxy_amqp] + +#host = 127.0.0.1 +#port = 5672 +#userid = galaxy +#password = galaxy +#virtual_host = galaxy_messaging_engine +#queue = galaxy_queue +#exchange = galaxy_exchange +#routing_key = bar_code_scanner +#rabbitmqctl_path = /path/to/rabbitmqctl + diff -r e4daf2910a9989eda0efe2b532adb860999532c3 -r 21fb6c2f4349c358321b9344b9dbda20264b78fb config/universe_wsgi.ini.sample --- a/config/universe_wsgi.ini.sample +++ /dev/null @@ -1,882 +0,0 @@ -# -# Galaxy is configured by default to be useable in a single-user development -# environment. To tune the application for a multi-user production -# environment, see the documentation at: -# -# http://usegalaxy.org/production -# - -# Throughout this sample configuration file, except where stated otherwise, -# uncommented values override the default if left unset, whereas commented -# values are set to the default value. -# Examples of many of these options are explained in more detail in the wiki: -# -# https://wiki.galaxyproject.org/Admin/Config -# -# Config hackers are encouraged to check there before asking for help. - -# ---- HTTP Server ---------------------------------------------------------- - -# Configuration of the internal HTTP server. - -[server:main] - -# The internal HTTP server to use. Currently only Paste is provided. This -# option is required. -use = egg:Paste#http - -# The port on which to listen. -#port = 8080 - -# The address on which to listen. By default, only listen to localhost (Galaxy -# will not be accessible over the network). Use '0.0.0.0' to listen on all -# available network interfaces. -#host = 127.0.0.1 - -# Use a threadpool for the web server instead of creating a thread for each -# request. -use_threadpool = True - -# Number of threads in the web server thread pool. -#threadpool_workers = 10 - -# Set the number of seconds a thread can work before you should kill it (assuming it will never finish) to 3 hours. -threadpool_kill_thread_limit = 10800 - -# ---- Filters -------------------------------------------------------------- - -# Filters sit between Galaxy and the HTTP server. - -# These filters are disabled by default. They can be enabled with -# 'filter-with' in the [app:main] section below. - -# Define the gzip filter. -[filter:gzip] -use = egg:Paste#gzip - -# Define the proxy-prefix filter. -[filter:proxy-prefix] -use = egg:PasteDeploy#prefix -prefix = /galaxy - -# ---- Galaxy --------------------------------------------------------------- - -# Configuration of the Galaxy application.
- -[app:main] - -# -- Application and filtering - -# The factory for the WSGI application. This should not be changed. -paste.app_factory = galaxy.web.buildapp:app_factory - -# If not running behind a proxy server, you may want to enable gzip compression -# to decrease the size of data transferred over the network. If using a proxy -# server, please enable gzip compression there instead. -#filter-with = gzip - -# If running behind a proxy server and Galaxy is served from a subdirectory, -# enable the proxy-prefix filter and set the prefix in the -# [filter:proxy-prefix] section above. -#filter-with = proxy-prefix - -# If proxy-prefix is enabled and you're running more than one Galaxy instance -# behind one hostname, you will want to set this to the same path as the prefix -# in the filter above. This value becomes the "path" attribute set in the -# cookie so the cookies from each instance will not clobber each other. -#cookie_path = None - -# -- Database - -# By default, Galaxy uses a SQLite database at 'database/universe.sqlite'. You -# may use a SQLAlchemy connection string to specify an external database -# instead. This string takes many options which are explained in detail in the -# config file documentation. -#database_connection = sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE - -# If the server logs errors about not having enough database pool connections, -# you will want to increase these values, or consider running more Galaxy -# processes. -#database_engine_option_pool_size = 5 -#database_engine_option_max_overflow = 10 - -# If using MySQL and the server logs the error "MySQL server has gone away", -# you will want to set this to some positive value (7200 should work). -#database_engine_option_pool_recycle = -1 - -# If large database query results are causing memory or response time issues in -# the Galaxy process, leave the result on the server instead. This option is -# only available for PostgreSQL and is highly recommended. -#database_engine_option_server_side_cursors = False - -# Log all database transactions, can be useful for debugging and performance -# profiling. Logging is done via Python's 'logging' module under the qualname -# 'galaxy.model.orm.logging_connection_proxy' -#database_query_profiling_proxy = False - -# By default, Galaxy will use the same database to track user data and -# tool shed install data. There are many situtations in which it is -# valuable to seperate these - for instance bootstrapping fresh Galaxy -# instances with pretested installs. The following optin can be used to -# separate the tool shed install database (all other options listed above -# but prefixed with install_ are also available). -#install_database_connection = sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE - -# -- Files and directories - -# Dataset files are stored in this directory. -#file_path = database/files - -# Temporary files are stored in this directory. -#new_file_path = database/tmp - -# Tool config files, defines what tools are available in Galaxy. -# Tools can be locally developed or installed from Galaxy tool sheds. -#tool_config_file = tool_conf.xml,shed_tool_conf.xml - -# Enable / disable checking if any tools defined in the above non-shed tool_config_files -# (i.e., tool_conf.xml) have been migrated from the Galaxy code distribution to the Tool -# Shed. 
This setting should generally be set to False only for development Galaxy environments -# that are often rebuilt from scratch where migrated tools do not need to be available in the -# Galaxy tool panel. If the following setting remains commented, the default setting will be True. -#check_migrate_tools = True - -# Tool config maintained by tool migration scripts. If you use the migration -# scripts to install tools that have been migrated to the tool shed upon a new -# release, they will be added to this tool config file. -#migrated_tools_config = migrated_tools_conf.xml - -# File that contains the XML section and tool tags from all tool panel config -# files integrated into a single file that defines the tool panel layout. This -# file can be changed by the Galaxy administrator to alter the layout of the -# tool panel. If not present, Galaxy will create it. -#integrated_tool_panel_config = integrated_tool_panel.xml - -# Default path to the directory containing the tools defined in tool_conf.xml. -# Other tool config files must include the tool_path as an attribute in the <toolbox> tag. -#tool_path = tools - -# Path to the directory in which tool dependencies are placed. This is used by -# the tool shed to install dependencies and can also be used by administrators -# to manually install or link to dependencies. For details, see: -# https://wiki.galaxyproject.org/Admin/Config/ToolDependencies -# If this option is not set to a valid path, installing tools with dependencies -# from the Tool Shed will fail. -#tool_dependency_dir = None - -# File containing the Galaxy Tool Sheds that should be made available to -# install from in the admin interface -#tool_sheds_config_file = tool_sheds_conf.xml - -# Enable automatic polling of relative tool sheds to see if any updates -# are available for installed repositories. Ideally only one Galaxy -# server process should be able to check for repository updates. The -# setting for hours_between_check should be an integer between 1 and 24. -#enable_tool_shed_check = False -#hours_between_check = 12 - -# Enable use of an in-memory registry with bi-directional relationships -# between repositories (i.e., in addition to lists of dependencies for a -# repository, keep an in-memory registry of dependent items for each repository. -#manage_dependency_relationships = False - -# XML config file that contains data table entries for the ToolDataTableManager. This file is manually -# maintained by the Galaxy administrator. -#tool_data_table_config_path = tool_data_table_conf.xml - -# XML config file that contains additional data table entries for the ToolDataTableManager. This file -# is automatically generated based on the current installed tool shed repositories that contain valid -# tool_data_table_conf.xml.sample files. At the time of installation, these entries are automatically -# added to the following file, which is parsed and applied to the ToolDataTableManager at server start up. -#shed_tool_data_table_config = shed_tool_data_table_conf.xml - -# Directory where data used by tools is located, see the samples in that -# directory and the wiki for help: -# https://wiki.galaxyproject.org/Admin/DataIntegration -#tool_data_path = tool-data - -# File containing old-style genome builds -#builds_file_path = tool-data/shared/ucsc/builds.txt - -# Directory where chrom len files are kept, currently mainly used by trackster -#len_file_path = tool-data/shared/ucsc/chrom - -# Datatypes config file, defines what data (file) types are available in -# Galaxy. 
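Since this file maps extensions to datatype classes (see the datatypes_conf.xml.sample added earlier in this changeset), a rough standard-library sketch of reading it may be useful (not Galaxy's registry code; the helper name is made up):

    import xml.etree.ElementTree as ET

    def load_datatypes(path):
        # Map each declared extension (e.g. "bam") to its implementing class
        # (e.g. "galaxy.datatypes.binary:Bam").
        datatypes = {}
        for dt in ET.parse(path).getroot().iter("datatype"):
            datatypes[dt.get("extension")] = dt.get("type")
        return datatypes

    # e.g. load_datatypes("config/datatypes_conf.xml.sample")["bam"]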
-#datatypes_config_file = datatypes_conf.xml - -# Disable the 'Auto-detect' option for file uploads -#datatypes_disable_auto = False - -# Visualizations config directory: where to look for individual visualization plugins. -# The path is relative to the Galaxy root dir. To use an absolute path begin the path -# with '/'. Defaults to "config/plugins/visualizations". -#visualization_plugins_directory = config/plugins/visualizations - -# Each job is given a unique empty directory as its current working directory. -# This option defines in what parent directory those directories will be -# created. -#job_working_directory = database/job_working_directory - -# If using a cluster, Galaxy will write job scripts and stdout/stderr to this -# directory. -#cluster_files_directory = database/pbs - -# Mako templates are compiled as needed and cached for reuse, this directory is -# used for the cache -#template_cache_path = database/compiled_templates - -# Citation related caching. Tool citations information maybe fetched from -# external sources such as http://dx.doi.org/ by Galaxy - the following parameters -# can be used to control the caching used to store this information. -#citation_cache_type = file -#citation_cache_data_dir = database/citations/data -#citation_cache_lock_dir = database/citations/lock - -# External service types config file, defines what types of external_services configurations -# are available in Galaxy. -#external_service_type_config_file = external_service_types_conf.xml - -# Path to the directory containing the external_service_types defined in the config. -#external_service_type_path = external_service_types - -# Tools with a number of outputs not known until runtime can write these -# outputs to a directory for collection by Galaxy when the job is done. -# Previously, this directory was new_file_path, but using one global directory -# can cause performance problems, so using job_working_directory ('.' or cwd -# when a job is run) is encouraged. By default, both are checked to avoid -# breaking existing tools. -#collect_outputs_from = new_file_path,job_working_directory - -# -- Data Storage (Object Store) -# -# Configuration file for the object store -# If this is set and exists, it overrides any other objectstore settings. -# object_store_config_file = object_store_conf.xml - -# Object store backend module (valid options are: disk, s3, swift, irods, -# distributed, hierarchical) -#object_store = disk - -# *Extremely* old Galaxy instances created datasets at the root of the -# `file_path` defined above. If your Galaxy instance has datasets at the root -# (instead of in directories composed by hashing the dataset id), you should -# enable this option to allow Galaxy to find them. -#object_store_check_old_style = False - -# Credentials used by certain (s3, swift) object store backends -#os_access_key = <your cloud object store access key> -#os_secret_key = <your cloud object store secret key> -#os_bucket_name = <name of an existing object store bucket or container> - -# If using 'swift' object store, you must specify the following connection -# properties -#os_host = swift.rc.nectar.org.au -#os_port = 8888 -#os_is_secure = False -#os_conn_path = / - -# Reduced redundancy can be used only with the 's3' object store -#os_use_reduced_redundancy = False - -# Path to cache directory for object store backends that utilize a cache (s3, -# swift, irods) -#object_store_cache_path = database/files/ - -# Size (in GB) that the cache used by object store should be limited to. 
-# If the value is not specified, the cache size will be limited only by the -# file system size. -#object_store_cache_size = 100 - -# Configuration file for the distributed object store, if object_store = -# distributed. See the sample at distributed_object_store_conf.xml.sample -#distributed_object_store_config_file = None - - -# -- Mail and notification - -# Galaxy sends mail for various things: Subscribing users to the mailing list -# if they request it, emailing password resets, notification from the Galaxy -# Sample Tracking system, reporting dataset errors, and sending activation emails. -# To do this, it needs to send mail through an SMTP server, which you may define here (host:port). -# Galaxy will automatically try STARTTLS but will continue upon failure. -#smtp_server = None - -# If your SMTP server requires a username and password, you can provide them -# here (password in cleartext here, but if your server supports STARTTLS it -# will be sent over the network encrypted). -#smtp_username = None -#smtp_password = None - -# If your SMTP server requires SSL from the beginning of the connection -# smtp_ssl = False - -# On the user registration form, users may choose to join the mailing list. -# This is the address of the list they'll be subscribed to. -#mailing_join_addr = galaxy-announce-join@bx.psu.edu - -# Datasets in an error state include a link to report the error. Those reports -# will be sent to this address. Error reports are disabled if no address is set. -# Also this email is shown as a contact to user in case of Galaxy misconfiguration and other events user may encounter. -#error_email_to = None - -# Activation email is used as a sender ('from' field) for the account activation mail. -# We recommend using string in the following format: Galaxy Project <galaxy-no-reply@example.com> -#activation_email = None - -# URL of the support resource for the galaxy instance. Used in activation emails. -#instance_resource_url = http://wiki.galaxyproject.org/ - -# E-mail domains blacklist is used for filtering out users that are using disposable email address -# during the registration. If their address domain matches any domain in the BL they are refused the registration. -#blacklist_file = config/disposable_email_blacklist.conf - -# Registration warning message is used to discourage people from registering multiple accounts. Applies mostly for the main Galaxy instance. -# If no message specified the warning box will not be shown. -#registration_warning_message = Please register only one account - we provide this service free of charge and have limited computational resources. Multi-accounts are tracked and will be subjected to account termination and data deletion. - -# When users opt to reset passwords, new ones are created, this option -# specifies the length of these passwords. -#reset_password_length = 15 - - -# -- Account activation - -# This is user account activation feature global flag. If set to "False" the rest of the Account -# activation configuration is ignored and user activation is disabled (a.k.a. accounts are active since registration). -# Note the activation is also not working in case the smtp server is not defined. -#user_activation_on = False - -# Activation grace period. Activation is not forced (login is not disabled) until -# grace period has passed. Users under grace period can't run jobs (see inactivity_box_content). -# In hours. Default is 3. Enter 0 to disable grace period. -# Users with OpenID logins have grace period forever. 
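The grace-period check described above reduces to simple time arithmetic; a hedged sketch (the helper name is hypothetical; the semantics of the 0 value follow the comment above, everything else is assumption):

    from datetime import datetime, timedelta

    def login_allowed_without_activation(created, grace_period_hours):
        # 0 disables the grace period entirely; otherwise login (though not
        # job execution) is permitted until the period elapses.
        if grace_period_hours == 0:
            return False
        return datetime.utcnow() < created + timedelta(hours=grace_period_hours)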
-#activation_grace_period = 0 - -# Used for warning box for inactive accounts (unable to run jobs). -# In use only if activation_grace_period is set. -#inactivity_box_content = Your account has not been activated yet. Feel free to browse around and see what's available, but you won't be able to upload data or run jobs until you have verified your email address. - - -# -- Display sites - -# Galaxy can display data at various external browsers. These options specify -# which browsers should be available. URLs and builds available at these -# browsers are defined in the specifield files. - -# If use_remote_user = True, display application servers will be denied access -# to Galaxy and so displaying datasets in these sites will fail. -# display_servers contains a list of hostnames which should be allowed to -# bypass security to display datasets. Please be aware that there are security -# implications if this is allowed. More details (including required changes to -# the proxy server config) are available in the Apache proxy documentation on -# the wiki. -# -# The list of servers in this sample config are for the UCSC Main, Test and -# Archaea browsers, but the default if left commented is to not allow any -# display sites to bypass security (you must uncomment the line below to allow -# them). -#display_servers = hgw1.cse.ucsc.edu,hgw2.cse.ucsc.edu,hgw3.cse.ucsc.edu,hgw4.cse.ucsc.edu,hgw5.cse.ucsc.edu,hgw6.cse.ucsc.edu,hgw7.cse.ucsc.edu,hgw8.cse.ucsc.edu,lowepub.cse.ucsc.edu - -# To disable the old-style display applications that are hardcoded into datatype classes, -# set enable_old_display_applications = False. -# This may be desirable due to using the new-style, XML-defined, display applications that -# have been defined for many of the datatypes that have the old-style. -# There is also a potential security concern with the old-style applications, -# where a malicious party could provide a link that appears to reference the Galaxy server, -# but contains a redirect to a third-party server, tricking a Galaxy user to access said -# site. -#enable_old_display_applications = True - -# -- Next gen LIMS interface on top of existing Galaxy Sample/Request management code. - -use_nglims = False -nglims_config_file = tool-data/nglims.yaml - -# -- UI Localization - -# Show a message box under the masthead. -#message_box_visible = False -#message_box_content = None -#message_box_class = info - -# Append "/{brand}" to the "Galaxy" text in the masthead. -#brand = None - -# Format string used when showing date and time information. -# The string may contain: -# - the directives used by Python time.strftime() function (see http://docs.python.org/2/library/time.html#time.strftime ), -# - $locale (complete format string for the server locale), -# - $iso8601 (complete format string as specified by ISO 8601 international standard). -# pretty_datetime_format = $locale (UTC) - -# The URL of the page to display in Galaxy's middle pane when loaded. This can be -# an absolute or relative URL. -#welcome_url = /static/welcome.html - -# The URL linked by the "Galaxy/brand" text. -#logo_url = / - -# The URL linked by the "Wiki" link in the "Help" menu. -#wiki_url = http://wiki.galaxyproject.org/ - -# The URL linked by the "Support" link in the "Help" menu. -#support_url = http://wiki.galaxyproject.org/Support - -# The URL linked by the "How to Cite Galaxy" link in the "Help" menu. -#citation_url = http://wiki.galaxyproject.org/CitingGalaxy - -#The URL linked by the "Search" link in the "Help" menu. 
-#search_url = http://galaxyproject.org/search/usegalaxy/ - -#The URL linked by the "Mailing Lists" link in the "Help" menu. -#mailing_lists_url = http://wiki.galaxyproject.org/MailingLists - -#The URL linked by the "Videos" link in the "Help" menu. -#videos_url = http://vimeo.com/galaxyproject - -# The URL linked by the "Terms and Conditions" link in the "Help" menu, as well -# as on the user registration and login forms and in the activation emails. -#terms_url = None - -# The URL linked by the "Galaxy Q&A" link in the "Help" menu -# The Galaxy Q&A site is under development; when the site is done, this URL -# will be set and uncommented. -#qa_url = - -# Serve static content, which must be enabled if you're not serving it via a -# proxy server. These options should be self explanatory and so are not -# documented individually. You can use these paths (or ones in the proxy -# server) to point to your own styles. -static_enabled = True -static_cache_time = 360 -static_dir = %(here)s/static/ -static_images_dir = %(here)s/static/images -static_favicon_dir = %(here)s/static/favicon.ico -static_scripts_dir = %(here)s/static/scripts/ -static_style_dir = %(here)s/static/june_2007_style/blue -static_robots_txt = %(here)s/static/robots.txt - -# Pack javascript at launch (/static/scripts/*.js) -# This only happens if the modified timestamp of the source .js is newer -# than the version (if it exists) in /static/scripts/packed/ -# Note that this requires java > 1.4 for executing yuicompressor.jar -#pack_scripts = False - -# Cloud Launch - -#enable_cloud_launch = False -#cloudlaunch_default_ami = ami-a7dbf6ce - -# Incremental Display Options - -#display_chunk_size = 65536 - -# -- Advanced proxy features - -# For help on configuring the Advanced proxy features, see: -# http://usegalaxy.org/production - -# Apache can handle file downloads (Galaxy-to-user) via mod_xsendfile. Set -# this to True to inform Galaxy that mod_xsendfile is enabled upstream. -#apache_xsendfile = False - -# The same download handling can be done by nginx using X-Accel-Redirect. This -# should be set to the path defined in the nginx config as an internal redirect -# with access to Galaxy's data files (see documentation linked above). -#nginx_x_accel_redirect_base = False - -# nginx can make use of mod_zip to create zip files containing multiple library -# files. If using X-Accel-Redirect, this can be the same value as that option. -#nginx_x_archive_files_base = False - -# If using compression in the upstream proxy server, use this option to disable -# gzipping of library .tar.gz and .zip archives, since the proxy server will do -# it faster on the fly. -#upstream_gzip = False - -# nginx can also handle file uploads (user-to-Galaxy) via nginx_upload_module. -# Configuration for this is complex and explained in detail in the -# documentation linked above. The upload store is a temporary directory in -# which files uploaded by the upload module will be placed. -#nginx_upload_store = False - -# This value overrides the action set on the file upload form, e.g. the web -# path where the nginx_upload_module has been configured to intercept upload -# requests. -#nginx_upload_path = False - -# -- Logging and Debugging - -# Verbosity of console log messages. Acceptable values can be found here: -# http://docs.python.org/library/logging.html#logging-levels -#log_level = DEBUG - -# Print database operations to the server log (warning, quite verbose!). 
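Judging by their names, the database_engine_option_* settings appear to map directly onto SQLAlchemy create_engine() keyword arguments; that mapping is an assumption on my part, illustrated by this sketch:

    # Assumed convention: strip the "database_engine_option_" prefix and pass
    # the remainder straight through to SQLAlchemy.
    from sqlalchemy import create_engine

    raw = {"database_engine_option_echo": True}  # e.g. from the parsed ini
    prefix = "database_engine_option_"
    engine_kwargs = {k[len(prefix):]: v for k, v in raw.items() if k.startswith(prefix)}
    engine = create_engine("sqlite:///./database/universe.sqlite", **engine_kwargs)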
-#database_engine_option_echo = False - -# Print database pool operations to the server log (warning, quite verbose!). -#database_engine_option_echo_pool = False - -# Turn on logging of application events and some user events to the database. -#log_events = True - -# Turn on logging of user actions to the database. Actions currently logged are -# grid views, tool searches, and use of "recently" used tools menu. The -# log_events and log_actions functionality will eventually be merged. -#log_actions = True - -# Sanitize All HTML Tool Output -# By default, all tool output served as 'text/html' will be sanitized -# thoroughly. This can be disabled if you have special tools that require -# unaltered output. -#sanitize_all_html = True - -# By default Galaxy will serve non-HTML tool output that may potentially -# contain browser executable JavaScript content as plain text. This will for -# instance cause SVG datasets to not render properly and so may be disabled -# by setting the following option to True. -#serve_xss_vulnerable_mimetypes = False - -# Debug enables access to various config options useful for development and -# debugging: use_lint, use_profile, use_printdebug and use_interactive. It -# also causes the files used by PBS/SGE (submission script, output, and error) -# to remain on disk after the job is complete. Debug mode is disabled if -# commented, but is uncommented by default in the sample config. -debug = True - -# Check for WSGI compliance. -#use_lint = False - -# Run the Python profiler on each request. -#use_profile = False - -# Intercept print statements and show them on the returned page. -#use_printdebug = True - -# Enable live debugging in your browser. This should NEVER be enabled on a -# public site. Enabled in the sample config for development. -use_interactive = True - -# Write thread status periodically to 'heartbeat.log', (careful, uses disk -# space rapidly!). Useful to determine why your processes may be consuming a -# lot of CPU. -#use_heartbeat = False - -# Enable the memory debugging interface (careful, negatively impacts server -# performance). -#use_memdump = False - -# -- Data Libraries - -# These library upload options are described in much more detail in the wiki: -# https://wiki.galaxyproject.org/Admin/DataLibraries/UploadingLibraryFiles - -# Add an option to the library upload form which allows administrators to -# upload a directory of files. -#library_import_dir = None - -# Add an option to the library upload form which allows authorized -# non-administrators to upload a directory of files. The configured directory -# must contain sub-directories named the same as the non-admin user's Galaxy -# login ( email ). The non-admin user is restricted to uploading files or -# sub-directories of files contained in their directory. -#user_library_import_dir = None - -# Add an option to the admin library upload tool allowing admins to paste -# filesystem paths to files and directories in a box, and these paths will be -# added to a library. Set to True to enable. Please note the security -# implication that this will give Galaxy Admins access to anything your Galaxy -# user has access to. -#allow_library_path_paste = False - -# Users may choose to download multiple files from a library in an archive. By -# default, Galaxy allows users to select from a few different archive formats -# if testing shows that Galaxy is able to create files using these formats. -# Specific formats can be disabled with this option, separate more than one -# format with commas. 
Available formats are currently 'zip', 'gz', and 'bz2'. -#disable_library_comptypes = - -# Some sequencer integration features in beta allow you to automatically -# transfer datasets. This is done using a lightweight transfer manager which -# runs outside of Galaxy (but is spawned by it automatically). Galaxy will -# communicate with this manager over the port specified here. -#transfer_manager_port = 8163 - -# Search data libraries with whoosh -#enable_whoosh_library_search = True -# Whoosh indexes are stored in this directory. -#whoosh_index_dir = database/whoosh_indexes - -# Search data libraries with lucene -#enable_lucene_library_search = False -# maxiumum file size to index for searching, in MB -#fulltext_max_size = 500 -#fulltext_noindex_filetypes = bam,sam,wig,bigwig,fasta,fastq,fastqsolexa,fastqillumina,fastqsanger -# base URL of server providing search functionality using lucene -#fulltext_url = http://localhost:8081 - -# -- Users and Security - -# Galaxy encodes various internal values when these values will be output in -# some format (for example, in a URL or cookie). You should set a key to be -# used by the algorithm that encodes and decodes these values. It can be any -# string. If left unchanged, anyone could construct a cookie that would grant -# them access to others' sessions. -#id_secret = USING THE DEFAULT IS NOT SECURE! - -# User authentication can be delegated to an upstream proxy server (usually -# Apache). The upstream proxy should set a REMOTE_USER header in the request. -# Enabling remote user disables regular logins. For more information, see: -# https://wiki.galaxyproject.org/Admin/Config/ApacheProxy -#use_remote_user = False - -# If use_remote_user is enabled and your external authentication -# method just returns bare usernames, set a default mail domain to be appended -# to usernames, to become your Galaxy usernames (email addresses). -#remote_user_maildomain = None - -# If use_remote_user is enabled, the header that the upstream proxy provides -# the remote username in defaults to HTTP_REMOTE_USER (the 'HTTP_' is prepended -# by WSGI). This option allows you to change the header. Note, you still need -# to prepend 'HTTP_' to the header in this option, but your proxy server should -# *not* include 'HTTP_' at the beginning of the header name. -#remote_user_header = HTTP_REMOTE_USER - -# If use_remote_user is enabled, you can set this to a URL that will log your -# users out. -#remote_user_logout_href = None - -# If your proxy and/or authentication source does not normalize e-mail -# addresses or user names being passed to Galaxy - set the following option -# to True to force these to lower case. -#normalize_remote_user_email = False - -# Administrative users - set this to a comma-separated list of valid Galaxy -# users (email addresses). These users will have access to the Admin section -# of the server, and will have access to create users, groups, roles, -# libraries, and more. For more information, see: -# https://wiki.galaxyproject.org/Admin/Interface -#admin_users = None - -# Force everyone to log in (disable anonymous access). -#require_login = False - -# Allow unregistered users to create new accounts (otherwise, they will have to -# be created by an admin). -#allow_user_creation = True - -# Allow administrators to delete accounts. 
-#allow_user_deletion = False - -# Allow administrators to log in as other users (useful for debugging) -#allow_user_impersonation = False - -# Allow users to remove their datasets from disk immediately (otherwise, -# datasets will be removed after a time period specified by an administrator in -# the cleanup scripts run via cron) -#allow_user_dataset_purge = False - -# By default, users' data will be public, but setting this to True will cause -# it to be private. Does not affect existing users and data, only ones created -# after this option is set. Users may still change their default back to -# public. -#new_user_dataset_access_role_default_private = False - -# -- Beta features - -# Enable Galaxy to communicate directly with a sequencer -#enable_sequencer_communication = False - -# Enable authentication via OpenID. Allows users to log in to their Galaxy -# account by authenticating with an OpenID provider. -#enable_openid = False -#openid_config_file = openid_conf.xml -#openid_consumer_cache_path = database/openid_consumer_cache - -# Optional list of email addresses of API users who can make calls on behalf of -# other users -#api_allow_run_as = None - -# Master key that allows many API admin actions to be used without actually -# having a defined admin user in the database/config. Only set this if you need -# to bootstrap Galaxy, you probably do not want to set this on public servers. -#master_api_key = changethis - -# Enable tool tags (associating tools with tags). This has its own option -# since its implementation has a few performance implications on startup for -# large servers. -#enable_tool_tags = False - -# Enable a feature when running workflows. When enabled, default datasets -# are selected for "Set at Runtime" inputs from the history such that the -# same input will not be selected twice, unless there are more inputs than -# compatible datasets in the history. -# When False, the most recently added compatible item in the history will -# be used for each "Set at Runtime" input, independent of others in the Workflow -#enable_unique_workflow_defaults = False - -# The URL to the myExperiment instance being used (omit scheme but include port) -#myexperiment_url = www.myexperiment.org:80 - -# Enable Galaxy's "Upload via FTP" interface. You'll need to install and -# configure an FTP server (we've used ProFTPd since it can use Galaxy's -# database for authentication) and set the following two options. - -# This should point to a directory containing subdirectories matching users' -# email addresses, where Galaxy will look for files. -#ftp_upload_dir = None - -# This should be the hostname of your FTP server, which will be provided to -# users in the help text. -#ftp_upload_site = None - -# Enable enforcement of quotas. Quotas can be set from the Admin interface. -#enable_quotas = False - -# This option allows users to see the full path of datasets via the "View -# Details" option in the history. Administrators can always see this. 
-#expose_dataset_path = False - -# Data manager configuration options -# Allow non-admin users to view available Data Manager options -#enable_data_manager_user_view = False -# File where Data Managers are configured -#data_manager_config_file = data_manager_conf.xml -# File where Tool Shed based Data Managers are configured -#shed_data_manager_config_file = shed_data_manager_conf.xml -# Directory to store Data Manager based tool-data; defaults to tool_data_path -#galaxy_data_manager_data_path = tool-data - -# -- Job Execution - -# To increase performance of job execution and the web interface, you can -# separate Galaxy into multiple processes. There are more than one way to do -# this, and they are explained in detail in the documentation: -# -# https://wiki.galaxyproject.org/Admin/Config/Performance/Scaling - -# By default, Galaxy manages and executes jobs from within a single process and -# notifies itself of new jobs via in-memory queues. Jobs are run locally on -# the system on which Galaxy is started. Advanced job running capabilities can -# be configured through the job configuration file. -#job_config_file = job_conf.xml - -# In multiprocess configurations, notification between processes about new jobs -# is done via the database. In single process configurations, this is done in -# memory, which is a bit quicker. Galaxy tries to automatically determine -# which method it should used based on your handler configuration in the job -# config file, but you can explicitly override this behavior by setting the -# following option to True or False. -#track_jobs_in_database = None - -# This enables splitting of jobs into tasks, if specified by the particular tool config. -# This is a new feature and not recommended for production servers yet. -#use_tasked_jobs = False -#local_task_queue_workers = 2 - -# Enable job recovery (if Galaxy is restarted while cluster jobs are running, -# it can "recover" them when it starts). This is not safe to use if you are -# running more than one Galaxy server using the same database. -#enable_job_recovery = True - -# Although it is fairly reliable, setting metadata can occasionally fail. In -# these instances, you can choose to retry setting it internally or leave it in -# a failed state (since retrying internally may cause the Galaxy process to be -# unresponsive). If this option is set to False, the user will be given the -# option to retry externally, or set metadata manually (when possible). -#retry_metadata_internally = True - -# If (for example) you run on a cluster and your datasets (by default, -# database/files/) are mounted read-only, this option will override tool output -# paths to write outputs to the working directory instead, and the job manager -# will move the outputs to their proper place in the dataset directory on the -# Galaxy server after the job completes. -#outputs_to_working_directory = False - -# If your network filesystem's caching prevents the Galaxy server from seeing -# the job's stdout and stderr files when it completes, you can retry reading -# these files. The job runner will retry the number of times specified below, -# waiting 1 second between tries. For NFS, you may want to try the -noac mount -# option (Linux) or -actimeo=0 (Solaris). -#retry_job_output_collection = 0 - -# Clean up various bits of jobs left on the filesystem after completion. These -# bits include the job working directory, external metadata temporary files, -# and DRM stdout and stderr files (if using a DRM). 
Possible values are: -# always, onsuccess, never -#cleanup_job = always - -# File to source to set up the environment when running jobs. By default, the -# environment in which the Galaxy server starts is used when running jobs -# locally, and the environment set up per the DRM's submission method and -# policy is used when running jobs on a cluster (try testing with `qsub` on the -# command line). environment_setup_file can be set to the path of a file on -# the cluster that should be sourced by the user to set up the environment -# prior to running tools. This can be especially useful for running jobs as -# the actual user, to remove the need to configure each user's environment -# individually. -#environment_setup_file = None - - -# Optional file containing job resource data entry fields definition. -# These fields will be presented to users in the tool forms and allow them to -# overwrite default job resources such as number of processors, memory, and walltime. -#job_resource_params_file = job_resource_params_conf.xml - -# If using job concurrency limits (configured in job_config_file), several -# extra database queries must be performed to determine the number of jobs a -# user has dispatched to a given destination. By default, these queries will -# happen for every job that is waiting to run, but if cache_user_job_count is -# set to True, it will only happen once per iteration of the handler queue. -# Although better for performance due to reduced queries, the tradeoff is a -# greater possibility that jobs will be dispatched past the configured limits -# if running many handlers. -#cache_user_job_count = False - -# ToolBox filtering -# Modules from lib/galaxy/tools/filters/ can be specified in the following lines. -# tool_* filters will be applied for all users and can not be changed by them. -# user_tool_* filters will be shown under user preferences and can be toogled -# on and off by runtime -#tool_filters -#tool_label_filters -#tool_section_filters -#user_tool_filters = examples:restrict_upload_to_admins, examples:restrict_encode -#user_tool_section_filters = examples:restrict_text -#user_tool_label_filters = examples:restrict_upload_to_admins, examples:restrict_encode - -# Galaxy Application Internal Message Queue - -# Galaxy uses AMQP internally TODO more documentation on what for. -# For examples, see http://ask.github.io/kombu/userguide/connections.html -# -# Without specifying anything here, galaxy will first attempt to use your -# specified database_connection above. If that's not specified either, Galaxy -# will automatically create and use a separate sqlite database located in your -# <galaxy>/database folder (indicated in the commented out line below). - -#amqp_internal_connection = sqlalchemy+sqlite:///./database/control.sqlite?isolation_level=IMMEDIATE - - - -# ---- Galaxy External Message Queue ------------------------------------------------- - -# Galaxy uses AMQ protocol to receive messages from external sources like -# bar code scanners. Galaxy has been tested against RabbitMQ AMQP implementation. -# For Galaxy to receive messages from a message queue the RabbitMQ server has -# to be set up with a user account and other parameters listed below. The 'host' -# and 'port' fields should point to where the RabbitMQ server is running. 
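To make the [galaxy_amqp] settings that follow concrete, here is a hedged kombu sketch of publishing a scanner message with these defaults (assumes kombu is installed and RabbitMQ is running; the 'direct' exchange type and the payload are my assumptions, not Galaxy code):

    from kombu import Connection, Exchange, Queue

    # Connection URL assembled from the [galaxy_amqp] defaults below.
    conn = Connection("amqp://galaxy:galaxy@127.0.0.1:5672/galaxy_messaging_engine")
    exchange = Exchange("galaxy_exchange", type="direct")
    queue = Queue("galaxy_queue", exchange, routing_key="bar_code_scanner")
    producer = conn.Producer()
    producer.publish({"barcode": "SAMPLE-0001"},  # hypothetical payload
                     exchange=exchange, routing_key="bar_code_scanner",
                     declare=[queue])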
- -[galaxy_amqp] - -#host = 127.0.0.1 -#port = 5672 -#userid = galaxy -#password = galaxy -#virtual_host = galaxy_messaging_engine -#queue = galaxy_queue -#exchange = galaxy_exchange -#routing_key = bar_code_scanner -#rabbitmqctl_path = /path/to/rabbitmqctl - https://bitbucket.org/galaxy/galaxy-central/commits/bbf9c5f5fdaf/ Changeset: bbf9c5f5fdaf User: natefoo Date: 2014-09-15 22:43:22 Summary: Handle backwards compatibility for old config file locations. Affected #: 2 files diff -r 21fb6c2f4349c358321b9344b9dbda20264b78fb -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f lib/galaxy/app.py --- a/lib/galaxy/app.py +++ b/lib/galaxy/app.py @@ -112,7 +112,7 @@ if self.config.enable_openid: from galaxy.web.framework import openid_manager self.openid_manager = openid_manager.OpenIDManager( self.config.openid_consumer_cache_path ) - self.openid_providers = OpenIDProviders.from_file( self.config.openid_config ) + self.openid_providers = OpenIDProviders.from_file( self.config.openid_config_file ) else: self.openid_providers = OpenIDProviders() # Start the heartbeat process if configured and available diff -r 21fb6c2f4349c358321b9344b9dbda20264b78fb -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f lib/galaxy/config.py --- a/lib/galaxy/config.py +++ b/lib/galaxy/config.py @@ -38,6 +38,10 @@ def __init__( self, **kwargs ): self.config_dict = kwargs self.root = kwargs.get( 'root_dir', '.' ) + + # Resolve paths of other config files + self.__parse_config_file_options( kwargs ) + # Collect the umask and primary gid from the environment self.umask = os.umask( 077 ) # get the current umask os.umask( self.umask ) # can't get w/o set, so set it back @@ -66,9 +70,7 @@ self.cookie_path = kwargs.get( "cookie_path", "/" ) # Galaxy OpenID settings self.enable_openid = string_as_bool( kwargs.get( 'enable_openid', False ) ) - self.openid_config = kwargs.get( 'openid_config_file', 'openid_conf.xml' ) self.enable_quotas = string_as_bool( kwargs.get( 'enable_quotas', False ) ) - self.tool_sheds_config = kwargs.get( 'tool_sheds_config_file', 'tool_sheds_conf.xml' ) self.enable_unique_workflow_defaults = string_as_bool( kwargs.get( 'enable_unique_workflow_defaults', False ) ) self.tool_path = resolve_path( kwargs.get( "tool_path", "tools" ), self.root ) self.tool_data_path = resolve_path( kwargs.get( "tool_data_path", "tool-data" ), os.getcwd() ) @@ -77,13 +79,6 @@ self.test_conf = resolve_path( kwargs.get( "test_conf", "" ), self.root ) # The value of migrated_tools_config is the file reserved for containing only those tools that have been eliminated from the distribution # and moved to the tool shed. 
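A side note on resolve_path(), which this hunk calls repeatedly: judging from its call sites it anchors relative paths at the Galaxy root. A reconstruction under that assumption (not the verbatim Galaxy function):

    import os

    def resolve_path(path, root):
        # Leave absolute paths alone; anchor relative ones at the Galaxy root.
        if not os.path.isabs(path):
            path = os.path.join(root, path)
        return path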
- self.migrated_tools_config = resolve_path( kwargs.get( 'migrated_tools_config', 'migrated_tools_conf.xml' ), self.root ) - if 'tool_config_file' in kwargs: - tcf = kwargs[ 'tool_config_file' ] - elif 'tool_config_files' in kwargs: - tcf = kwargs[ 'tool_config_files' ] - else: - tcf = 'tool_conf.xml,shed_tool_conf.xml' self.integrated_tool_panel_config = resolve_path( kwargs.get( 'integrated_tool_panel_config', 'integrated_tool_panel.xml' ), self.root ) self.tool_filters = listify( kwargs.get( "tool_filters", [] ), do_strip=True ) self.tool_label_filters = listify( kwargs.get( "tool_label_filters", [] ), do_strip=True ) @@ -93,7 +88,6 @@ self.user_label_filters = listify( kwargs.get( "user_tool_label_filters", [] ), do_strip=True ) self.user_section_filters = listify( kwargs.get( "user_tool_section_filters", [] ), do_strip=True ) - self.tool_configs = [ resolve_path( p, self.root ) for p in listify( tcf ) ] # Check for tools defined in the above non-shed tool configs (i.e., tool_conf.xml) tht have # been migrated from the Galaxy code distribution to the Tool Shed. self.check_migrate_tools = string_as_bool( kwargs.get( 'check_migrate_tools', True ) ) @@ -102,8 +96,6 @@ self.shed_tool_data_path = resolve_path( self.shed_tool_data_path, self.root ) else: self.shed_tool_data_path = self.tool_data_path - self.tool_data_table_config_path = [ resolve_path( x, self.root ) for x in kwargs.get( 'tool_data_table_config_path', 'tool_data_table_conf.xml' ).split( ',' ) ] - self.shed_tool_data_table_config = resolve_path( kwargs.get( 'shed_tool_data_table_config', 'shed_tool_data_table_conf.xml' ), self.root ) self.manage_dependency_relationships = string_as_bool( kwargs.get( 'manage_dependency_relationships', False ) ) self.running_functional_tests = string_as_bool( kwargs.get( 'running_functional_tests', False ) ) self.hours_between_check = kwargs.get( 'hours_between_check', 12 ) @@ -127,8 +119,6 @@ self.hours_between_check = 12 self.update_integrated_tool_panel = kwargs.get( "update_integrated_tool_panel", True ) self.enable_data_manager_user_view = string_as_bool( kwargs.get( "enable_data_manager_user_view", "False" ) ) - self.data_manager_config_file = resolve_path( kwargs.get('data_manager_config_file', 'data_manager_conf.xml' ), self.root ) - self.shed_data_manager_config_file = resolve_path( kwargs.get('shed_data_manager_config_file', 'shed_data_manager_conf.xml' ), self.root ) self.galaxy_data_manager_data_path = kwargs.get( 'galaxy_data_manager_data_path', self.tool_data_path ) self.tool_secret = kwargs.get( "tool_secret", "" ) self.id_secret = kwargs.get( "id_secret", "USING THE DEFAULT IS NOT SECURE!" 
) @@ -148,9 +138,6 @@ self.template_path = resolve_path( kwargs.get( "template_path", "templates" ), self.root ) self.template_cache = resolve_path( kwargs.get( "template_cache_path", "database/compiled_templates" ), self.root ) self.dependency_resolvers_config_file = resolve_path( kwargs.get( 'dependency_resolvers_config_file', 'dependency_resolvers_conf.xml' ), self.root ) - self.job_metrics_config_file = resolve_path( kwargs.get( 'job_metrics_config_file', 'job_metrics_conf.xml' ), self.root ) - self.job_config_file = resolve_path( kwargs.get( 'job_config_file', 'job_conf.xml' ), self.root ) - self.job_resource_params_file = resolve_path( kwargs.get( 'job_resource_params_file', 'job_resource_params_conf.xml' ), self.root ) self.local_job_queue_workers = int( kwargs.get( "local_job_queue_workers", "5" ) ) self.cluster_job_queue_workers = int( kwargs.get( "cluster_job_queue_workers", "3" ) ) self.job_queue_cleanup_interval = int( kwargs.get("job_queue_cleanup_interval", "5") ) @@ -195,7 +182,6 @@ self.start_job_runners = listify(kwargs.get( 'start_job_runners', '' )) self.expose_dataset_path = string_as_bool( kwargs.get( 'expose_dataset_path', 'False' ) ) # External Service types used in sample tracking - self.external_service_type_config_file = resolve_path( kwargs.get( 'external_service_type_config_file', 'external_service_types_conf.xml' ), self.root ) self.external_service_type_path = resolve_path( kwargs.get( 'external_service_type_path', 'external_service_types' ), self.root ) # Tasked job runner. self.use_tasked_jobs = string_as_bool( kwargs.get( 'use_tasked_jobs', False ) ) @@ -282,9 +268,6 @@ self.os_is_secure = string_as_bool( kwargs.get( 'os_is_secure', True ) ) self.os_conn_path = kwargs.get( 'os_conn_path', '/' ) self.object_store_cache_size = float(kwargs.get( 'object_store_cache_size', -1 )) - self.object_store_config_file = kwargs.get( 'object_store_config_file', None ) - if self.object_store_config_file is not None: - self.object_store_config_file = resolve_path( self.object_store_config_file, self.root ) self.distributed_object_store_config_file = kwargs.get( 'distributed_object_store_config_file', None ) if self.distributed_object_store_config_file is not None: self.distributed_object_store_config_file = resolve_path( self.distributed_object_store_config_file, self.root ) @@ -326,7 +309,6 @@ # Store per-tool runner configs self.tool_handlers = self.__read_tool_job_config( global_conf_parser, 'galaxy:tool_handlers', 'name' ) self.tool_runners = self.__read_tool_job_config( global_conf_parser, 'galaxy:tool_runners', 'url' ) - self.datatypes_config = kwargs.get( 'datatypes_config_file', 'datatypes_conf.xml' ) # Cloud configuration options self.enable_cloud_launch = string_as_bool( kwargs.get( 'enable_cloud_launch', False ) ) self.cloudlaunch_default_ami = kwargs.get( 'cloudlaunch_default_ami', 'ami-a7dbf6ce' ) @@ -392,6 +374,64 @@ else: return None + def __parse_config_file_options( self, kwargs ): + """ + Backwards compatibility for config files moved to the config/ dir. 
+ """ + defaults = dict( + data_manager_config_file = [ 'data_manager_conf.xml', 'config/data_manager_conf.xml', 'config/data_manager_conf.xml.sample' ], + datatypes_config_file = [ 'datatypes_conf.xml', 'config/datatypes_conf.xml', 'config/datatypes_conf.xml.sample' ], + external_service_type_config_file = [ 'external_service_types_conf.xml', 'config/external_service_types_conf.xml', 'config/external_service_types_conf.xml.sample' ], + job_config_file = [ 'job_conf.xml', 'config/job_conf.xml' ], + job_metrics_config_file = [ 'job_metrics_conf.xml', 'config/job_metrics_conf.xml' ], + job_resource_params_file = [ 'job_resource_params_conf.xml', 'config/job_resource_params_conf.xml' ], + migrated_tools_config = [ 'migrated_tools_conf.xml', 'config/migrated_tools_conf.xml' ], + object_store_config_file = [ 'object_store_conf.xml', 'config/object_store_conf.xml' ], + openid_config_file = [ 'openid_conf.xml', 'config/openid_conf.xml', 'config/openid_conf.xml.sample' ], + shed_data_manager_config_file = [ 'shed_data_manager_conf.xml', 'config/shed_data_manager_conf.xml' ], + shed_tool_data_table_config = [ 'shed_tool_data_table_conf.xml', 'config/shed_tool_data_table_conf.xml' ], + tool_sheds_config_file = [ 'tool_sheds_conf.xml', 'config/tool_sheds_conf.xml', 'config/tool_sheds_conf.xml.sample' ], + ) + + listify_defaults = dict( + tool_data_table_config_path = [ 'tool_data_table_conf.xml', 'config/tool_data_table_conf.xml', 'config/tool_data_table_conf.xml.sample' ], + tool_config_file = [ 'tool_conf.xml,shed_tool_conf.xml', 'config/tool_conf.xml,config/shed_tool_conf.xml', 'config/tool_conf.xml.sample,config/shed_tool_conf.xml' ] + ) + + for var, defaults in defaults.items(): + if kwargs.get( var, None ) is not None: + path = kwargs.get( var ) + else: + for default in defaults: + if os.path.exists( resolve_path( default, self.root ) ): + path = default + break + else: + path = defaults[-1] + print( "Using config '%s' at: %s" % ( var, path ) ) + setattr( self, var, resolve_path( path, self.root ) ) + + for var, defaults in listify_defaults.items(): + paths = [] + if kwargs.get( var, None ) is not None: + paths = listify( kwargs.get( var ) ) + else: + for default in defaults: + for path in listify( default ): + if not os.path.exists( resolve_path( path, self.root ) ): + break + else: + paths = listify( default ) + break + else: + paths = listify( defaults[-1] ) + print( "Using config '%s' at: %s" % ( var, ', '.join( paths ) ) ) + setattr( self, var, [ resolve_path( x, self.root ) for x in paths ] ) + + # Backwards compatibility for names used in too many places to fix + self.datatypes_config = self.datatypes_config_file + self.tool_configs = self.tool_config_file + def __read_tool_job_config( self, global_conf_parser, section, key ): try: tool_runners_config = global_conf_parser.items( section ) @@ -633,8 +673,8 @@ import tool_shed.tool_shed_registry # Set up the tool sheds registry - if os.path.isfile( self.config.tool_sheds_config ): - self.tool_shed_registry = tool_shed.tool_shed_registry.Registry( self.config.root, self.config.tool_sheds_config ) + if os.path.isfile( self.config.tool_sheds_config_file ): + self.tool_shed_registry = tool_shed.tool_shed_registry.Registry( self.config.root, self.config.tool_sheds_config_file ) else: self.tool_shed_registry = None https://bitbucket.org/galaxy/galaxy-central/commits/70d2dff1297d/ Changeset: 70d2dff1297d User: natefoo Date: 2014-09-15 22:43:51 Summary: Rename universe_wsgi.ini -> galaxy.ini everywhere.
https://bitbucket.org/galaxy/galaxy-central/commits/70d2dff1297d/ Changeset: 70d2dff1297d User: natefoo Date: 2014-09-15 22:43:51 Summary: Rename universe_wsgi.ini -> galaxy.ini everywhere. Affected #: 75 files diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb README.txt --- a/README.txt +++ b/README.txt @@ -22,13 +22,13 @@ http://localhost:8080 You may wish to make changes from the default configuration. This can be done -in the universe_wsgi.ini file. Tools can be either installed from the Tool Shed +in the config/galaxy.ini file. Tools can be either installed from the Tool Shed or added manually. For details please see the Galaxy wiki: -https://wiki.galaxyproject.org/Admin/Tools/AddToolFromToolShedTutorial. +https://wiki.galaxyproject.org/Admin/Tools/AddToolFromToolShedTutorial Not all dependencies are included for the tools provided in the sample tool_conf.xml. A full list of external dependencies is available at: -https://wiki.galaxyproject.org/Admin/Tools/ToolDependencies \ No newline at end of file +https://wiki.galaxyproject.org/Admin/Tools/ToolDependencies diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb buildbot_setup.sh --- a/buildbot_setup.sh +++ b/buildbot_setup.sh @@ -43,15 +43,10 @@ " SAMPLES=" -tool_conf.xml.sample -datatypes_conf.xml.sample -universe_wsgi.ini.sample -tool_data_table_conf.xml.sample -tool_sheds_conf.xml.sample -shed_tool_data_table_conf.xml.sample -migrated_tools_conf.xml.sample -data_manager_conf.xml.sample -shed_data_manager_conf.xml.sample +config/galaxy.ini.sample +config/shed_tool_data_table_conf.xml.sample +config/migrated_tools_conf.xml.sample +config/shed_data_manager_conf.xml.sample tool-data/shared/igv/igv_build_sites.txt.sample tool-data/shared/rviewer/rviewer_build_sites.txt.sample tool-data/shared/ucsc/builds.txt.sample diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb config/datatypes_conf.xml.sample --- a/config/datatypes_conf.xml.sample +++ b/config/datatypes_conf.xml.sample @@ -318,10 +318,10 @@ applications and the URL to those sites. The `display` attributes on the `ucsc` and `gbrowse` sites replace the - `ucsc_display_sites` and `gbrowse_display_sites` options in - universe_wsgi.ini. Because these are used by "old-style" display - applications, their types cannot change if you want the old-style display - links for these sites to work. + `ucsc_display_sites` and `gbrowse_display_sites` options in galaxy.ini. + Because these are used by "old-style" display applications, their types + cannot change if you want the old-style display links for these sites to + work. --><site type="ucsc" file="tool-data/shared/ucsc/ucsc_build_sites.txt" display="main,test,archaea,ucla"/><site type="gbrowse" file="tool-data/shared/gbrowse/gbrowse_build_sites.txt" display="modencode,sgd_yeast,tair,wormbase,wormbase_ws120,wormbase_ws140,wormbase_ws170,wormbase_ws180,wormbase_ws190,wormbase_ws200,wormbase_ws204,wormbase_ws210,wormbase_ws220,wormbase_ws225"/> diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb config/galaxy.ini.sample --- a/config/galaxy.ini.sample +++ b/config/galaxy.ini.sample @@ -8,7 +8,9 @@ # Throughout this sample configuration file, except where stated otherwise, # uncommented values override the default if left unset, whereas commented -# values are set to the default value. +# values are set to the default value. Relative paths are relative to the root +# Galaxy directory.
+# # Examples of many of these options are explained in more detail in the wiki: # # https://wiki.galaxyproject.org/Admin/Config @@ -132,7 +134,9 @@ # Tool config files, defines what tools are available in Galaxy. # Tools can be locally developed or installed from Galaxy tool sheds. -#tool_config_file = tool_conf.xml,shed_tool_conf.xml +# (config/tool_conf.xml.sample will be used if left unset and +# config/tool_conf.xml does not exist). +#tool_config_file = config/tool_conf.xml,config/shed_tool_conf.xml # Enable / disable checking if any tools defined in the above non-shed tool_config_files # (i.e., tool_conf.xml) have been migrated from the Galaxy code distribution to the Tool @@ -165,8 +169,8 @@ #tool_dependency_dir = None # File containing the Galaxy Tool Sheds that should be made available to -# install from in the admin interface -#tool_sheds_config_file = tool_sheds_conf.xml +# install from in the admin interface (.sample used if default does not exist). +#tool_sheds_config_file = config/tool_sheds_conf.xml # Enable automatic polling of relative tool sheds to see if any updates # are available for installed repositories. Ideally only one Galaxy @@ -180,15 +184,16 @@ # repository, keep an in-memory registry of dependent items for each repository. #manage_dependency_relationships = False -# XML config file that contains data table entries for the ToolDataTableManager. This file is manually -# maintained by the Galaxy administrator. -#tool_data_table_config_path = tool_data_table_conf.xml +# XML config file that contains data table entries for the +# ToolDataTableManager. This file is manually maintained by the Galaxy +# administrator (.sample used if default does not exist). +#tool_data_table_config_path = config/tool_data_table_conf.xml # XML config file that contains additional data table entries for the ToolDataTableManager. This file # is automatically generated based on the current installed tool shed repositories that contain valid # tool_data_table_conf.xml.sample files. At the time of installation, these entries are automatically # added to the following file, which is parsed and applied to the ToolDataTableManager at server start up. -#shed_tool_data_table_config = shed_tool_data_table_conf.xml +#shed_tool_data_table_config = config/shed_tool_data_table_conf.xml # Directory where data used by tools is located, see the samples in that # directory and the wiki for help: @@ -202,7 +207,7 @@ #len_file_path = tool-data/shared/ucsc/chrom # Datatypes config file, defines what data (file) types are available in -# Galaxy. +# Galaxy (.sample is used if default does not exist). #datatypes_config_file = config/datatypes_conf.xml # Disable the 'Auto-detect' option for file uploads @@ -234,7 +239,7 @@ #citation_cache_lock_dir = database/citations/lock # External service types config file, defines what types of external_services configurations -# are available in Galaxy. +# are available in Galaxy (.sample is used if default does not exist). #external_service_type_config_file = config/external_service_types_conf.xml # Path to the directory containing the external_service_types defined in the config. @@ -252,7 +257,7 @@ # # Configuration file for the object store # If this is set and exists, it overrides any other objectstore settings. -# object_store_config_file = object_store_conf.xml +# object_store_config_file = config/object_store_conf.xml # Object store backend module (valid options are: disk, s3, swift, irods, # distributed, hierarchical) @@ -448,14 +453,14 @@ # proxy server.
These options should be self explanatory and so are not # documented individually. You can use these paths (or ones in the proxy # server) to point to your own styles. -static_enabled = True -static_cache_time = 360 -static_dir = %(here)s/static/ -static_images_dir = %(here)s/static/images -static_favicon_dir = %(here)s/static/favicon.ico -static_scripts_dir = %(here)s/static/scripts/ -static_style_dir = %(here)s/static/june_2007_style/blue -static_robots_txt = %(here)s/static/robots.txt +#static_enabled = True +#static_cache_time = 360 +#static_dir = static/ +#static_images_dir = static/images +#static_favicon_dir = static/favicon.ico +#static_scripts_dir = static/scripts/ +#static_style_dir = static/june_2007_style/blue +#static_robots_txt = static/robots.txt # Pack javascript at launch (/static/scripts/*.js) # This only happens if the modified timestamp of the source .js is newer @@ -691,7 +696,8 @@ # Enable authentication via OpenID. Allows users to log in to their Galaxy # account by authenticating with an OpenID provider. #enable_openid = False -#openid_config_file = openid_conf.xml +# .sample used if default does not exist +#openid_config_file = config/openid_conf.xml #openid_consumer_cache_path = database/openid_consumer_cache # Optional list of email addresses of API users who can make calls on behalf of @@ -741,10 +747,11 @@ # Data manager configuration options # Allow non-admin users to view available Data Manager options #enable_data_manager_user_view = False -# File where Data Managers are configured +# File where Data Managers are configured (.sample used if default does not +# exist) #data_manager_config_file = config/data_manager_conf.xml # File where Tool Shed based Data Managers are configured -#shed_data_manager_config_file = shed_data_manager_conf.xml +#shed_data_manager_config_file = config/shed_data_manager_conf.xml # Directory to store Data Manager based tool-data; defaults to tool_data_path #galaxy_data_manager_data_path = tool-data @@ -760,7 +767,7 @@ # notifies itself of new jobs via in-memory queues. Jobs are run locally on # the system on which Galaxy is started. Advanced job running capabilities can # be configured through the job configuration file. -#job_config_file = job_conf.xml +#job_config_file = config/job_conf.xml # In multiprocess configurations, notification between processes about new jobs # is done via the database. In single process configurations, this is done in @@ -822,7 +829,7 @@ # Optional file containing job resource data entry fields definition. # These fields will be presented to users in the tool forms and allow them to # overwrite default job resources such as number of processors, memory, and walltime. -#job_resource_params_file = job_resource_params_conf.xml +#job_resource_params_file = config/job_resource_params_conf.xml # If using job concurrency limits (configured in job_config_file), several # extra database queries must be performed to determine the number of jobs a diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb config/job_conf.xml.sample_advanced --- a/config/job_conf.xml.sample_advanced +++ b/config/job_conf.xml.sample_advanced @@ -86,7 +86,7 @@ </plugins><handlers default="handlers"><!-- Additional job handlers - the id should match the name of a - [server:<id>] in universe_wsgi.ini. + [server:<id>] in galaxy.ini. 
--><handler id="handler0" tags="handlers"/><handler id="handler1" tags="handlers"/> @@ -148,7 +148,7 @@ This configuration allows any docker instance to write to any Galaxy file - for greater isolation set outputs_to_working_directory in - universe_wsgi.ini. This will cause $defaults to allow writing to much + galaxy.ini. This will cause $defaults to allow writing to much less. It will then expand as follows: $galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$default_file_path:ro diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb config/plugins/visualizations/README.txt --- a/config/plugins/visualizations/README.txt +++ b/config/plugins/visualizations/README.txt @@ -8,9 +8,8 @@ the user when they click the 'visualizations' icon for a dataset in their history panel. -The framework must be enabled in your 'universe_wsgi.ini' file by -uncommenting (and having a valid path for) the -'visualizations_plugin_directory' entry. +The framework must be enabled in your 'galaxy.ini' file by uncommenting (and +having a valid path for) the 'visualizations_plugin_directory' entry. For more information, see http://wiki.galaxyproject.org/VisualizationsRegistry diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb config/tool_conf.xml.main --- a/config/tool_conf.xml.main +++ b/config/tool_conf.xml.main @@ -98,7 +98,6 @@ <section id="plots" name="Graph/Display Data"><tool file="plotting/boxplot.xml" /><tool file="maf/vcf_to_maf_customtrack.xml" /> - <tool file="mutation/visualize.xml" /></section><section id="hgv" name="Phenotype Association"><tool file="evolution/codingSnps.xml" /> diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb config/tool_conf.xml.sample --- a/config/tool_conf.xml.sample +++ b/config/tool_conf.xml.sample @@ -117,7 +117,6 @@ <tool file="plotting/boxplot.xml" /><tool file="visualization/LAJ.xml" /><tool file="maf/vcf_to_maf_customtrack.xml" /> - <tool file="mutation/visualize.xml" /></section><section id="hyphy" name="Evolution"><tool file="evolution/codingSnps.xml" /> diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb contrib/galaxy.debian-init --- a/contrib/galaxy.debian-init +++ b/contrib/galaxy.debian-init @@ -17,7 +17,7 @@ GROUP="nogroup" DIR="/home/galaxy/galaxy_dist/" PYTHON="/usr/bin/python" -OPTS="-ES ./scripts/paster.py serve --log-file /home/galaxy/galaxy.log universe_wsgi.ini" +OPTS="./scripts/paster.py serve --log-file /home/galaxy/galaxy.log config/galaxy.ini" PIDFILE="/var/run/galaxy.pid" case "${1:-''}" in diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb contrib/galaxy_config_merger.py --- /dev/null +++ b/contrib/galaxy_config_merger.py @@ -0,0 +1,87 @@ +#! /usr/bin/env python +''' +galaxy_config_merger.py + +Created by Anne Pajon on 31 Jan 2012 + +Copyright (c) 2012 Cancer Research UK - Cambridge Research Institute. + +This source file is licensed under the Academic Free License version +3.0 available at http://www.opensource.org/licenses/AFL-3.0. + +Permission is hereby granted to reproduce, translate, adapt, alter, +transform, modify, or arrange this source file (the "Original Work"); +to distribute or communicate copies of it under any license of your +choice that does not contradict the terms and conditions; to perform +or display the Original Work publicly. 
+ +THE ORIGINAL WORK IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS +AND WITHOUT WARRANTY, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT +LIMITATION, THE WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY OR +FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY OF +THE ORIGINAL WORK IS WITH YOU. + +Script for merging specific local Galaxy config galaxy.ini.cri with default Galaxy galaxy.ini.sample +''' +import ConfigParser +import sys +import optparse +import logging + +def main(): + # logging configuration + logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) + + # get the options + parser = optparse.OptionParser() + parser.add_option("-s", "--sample", dest="sample", action="store", help="path to Galaxy galaxy.ini.sample file") + parser.add_option("-c", "--config", dest="config", action="store", help="path to your own galaxy.ini file") + parser.add_option("-o", "--output", dest="output", action="store", help="path to the new merged galaxy.ini.new file") + (options, args) = parser.parse_args() + + for option in ['sample', 'config']: + if getattr(options, option) == None: + print "Please supply a --%s parameter.\n" % (option) + parser.print_help() + sys.exit() + + config_sample = ConfigParser.RawConfigParser() + config_sample.read(options.sample) + config_sample_content = open(options.sample, 'r').read() + + config = ConfigParser.RawConfigParser() + config.read(options.config) + + logging.info("Merging your own config file %s into the sample one %s." % (options.config, options.sample)) + logging.info("---------- DIFFERENCE ANALYSIS BEGIN ----------") + for section in config.sections(): + if not config_sample.has_section(section): + logging.warning("-MISSING- section [%s] not found in sample file. It will be ignored." % section) + else: + for (name, value) in config.items(section): + if not config_sample.has_option(section, name): + if not "#%s" % name in config_sample_content: + logging.warning("-MISSING- section [%s] option '%s' not found in sample file. It will be ignored." % (section, name)) + else: + logging.info("-notset- section [%s] option '%s' not set in sample file. It will be added." % (section, name)) + config_sample.set(section, name, value) + else: + if not config_sample.get(section, name) == value: + logging.info("- diff - section [%s] option '%s' has different value ('%s':'%s'). It will be modified." % (section, name, config_sample.get(section, name), value)) + config_sample.set(section, name, value) + logging.info("---------- DIFFERENCE ANALYSIS END ----------") + + if options.output: + outputfile = open(options.output, 'w') + config_sample.write(outputfile) + outputfile.close() + else: + #print "----------" + #config_sample.write(sys.stdout) + #print "----------" + logging.info("use -o OUTPUT to write the merged configuration into a file.") + + logging.info("read Galaxy galaxy.ini.sample for detailed information.") + +if __name__ == '__main__': + main() diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb contrib/universe_merger.py --- a/contrib/universe_merger.py +++ /dev/null @@ -1,87 +0,0 @@ -#! /usr/bin/env python -''' -universe_merger.py - -Created by Anne Pajon on 31 Jan 2012 - -Copyright (c) 2012 Cancer Research UK - Cambridge Research Institute. - -This source file is licensed under the Academic Free License version -3.0 available at http://www.opensource.org/licenses/AFL-3.0. 
- -Permission is hereby granted to reproduce, translate, adapt, alter, -transform, modify, or arrange this source file (the "Original Work"); -to distribute or communicate copies of it under any license of your -choice that does not contradict the terms and conditions; to perform -or display the Original Work publicly. - -THE ORIGINAL WORK IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS -AND WITHOUT WARRANTY, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT -LIMITATION, THE WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY OR -FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY OF -THE ORIGINAL WORK IS WITH YOU. - -Script for merging specific local Galaxy config universe_wsgi.ini.cri with default Galaxy universe_wsgi.ini.sample -''' -import ConfigParser -import sys -import optparse -import logging - -def main(): - # logging configuration - logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO) - - # get the options - parser = optparse.OptionParser() - parser.add_option("-s", "--sample", dest="sample", action="store", help="path to Galaxy universe_wsgi.ini.sample file") - parser.add_option("-c", "--config", dest="config", action="store", help="path to your own universe_wsgi.ini file") - parser.add_option("-o", "--output", dest="output", action="store", help="path to the new merged universe_wsgi.ini.new file") - (options, args) = parser.parse_args() - - for option in ['sample', 'config']: - if getattr(options, option) == None: - print "Please supply a --%s parameter.\n" % (option) - parser.print_help() - sys.exit() - - config_sample = ConfigParser.RawConfigParser() - config_sample.read(options.sample) - config_sample_content = open(options.sample, 'r').read() - - config = ConfigParser.RawConfigParser() - config.read(options.config) - - logging.info("Merging your own config file %s into the sample one %s." % (options.config, options.sample)) - logging.info("---------- DIFFERENCE ANALYSIS BEGIN ----------") - for section in config.sections(): - if not config_sample.has_section(section): - logging.warning("-MISSING- section [%s] not found in sample file. It will be ignored." % section) - else: - for (name, value) in config.items(section): - if not config_sample.has_option(section, name): - if not "#%s" % name in config_sample_content: - logging.warning("-MISSING- section [%s] option '%s' not found in sample file. It will be ignored." % (section, name)) - else: - logging.info("-notset- section [%s] option '%s' not set in sample file. It will be added." % (section, name)) - config_sample.set(section, name, value) - else: - if not config_sample.get(section, name) == value: - logging.info("- diff - section [%s] option '%s' has different value ('%s':'%s'). It will be modified." 
% (section, name, config_sample.get(section, name), value)) - config_sample.set(section, name, value) - logging.info("---------- DIFFERENCE ANALYSIS END ----------") - - if options.output: - outputfile = open(options.output, 'w') - config_sample.write(outputfile) - outputfile.close() - else: - #print "----------" - #config_sample.write(sys.stdout) - #print "----------" - logging.info("use -o OUTPUT to write the merged configuration into a file.") - - logging.info("read Galaxy universe_wsgi.ini.sample for detailed information.") - -if __name__ == '__main__': - main() diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb doc/source/lib/galaxy.webapps.galaxy.api.rst --- a/doc/source/lib/galaxy.webapps.galaxy.api.rst +++ b/doc/source/lib/galaxy.webapps.galaxy.api.rst @@ -91,7 +91,7 @@ about a particular dataset. For a more comprehensive Data Library example, set the following option in your -universe_wsgi.ini as well, and restart galaxy again:: +galaxy.ini as well, and restart galaxy again:: admin_users = you@example.org library_import_dir = /path/to/some/directory diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/jobs/__init__.py --- a/lib/galaxy/jobs/__init__.py +++ b/lib/galaxy/jobs/__init__.py @@ -267,7 +267,7 @@ log.debug('Done loading job configuration') def __parse_job_conf_legacy(self): - """Loads the old-style job configuration from options in the galaxy config file (by default, universe_wsgi.ini). + """Loads the old-style job configuration from options in the galaxy config file (by default, config/galaxy.ini). """ log.debug('Loading job configuration from %s' % self.app.config.config_file) diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/jobs/metrics/instrumenters/core.py --- a/lib/galaxy/jobs/metrics/instrumenters/core.py +++ b/lib/galaxy/jobs/metrics/instrumenters/core.py @@ -20,7 +20,7 @@ elif key == RUNTIME_SECONDS_KEY: return ( "Job Runtime (Wall Clock)", formatting.seconds_to_str( value ) ) else: - # TODO: Use localized version of this from universe_wsgi.ini + # TODO: Use localized version of this from galaxy.ini title = "Job Start Time" if key == START_EPOCH_KEY else "Job End Time" return (title, time.strftime( '%Y-%m-%d %H:%M:%S', time.localtime( value ) ) ) diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/jobs/runners/lwr.py --- a/lib/galaxy/jobs/runners/lwr.py +++ b/lib/galaxy/jobs/runners/lwr.py @@ -542,7 +542,7 @@ metadata_kwds['output_fnames'] = outputs metadata_kwds['compute_tmp_dir'] = working_directory metadata_kwds['config_root'] = remote_galaxy_home - default_config_file = os.path.join(remote_galaxy_home, 'universe_wsgi.ini') + default_config_file = os.path.join(remote_galaxy_home, 'galaxy.ini') metadata_kwds['config_file'] = remote_system_properties.get('galaxy_config_file', default_config_file) metadata_kwds['dataset_files_path'] = remote_system_properties.get('galaxy_dataset_files_path', None) if LwrJobRunner.__use_remote_datatypes_conf( client ): diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/jobs/runners/lwr_client/__init__.py --- a/lib/galaxy/jobs/runners/lwr_client/__init__.py +++ b/lib/galaxy/jobs/runners/lwr_client/__init__.py @@ -14,22 +14,22 @@ for information on how to configure Galaxy to interact with the LWR. 
Galaxy also supports an older, less rich configuration of job runners directly -in its main ``universe_wsgi.ini`` file. The following section describes how to +in its main ``galaxy.ini`` file. The following section describes how to configure Galaxy to communicate with the LWR in this legacy mode. Legacy ------ A Galaxy tool can be configured to be executed remotely via LWR by -adding a line to the ``universe_wsgi.ini`` file under the -``galaxy:tool_runners`` section with the format:: +adding a line to the ``galaxy.ini`` file under the ``galaxy:tool_runners`` +section with the format:: <tool_id> = lwr://http://<lwr_host>:<lwr_port> As an example, if a host named remotehost is running the LWR server application on port ``8913``, then the tool with id ``test_tool`` can be configured to run remotely on remotehost by adding the following -line to ``universe.ini``:: +line to ``galaxy.ini``:: test_tool = lwr://http://remotehost:8913 diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/jobs/runners/pulsar.py --- a/lib/galaxy/jobs/runners/pulsar.py +++ b/lib/galaxy/jobs/runners/pulsar.py @@ -558,7 +558,7 @@ metadata_kwds['output_fnames'] = outputs metadata_kwds['compute_tmp_dir'] = working_directory metadata_kwds['config_root'] = remote_galaxy_home - default_config_file = os.path.join(remote_galaxy_home, 'universe_wsgi.ini') + default_config_file = os.path.join(remote_galaxy_home, 'config/galaxy.ini') metadata_kwds['config_file'] = remote_system_properties.get('galaxy_config_file', default_config_file) metadata_kwds['dataset_files_path'] = remote_system_properties.get('galaxy_dataset_files_path', None) if PulsarJobRunner.__use_remote_datatypes_conf( client ): diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/model/migrate/check.py --- a/lib/galaxy/model/migrate/check.py +++ b/lib/galaxy/model/migrate/check.py @@ -105,7 +105,7 @@ db_schema = schema.ControlledSchema( engine, migrate_repository ) if migrate_repository.versions.latest != db_schema.version: config_arg = '' - if os.path.abspath( os.path.join( os.getcwd(), 'universe_wsgi.ini' ) ) != galaxy_config_file: + if os.path.abspath( os.path.join( os.getcwd(), 'config', 'galaxy.ini' ) ) != galaxy_config_file: config_arg = ' -c %s' % galaxy_config_file.replace( os.path.abspath( os.getcwd() ), '.' ) raise Exception( "Your database has version '%d' but this code expects version '%d'. Please backup your database and then migrate the schema by running 'sh manage_db.sh%s upgrade'." 
% ( db_schema.version, migrate_repository.versions.latest, config_arg ) ) diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/model/orm/scripts.py --- a/lib/galaxy/model/orm/scripts.py +++ b/lib/galaxy/model/orm/scripts.py @@ -18,7 +18,7 @@ log = logging.getLogger( __name__ ) -DEFAULT_CONFIG_FILE = 'universe_wsgi.ini' +DEFAULT_CONFIG_FILE = 'config/galaxy.ini' DEFAULT_CONFIG_PREFIX = '' DEFAULT_DATABASE = 'galaxy' @@ -85,7 +85,7 @@ 'lib/galaxy/webapps/tool_shed/model/migrate' >>> config['db_url'] 'sqlite:///pg/testdb1' - >>> write_ini('universe_wsgi.ini', 'database_file', 'moo.sqlite') + >>> write_ini('galaxy.ini', 'database_file', 'moo.sqlite') >>> config = get_config(['manage_db.py'], cwd=config_dir) >>> config['db_url'] 'sqlite:///moo.sqlite?isolation_level=IMMEDIATE' diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/objectstore/__init__.py --- a/lib/galaxy/objectstore/__init__.py +++ b/lib/galaxy/objectstore/__init__.py @@ -596,7 +596,7 @@ Depending on the configuration setting, invoke the appropriate object store """ - if config_xml is None and config.object_store_config_file: + if config_xml is None and os.path.exists( config.object_store_config_file ): # This is a top level invocation of build_object_store_from_config, and # we have an object_store_conf.xml -- read the .xml and build # accordingly diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/objectstore/s3.py --- a/lib/galaxy/objectstore/s3.py +++ b/lib/galaxy/objectstore/s3.py @@ -49,7 +49,7 @@ self._parse_config_xml(config_xml) self._configure_connection() self.bucket = self._get_bucket(self.bucket) - # Clean cache only if value is set in universe_wsgi.ini + # Clean cache only if value is set in galaxy.ini if self.cache_size != -1: # Convert GBs to bytes for comparison self.cache_size = self.cache_size * 1073741824 diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/tools/filters/examples.py --- a/lib/galaxy/tools/filters/examples.py +++ b/lib/galaxy/tools/filters/examples.py @@ -32,7 +32,7 @@ users. This can be enabled by renaming this file to examples.py and adding - the following to the ``app:main`` section of ``universe_wsgi.ini``: + the following to the ``app:main`` section of ``galaxy.ini``: tool_filters = examples:restrict_upload_to_admins """ @@ -46,7 +46,7 @@ This tool filter will disable all gatk tools when enabled. This can be enabled by renaming this file to examples.py and adding the following to the - ``app:main`` section of ``universe_wsgi.ini``: + ``app:main`` section of ``galaxy.ini``: tool_filters = examples:disable_gatk """ @@ -59,7 +59,7 @@ which tool sections. Anonymous users will only be able to view the "Get Data" tool section (with id getext). This can be enabled by renaming this file to examples.py and adding the following to the ``app:main`` section of - ``universe_wsgi.ini``: + ``galaxy.ini``: tool_section_filters = examples:explicit_user_mapping """ @@ -82,7 +82,7 @@ This tool filter will disable all tools with the string alpha appearing in the version for all users except those explicitly appearing in the DEVELOPERS list defined above. 
This can be enabled by renaming this file to examples.py and - adding the following to the ``app:main`` section of ``universe_wsgi.ini``: + adding the following to the ``app:main`` section of ``galaxy.ini``: tool_filters = examples:restrict_development_tools """ @@ -98,7 +98,7 @@ the URL the user is making the request to. This could allow a single Galaxy instance to seem like several different instances hosting different tools based on the URL used to access the Galaxy. This can be enabled by renaming this file to examples.py and adding - the following to the ``app:main`` section of ``universe_wsgi.ini``: + the following to the ``app:main`` section of ``galaxy.ini``: tool_section_filters = examples:per_host_tool_sections """ diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/tools/filters/examples.py.sample --- a/lib/galaxy/tools/filters/examples.py.sample +++ b/lib/galaxy/tools/filters/examples.py.sample @@ -6,7 +6,7 @@ """ This tool filter will hide the upload tool from all users except admin users. This can be enabled by renaming this file to examples.py and adding - the following to the ``app:main`` section of ``universe_wsgi.ini``: + the following to the ``app:main`` section of ``galaxy.ini``: tool_filters = examples:restrict_upload_to_admins """ @@ -19,7 +19,7 @@ """ This tool filter will disable all gatk tools when enabled. This can be enabled by renaming this file to examples.py and adding the following to the - ``app:main`` section of ``universe_wsgi.ini``: + ``app:main`` section of ``galaxy.ini``: tool_filters = examples:disable_gatk """ @@ -32,7 +32,7 @@ which tool sections. Anonymous users will only be able to view the "Get Data" tool section (with id getext). This can be enabled by renaming this file to examples.py and adding the following to the ``app:main`` section of - ``universe_wsgi.ini``: + ``galaxy.ini``: tool_section_filters = examples:explicit_user_mapping """ @@ -55,7 +55,7 @@ This tool filter will disable all tools with the string alpha appearing in the version for all users except those explicitly appearing in the DEVELOPERS list defined above. This can be enabled by renaming this file to examples.py and - adding the following to the ``app:main`` section of ``universe_wsgi.ini``: + adding the following to the ``app:main`` section of ``galaxy.ini``: tool_filters = examples:restrict_development_tools """ @@ -71,7 +71,7 @@ the URL the user is making the request to. This could allow a single Galaxy instance to seem like several different instances hosting different tools based on the URL used to access the Galaxy.
This can be enabled by renaming this file to examples.py and adding - the following to the ``app:main`` section of ``universe_wsgi.ini``: + the following to the ``app:main`` section of ``galaxy.ini``: tool_section_filters = examples:per_host_tool_sections """ diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/web/base/controllers/admin.py --- a/lib/galaxy/web/base/controllers/admin.py +++ b/lib/galaxy/web/base/controllers/admin.py @@ -1027,7 +1027,7 @@ @web.require_admin def memdump( self, trans, ids = 'None', sorts = 'None', pages = 'None', new_id = None, new_sort = None, **kwd ): if self.app.memdump is None: - return trans.show_error_message( "Memdump is not enabled (set <code>use_memdump = True</code> in universe_wsgi.ini)" ) + return trans.show_error_message( "Memdump is not enabled (set <code>use_memdump = True</code> in galaxy.ini)" ) heap = self.app.memdump.get() p = util.Params( kwd ) msg = None diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py --- a/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py +++ b/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py @@ -27,7 +27,7 @@ def get_message_for_no_shed_tool_config(): # This Galaxy instance is not configured with a shed-related tool panel configuration file. - message = 'The tool_config_file setting in universe_wsgi.ini must include at least one shed tool configuration file name with a <toolbox> ' + message = 'The tool_config_file setting in galaxy.ini must include at least one shed tool configuration file name with a <toolbox> ' message += 'tag that includes a tool_path attribute value which is a directory relative to the Galaxy installation directory in order to ' message += 'automatically install tools from a tool shed into Galaxy (e.g., the file name shed_tool_conf.xml whose <toolbox> tag is ' message += '<toolbox tool_path="../shed_tools">). For details, see the "Installation of Galaxy tool shed repository tools into a local ' @@ -229,7 +229,7 @@ :param install_tool_dependencies (optional): Set to True if you want to install tool dependencies defined for the specified repository being installed. The default setting is False. :param shed_tool_conf (optional): The shed-related tool panel configuration file configured in the "tool_config_file" setting in the Galaxy config file - (e.g., universe_wsgi.ini). At least one shed-related tool panel config file is required to be configured. Setting + (e.g., galaxy.ini). At least one shed-related tool panel config file is required to be configured. Setting this parameter to a specific file enables you to choose where the specified repository will be installed because the tool_path attribute of the <toolbox> from the specified file is used as the installation location (e.g., <toolbox tool_path="../shed_tools">). If this parameter is not set, a shed-related tool panel configuration @@ -283,7 +283,7 @@ :param install_tool_dependencies (optional): Set to True if you want to install tool dependencies defined for the specified repository being installed. The default setting is False. :param shed_tool_conf (optional): The shed-related tool panel configuration file configured in the "tool_config_file" setting in the Galaxy config file - (e.g., universe_wsgi.ini). At least one shed-related tool panel config file is required to be configured. Setting + (e.g., galaxy.ini). 
At least one shed-related tool panel config file is required to be configured. Setting this parameter to a specific file enables you to choose where the specified repository will be installed because the tool_path attribute of the <toolbox> from the specified file is used as the installation location (e.g., <toolbox tool_path="../shed_tools">). If this parameter is not set, a shed-related tool panel configuration diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py --- a/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py +++ b/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py @@ -616,7 +616,7 @@ if includes_tool_dependencies: message = "Tool dependencies defined in this repository can be automatically installed if you set " message += "the value of your <b>tool_dependency_dir</b> setting in your Galaxy config file " - message += "(universe_wsgi.ini) and restart your Galaxy server." + message += "(galaxy.ini) and restart your Galaxy server." status = "warning" install_tool_dependencies_check_box_checked = False else: @@ -865,7 +865,7 @@ message = 'All selected tool dependencies are already installed.' status = 'error' else: - message = 'Set the value of your <b>tool_dependency_dir</b> setting in your Galaxy config file (universe_wsgi.ini) ' + message = 'Set the value of your <b>tool_dependency_dir</b> setting in your Galaxy config file (galaxy.ini) ' message += ' and restart your Galaxy server to install tool dependencies.' status = 'error' installed_tool_dependencies_select_field = \ @@ -934,7 +934,7 @@ kwd[ 'message' ] = 'All selected tool dependencies are already installed.' kwd[ 'status' ] = 'error' else: - message = 'Set the value of your <b>tool_dependency_dir</b> setting in your Galaxy config file (universe_wsgi.ini) ' + message = 'Set the value of your <b>tool_dependency_dir</b> setting in your Galaxy config file (galaxy.ini) ' message += ' and restart your Galaxy server to install tool dependencies.' kwd[ 'message' ] = message kwd[ 'status' ] = 'error' @@ -969,7 +969,7 @@ @web.require_admin def prepare_for_install( self, trans, **kwd ): if not suc.have_shed_tool_conf_for_install( trans.app ): - message = 'The <b>tool_config_file</b> setting in <b>universe_wsgi.ini</b> must include at least one ' + message = 'The <b>tool_config_file</b> setting in <b>galaxy.ini</b> must include at least one ' message += 'shed tool configuration file name with a <b><toolbox></b> tag that includes a <b>tool_path</b> ' message += 'attribute value which is a directory relative to the Galaxy installation directory in order ' message += 'to automatically install tools from a Galaxy Tool Shed (e.g., the file name <b>shed_tool_conf.xml</b> ' @@ -1213,7 +1213,7 @@ if includes_tool_dependencies: message = "Tool dependencies defined in this repository can be automatically installed if you set " message += "the value of your <b>tool_dependency_dir</b> setting in your Galaxy config file " - message += "(universe_wsgi.ini) and restart your Galaxy server before installing the repository." + message += "(galaxy.ini) and restart your Galaxy server before installing the repository." 
status = "warning" install_tool_dependencies_check_box_checked = False else: @@ -1681,7 +1681,7 @@ if trans.app.config.tool_dependency_dir is None: if includes_tool_dependencies: message += "Tool dependencies defined in this repository can be automatically installed if you set the value of your <b>tool_dependency_dir</b> " - message += "setting in your Galaxy config file (universe_wsgi.ini) and restart your Galaxy server before installing the repository. " + message += "setting in your Galaxy config file (galaxy.ini) and restart your Galaxy server before installing the repository. " status = "warning" install_tool_dependencies_check_box_checked = False else: diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/webapps/galaxy/controllers/user.py --- a/lib/galaxy/webapps/galaxy/controllers/user.py +++ b/lib/galaxy/webapps/galaxy/controllers/user.py @@ -1192,13 +1192,13 @@ def toolbox_filters( self, trans, cntrller, **kwd ): """ Sets the user's default filters for the toolbox. - Toolbox filters are specified in universe_wsgi.ini. + Toolbox filters are specified in galaxy.ini. The user can activate them and the choice is stored in user_preferences. """ def get_filter_mapping( db_filters, config_filters ): """ - Compare the allowed filters from the universe_wsgi.ini config file with the previously saved or default filters from the database. + Compare the allowed filters from the galaxy.ini config file with the previously saved or default filters from the database. We need that to toogle the checkboxes for the formular in the right way. Furthermore we extract all information associated to a filter to display them in the formular. """ diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/galaxy/webapps/galaxy/controllers/visualization.py --- a/lib/galaxy/webapps/galaxy/controllers/visualization.py +++ b/lib/galaxy/webapps/galaxy/controllers/visualization.py @@ -793,7 +793,7 @@ # validate name vs. registry registry = trans.app.visualizations_registry if not registry: - raise HTTPNotFound( 'No visualization registry (possibly disabled in universe_wsgi.ini)' ) + raise HTTPNotFound( 'No visualization registry (possibly disabled in galaxy.ini)' ) if visualization_name not in registry.plugins: raise HTTPNotFound( 'Unknown or invalid visualization: ' + visualization_name ) # or redirect to list? diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/pulsar/client/__init__.py --- a/lib/pulsar/client/__init__.py +++ b/lib/pulsar/client/__init__.py @@ -10,26 +10,26 @@ Galaxy job runners are configured in Galaxy's ``job_conf.xml`` file. See ``job_conf.xml.sample_advanced`` in your Galaxy code base or on -`Bitbucket <https://bitbucket.org/galaxy/galaxy-dist/src/tip/job_conf.xml.sample_advanced?at=default>`_ +`Bitbucket <https://bitbucket.org/galaxy/galaxy-dist/src/tip/config/job_conf.xml.sample_advanced?at=default>`_ for information on how to configure Galaxy to interact with the Pulsar. Galaxy also supports an older, less rich configuration of job runners directly -in its main ``universe_wsgi.ini`` file. The following section describes how to +in its main ``galaxy.ini`` file. The following section describes how to configure Galaxy to communicate with the Pulsar in this legacy mode. 
Legacy ------ A Galaxy tool can be configured to be executed remotely via Pulsar by -adding a line to the ``universe_wsgi.ini`` file under the -``galaxy:tool_runners`` section with the format:: +adding a line to the ``galaxy.ini`` file under the ``galaxy:tool_runners`` +section with the format:: <tool_id> = pulsar://http://<pulsar_host>:<pulsar_port> As an example, if a host named remotehost is running the Pulsar server application on port ``8913``, then the tool with id ``test_tool`` can be configured to run remotely on remotehost by adding the following -line to ``universe.ini``:: +line to ``galaxy.ini``:: test_tool = pulsar://http://remotehost:8913 diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/tool_shed/galaxy_install/install_manager.py --- a/lib/tool_shed/galaxy_install/install_manager.py +++ b/lib/tool_shed/galaxy_install/install_manager.py @@ -726,7 +726,7 @@ if self.app.config.tool_dependency_dir is None: no_tool_dependency_dir_message = "Tool dependencies can be automatically installed only if you set " no_tool_dependency_dir_message += "the value of your 'tool_dependency_dir' setting in your Galaxy " - no_tool_dependency_dir_message += "configuration file (universe_wsgi.ini) and restart your Galaxy server. " + no_tool_dependency_dir_message += "configuration file (galaxy.ini) and restart your Galaxy server. " raise exceptions.ConfigDoesNotAllowException( no_tool_dependency_dir_message ) new_tool_panel_section_label = install_options.get( 'new_tool_panel_section_label', '' ) shed_tool_conf = install_options.get( 'shed_tool_conf', None ) diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/tool_shed/galaxy_install/migrate/check.py --- a/lib/tool_shed/galaxy_install/migrate/check.py +++ b/lib/tool_shed/galaxy_install/migrate/check.py @@ -71,7 +71,7 @@ have_tool_dependencies = True break config_arg = '' - if os.path.abspath( os.path.join( os.getcwd(), 'universe_wsgi.ini' ) ) != galaxy_config_file: + if os.path.abspath( os.path.join( os.getcwd(), 'galaxy.ini' ) ) != galaxy_config_file: config_arg = ' -c %s' % galaxy_config_file.replace( os.path.abspath( os.getcwd() ), '.' ) if not app.config.running_functional_tests: if tool_shed_accessible: diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/tool_shed/galaxy_install/migrate/common.py --- a/lib/tool_shed/galaxy_install/migrate/common.py +++ b/lib/tool_shed/galaxy_install/migrate/common.py @@ -10,7 +10,7 @@ def __init__( self, tools_migration_config ): install_dependencies = 'install_dependencies' in sys.argv - galaxy_config_file = 'universe_wsgi.ini' + galaxy_config_file = 'galaxy.ini' self.name = 'galaxy' if '-c' in sys.argv: pos = sys.argv.index( '-c' ) diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb lib/tool_shed/galaxy_install/tool_migration_manager.py --- a/lib/tool_shed/galaxy_install/tool_migration_manager.py +++ b/lib/tool_shed/galaxy_install/tool_migration_manager.py @@ -50,7 +50,7 @@ # to install but print informative error message. if install_dependencies and app.config.tool_dependency_dir is None: message = 'You are attempting to install tool dependencies but do not have a value ' - message += 'for "tool_dependency_dir" set in your universe_wsgi.ini file. Set this ' + message += 'for "tool_dependency_dir" set in your galaxy.ini file. 
Set this ' message += 'location value to the path where you want tool dependencies installed and ' message += 'rerun the migration script.' raise Exception( message ) diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb rolling_restart.sh --- a/rolling_restart.sh +++ b/rolling_restart.sh @@ -16,7 +16,13 @@ fi } -servers=`sed -n 's/^\[server:\(.*\)\]/\1/ p' universe_wsgi.ini | xargs echo` + +CONFIG_FILE=config/galaxy.ini +if [ ! -f $CONFIG_FILE ]; then + CONFIG_FILE=universe_wsgi.ini +fi + +servers=`sed -n 's/^\[server:\(.*\)\]/\1/ p' $CONFIG_FILE | xargs echo` for server in $servers; do # If there's a pid @@ -31,7 +37,7 @@ echo "$server not running" fi # Start the server (and background) (should this be nohup'd?) - python ./scripts/paster.py serve universe_wsgi.ini --server-name=$server --pid-file=$server.pid --log-file=$server.log --daemon $@ + python ./scripts/paster.py serve $CONFIG_FILE --server-name=$server --pid-file=$server.pid --log-file=$server.log --daemon $@ # Wait for the server to start sleep 1 # Grab the new pid diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb run.sh --- a/run.sh +++ b/run.sh @@ -18,8 +18,13 @@ python ./scripts/build_universe_config.py "$GALAXY_UNIVERSE_CONFIG_DIR" fi +CONFIG_FILE=config/galaxy.ini +if [ ! -f $CONFIG_FILE ]; then + CONFIG_FILE=universe_wsgi.ini +fi + if [ -n "$GALAXY_RUN_ALL" ]; then - servers=`sed -n 's/^\[server:\(.*\)\]/\1/ p' universe_wsgi.ini | xargs echo` + servers=`sed -n 's/^\[server:\(.*\)\]/\1/ p' $CONFIG_FILE | xargs echo` daemon=`echo "$@" | grep -q daemon` if [ $? -ne 0 ]; then echo 'ERROR: $GALAXY_RUN_ALL cannot be used without the `--daemon` or `--stop-daemon` arguments to run.sh' @@ -27,8 +32,8 @@ fi for server in $servers; do echo "Handling $server with log file $server.log..." - python ./scripts/paster.py serve universe_wsgi.ini --server-name=$server --pid-file=$server.pid --log-file=$server.log $@ + python ./scripts/paster.py serve $CONFIG_FILE --server-name=$server --pid-file=$server.pid --log-file=$server.log $@ done else - python ./scripts/paster.py serve universe_wsgi.ini $@ + python ./scripts/paster.py serve $CONFIG_FILE $@ fi diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/api/README --- a/scripts/api/README +++ b/scripts/api/README @@ -1,7 +1,7 @@ This is not documentation. These are hints and examples to get you started until the documentation is written. -Set these options in universe_wsgi.ini and start the server: +Set these options in galaxy.ini and start the server: admin_users = you@example.org library_import_dir = /path/to/some/directory diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/build_universe_config.py --- a/scripts/build_universe_config.py +++ b/scripts/build_universe_config.py @@ -20,7 +20,7 @@ ## TODO: Expand environment variables here, that would ## also make Galaxy much easier to configure.
- destination= "universe_wsgi.ini" + destination= "config/galaxy.ini" if len(argv) > 2: destination = argv[2] diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/check_eggs.py --- a/scripts/check_eggs.py +++ b/scripts/check_eggs.py @@ -10,7 +10,7 @@ from optparse import OptionParser parser = OptionParser() -parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (universe_wsgi.ini)', default='universe_wsgi.ini' ) +parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (config/galaxy.ini)', default='config/galaxy.ini' ) parser.add_option( '-q', '--quiet', dest='quiet', action="store_true", help='Quiet (no output, only set return code)', default=False ) ( options, args ) = parser.parse_args() @@ -23,7 +23,7 @@ root.addHandler( logging.StreamHandler( sys.stdout ) ) config_arg = '' -if options.config != 'universe_wsgi.ini': +if options.config != 'config/galaxy.ini': config_arg = '-c %s' % options.config lib = os.path.abspath( os.path.join( os.path.dirname( __file__ ), "..", "lib" ) ) diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/cleanup_datasets/admin_cleanup_datasets.py --- a/scripts/cleanup_datasets/admin_cleanup_datasets.py +++ b/scripts/cleanup_datasets/admin_cleanup_datasets.py @@ -9,11 +9,11 @@ script originally distributed with Galaxy. Basic Usage: - admin_cleanup_datasets.py universe_wsgi.ini -d 60 \ + admin_cleanup_datasets.py galaxy.ini -d 60 \ --template=email_template.txt Required Arguments: - config_file - the Galaxy configuration file (universe_wsgi.ini) + config_file - the Galaxy configuration file (galaxy.ini) Optional Arguments: -d --days - number of days old the dataset must be (default: 60) diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/cleanup_datasets/delete_datasets.sh --- a/scripts/cleanup_datasets/delete_datasets.sh +++ b/scripts/cleanup_datasets/delete_datasets.sh @@ -1,4 +1,4 @@ #!/bin/sh cd `dirname $0`/../.. -python ./scripts/cleanup_datasets/cleanup_datasets.py ./universe_wsgi.ini -d 10 -6 -r $@ >> ./scripts/cleanup_datasets/delete_datasets.log +python ./scripts/cleanup_datasets/cleanup_datasets.py ./config/galaxy.ini -d 10 -6 -r $@ >> ./scripts/cleanup_datasets/delete_datasets.log diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/cleanup_datasets/delete_userless_histories.sh --- a/scripts/cleanup_datasets/delete_userless_histories.sh +++ b/scripts/cleanup_datasets/delete_userless_histories.sh @@ -1,4 +1,4 @@ #!/bin/sh cd `dirname $0`/../.. 
-python ./scripts/cleanup_datasets/cleanup_datasets.py ./universe_wsgi.ini -d 10 -1 $@ >> ./scripts/cleanup_datasets/delete_userless_histories.log +python ./scripts/cleanup_datasets/cleanup_datasets.py ./config/galaxy.ini -d 10 -1 $@ >> ./scripts/cleanup_datasets/delete_userless_histories.log diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/cleanup_datasets/pgcleanup.py --- a/scripts/cleanup_datasets/pgcleanup.py +++ b/scripts/cleanup_datasets/pgcleanup.py @@ -61,10 +61,10 @@ self.action_names.append(name) def __parse_args(self): - default_config = os.path.abspath(os.path.join(galaxy_root, 'universe_wsgi.ini')) + default_config = os.path.abspath(os.path.join(galaxy_root, 'config', 'galaxy.ini')) parser = OptionParser() - parser.add_option('-c', '--config', dest='config', help='Path to Galaxy config file (universe_wsgi.ini)', default=default_config) + parser.add_option('-c', '--config', dest='config', help='Path to Galaxy config file (config/galaxy.ini)', default=default_config) parser.add_option('-d', '--debug', action='store_true', dest='debug', help='Enable debug logging', default=False) parser.add_option('--dry-run', action='store_true', dest='dry_run', help="Dry run (rollback all transactions)", default=False) parser.add_option('--force-retry', action='store_true', dest='force_retry', help="Retry file removals (on applicable actions)", default=False) diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/cleanup_datasets/populate_uuid.sh --- a/scripts/cleanup_datasets/populate_uuid.sh +++ b/scripts/cleanup_datasets/populate_uuid.sh @@ -2,4 +2,4 @@ cd `dirname $0`/../.. export PYTHONPATH=./lib/ -python ./scripts/cleanup_datasets/populate_uuid.py ./universe_wsgi.ini $@ \ No newline at end of file +python ./scripts/cleanup_datasets/populate_uuid.py ./config/galaxy.ini $@ diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/cleanup_datasets/purge_datasets.sh --- a/scripts/cleanup_datasets/purge_datasets.sh +++ b/scripts/cleanup_datasets/purge_datasets.sh @@ -1,4 +1,4 @@ #!/bin/sh cd `dirname $0`/../.. -python ./scripts/cleanup_datasets/cleanup_datasets.py ./universe_wsgi.ini -d 10 -3 -r $@ >> ./scripts/cleanup_datasets/purge_datasets.log +python ./scripts/cleanup_datasets/cleanup_datasets.py ./config/galaxy.ini -d 10 -3 -r $@ >> ./scripts/cleanup_datasets/purge_datasets.log diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/cleanup_datasets/purge_folders.sh --- a/scripts/cleanup_datasets/purge_folders.sh +++ b/scripts/cleanup_datasets/purge_folders.sh @@ -1,4 +1,4 @@ #!/bin/sh cd `dirname $0`/../.. -python ./scripts/cleanup_datasets/cleanup_datasets.py ./universe_wsgi.ini -d 10 -5 -r $@ >> ./scripts/cleanup_datasets/purge_folders.log +python ./scripts/cleanup_datasets/cleanup_datasets.py ./config/galaxy.ini -d 10 -5 -r $@ >> ./scripts/cleanup_datasets/purge_folders.log diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/cleanup_datasets/purge_histories.sh --- a/scripts/cleanup_datasets/purge_histories.sh +++ b/scripts/cleanup_datasets/purge_histories.sh @@ -1,4 +1,4 @@ #!/bin/sh cd `dirname $0`/../.. 
-python ./scripts/cleanup_datasets/cleanup_datasets.py ./universe_wsgi.ini -d 10 -2 -r $@ >> ./scripts/cleanup_datasets/purge_histories.log +python ./scripts/cleanup_datasets/cleanup_datasets.py ./config/galaxy.ini -d 10 -2 -r $@ >> ./scripts/cleanup_datasets/purge_histories.log diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/cleanup_datasets/purge_libraries.sh --- a/scripts/cleanup_datasets/purge_libraries.sh +++ b/scripts/cleanup_datasets/purge_libraries.sh @@ -1,4 +1,4 @@ #!/bin/sh cd `dirname $0`/../.. -python ./scripts/cleanup_datasets/cleanup_datasets.py ./universe_wsgi.ini -d 10 -4 -r $@ >> ./scripts/cleanup_datasets/purge_libraries.log +python ./scripts/cleanup_datasets/cleanup_datasets.py ./config/galaxy.ini -d 10 -4 -r $@ >> ./scripts/cleanup_datasets/purge_libraries.log diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/cleanup_datasets/update_metadata.sh --- a/scripts/cleanup_datasets/update_metadata.sh +++ b/scripts/cleanup_datasets/update_metadata.sh @@ -6,4 +6,4 @@ . ./scripts/get_python.sh . ./setup_paths.sh -$GALAXY_PYTHON ./scripts/cleanup_datasets/update_metadata.py ./universe_wsgi.ini $@ +$GALAXY_PYTHON ./scripts/cleanup_datasets/update_metadata.py ./config/galaxy.ini $@ diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/common_startup.sh --- a/scripts/common_startup.sh +++ b/scripts/common_startup.sh @@ -10,21 +10,11 @@ done SAMPLES=" - tool_shed_wsgi.ini.sample - datatypes_conf.xml.sample - external_service_types_conf.xml.sample - migrated_tools_conf.xml.sample - reports_wsgi.ini.sample - shed_tool_conf.xml.sample - tool_conf.xml.sample - shed_tool_data_table_conf.xml.sample - tool_data_table_conf.xml.sample - tool_sheds_conf.xml.sample - data_manager_conf.xml.sample - shed_data_manager_conf.xml.sample - openid_conf.xml.sample - job_metrics_conf.xml.sample - universe_wsgi.ini.sample + config/galaxy.ini.sample + config/migrated_tools_conf.xml.sample + config/shed_tool_conf.xml.sample + config/shed_tool_data_table_conf.xml.sample + config/shed_data_manager_conf.xml.sample lib/tool_shed/scripts/bootstrap_tool_shed/user_info.xml.sample tool-data/shared/ucsc/builds.txt.sample tool-data/shared/ucsc/ucsc_build_sites.txt.sample diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/create_db.py --- a/scripts/create_db.py +++ b/scripts/create_db.py @@ -1,19 +1,19 @@ """ Creates the initial galaxy database schema using the settings defined in -universe_wsgi.ini. +config/galaxy.ini. This script is also wrapped by create_db.sh. .. note: pass '-c /location/to/your_config.ini' for non-standard ini file locations. -.. note: if no database_connection is set in universe_wsgi.ini, the default, -sqlite database will be constructed. - Using the database_file setting in universe_wsgi.ini will create the file - at the settings location (??) +.. note: if no database_connection is set in galaxy.ini, the default, sqlite +database will be constructed. + Using the database_file setting in galaxy.ini will create the file at the + settings location (??) -.. seealso: universe_wsgi.ini, specifically the settings: database_connection -and database file +.. 
seealso: galaxy.ini, specifically the settings: database_connection and +database file """ import sys diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/data_libraries/build_lucene_index.py --- a/scripts/data_libraries/build_lucene_index.py +++ b/scripts/data_libraries/build_lucene_index.py @@ -3,8 +3,8 @@ Build index for full-text lucene search of files in data libraries. Requires a full text search server and configuration settings in -universe_wsgi.ini. See the lucene settings in the data library -search section for more details. +galaxy.ini. See the lucene settings in the data library search section for more +details. Run from the ~/scripts/data_libraries directory: %sh build_lucene_index.sh @@ -32,7 +32,7 @@ ignore_exts = gconfig.get( "fulltext_noindex_filetypes", "" ).split( "," ) search_url = gconfig.get( "fulltext_url", None ) if not search_url: - raise ValueError( "Need to specify search functionality in universe_wsgi.ini" ) + raise ValueError( "Need to specify search functionality in galaxy.ini" ) dataset_file = create_dataset_file( get_lddas( sa_session, max_size, ignore_exts ) ) try: build_index( search_url, dataset_file ) diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/data_libraries/build_lucene_index.sh --- a/scripts/data_libraries/build_lucene_index.sh +++ b/scripts/data_libraries/build_lucene_index.sh @@ -1,4 +1,4 @@ #!/bin/sh cd `dirname $0`/../.. -python ./scripts/data_libraries/build_lucene_index.py ./universe_wsgi.ini +python ./scripts/data_libraries/build_lucene_index.py ./config/galaxy.ini diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/data_libraries/build_whoosh_index.py --- a/scripts/data_libraries/build_whoosh_index.py +++ b/scripts/data_libraries/build_whoosh_index.py @@ -2,8 +2,8 @@ """ Build index for full-text whoosh search of files in data libraries. -Requires configuration settings in universe_wsgi.ini. See the whoosh settings -in the data library search section for more details. +Requires configuration settings in galaxy.ini. See the whoosh settings in the +data library search section for more details. Run from the ~/scripts/data_libraries directory: %sh build_whoosh_index.sh diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/data_libraries/build_whoosh_index.sh --- a/scripts/data_libraries/build_whoosh_index.sh +++ b/scripts/data_libraries/build_whoosh_index.sh @@ -1,4 +1,4 @@ #!/bin/sh cd `dirname $0`/../.. 
-python ./scripts/data_libraries/build_whoosh_index.py ./universe_wsgi.ini +python ./scripts/data_libraries/build_whoosh_index.py ./config/galaxy.ini diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/fetch_eggs.py --- a/scripts/fetch_eggs.py +++ b/scripts/fetch_eggs.py @@ -9,7 +9,7 @@ from optparse import OptionParser parser = OptionParser() -parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (universe_wsgi.ini)', default='universe_wsgi.ini' ) +parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (config/galaxy.ini)', default='config/galaxy.ini' ) parser.add_option( '-e', '--egg-name', dest='egg_name', help='Egg name (as defined in eggs.ini) to fetch, or "all" for all eggs, even those not needed by your configuration' ) parser.add_option( '-p', '--platform', dest='platform', help='Fetch for a specific platform (by default, eggs are fetched for *this* platform' ) ( options, args ) = parser.parse_args() @@ -49,7 +49,7 @@ print "%s %s is installed at %s" % ( dist.project_name, dist.version, dist.location ) except EggNotFetchable, e: config_arg = '' - if options.config != 'universe_wsgi.ini': + if options.config != 'config/galaxy.ini': config_arg = '-c %s ' % options.config try: assert options.egg_name != 'all' diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/functional_tests.py --- a/scripts/functional_tests.py +++ b/scripts/functional_tests.py @@ -100,7 +100,7 @@ """ cwd = os.getcwd() static_dir = os.path.join( cwd, 'static' ) - #TODO: these should be copied from universe_wsgi.ini + #TODO: these should be copied from config/galaxy.ini return dict( #TODO: static_enabled needed here? static_enabled=True, @@ -398,8 +398,8 @@ for label in kwargs: config_tuple = label, kwargs[ label ] config_items.append( config_tuple ) - # Write a temporary file, based on universe_wsgi.ini.sample, using the configuration options defined above. - generate_config_file( 'universe_wsgi.ini.sample', galaxy_config_file, config_items ) + # Write a temporary file, based on config/galaxy.ini.sample, using the configuration options defined above. + generate_config_file( 'config/galaxy.ini.sample', galaxy_config_file, config_items ) # Set the global_conf[ '__file__' ] option to the location of the temporary .ini file, which gets passed to set_metadata.sh. 
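The generate_config_file( 'config/galaxy.ini.sample', galaxy_config_file, config_items ) call above fills the shipped sample with per-run overrides before the test server starts. As a rough illustration of what such a sample-based writer could look like (assumed behavior only; the helper name, output path, and override list below are illustrative, not Galaxy's actual test-framework code):

from ConfigParser import SafeConfigParser

def write_config_from_sample( sample_path, output_path, config_items ):
    # Load the shipped sample, override the selected [app:main]
    # options, and write the result for the test server to read.
    parser = SafeConfigParser()
    parser.read( sample_path )
    for key, value in config_items:
        parser.set( 'app:main', key, str( value ) )
    with open( output_path, 'w' ) as out:
        parser.write( out )

# Hypothetical usage mirroring the test setup above:
write_config_from_sample( 'config/galaxy.ini.sample', '/tmp/galaxy_test.ini',
                          [ ( 'database_connection', 'sqlite:///database/universe.sqlite' ) ] )

Note that parser.write() discards comments from the sample, which is fine for a throwaway test config.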
kwargs[ 'global_conf' ] = get_webapp_global_conf() kwargs[ 'global_conf' ][ '__file__' ] = galaxy_config_file diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/helper.py --- a/scripts/helper.py +++ b/scripts/helper.py @@ -9,10 +9,10 @@ from ConfigParser import ConfigParser from optparse import OptionParser -default_config = os.path.abspath( os.path.join( os.path.dirname( __file__ ), '..', 'universe_wsgi.ini') ) +default_config = os.path.abspath( os.path.join( os.path.dirname( __file__ ), '..', 'config/galaxy.ini') ) parser = OptionParser() -parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (universe_wsgi.ini)', default=default_config ) +parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (config/galaxy.ini)', default=default_config ) parser.add_option( '-e', '--encode-id', dest='encode_id', help='Encode an ID' ) parser.add_option( '-d', '--decode-id', dest='decode_id', help='Decode an ID' ) parser.add_option( '--hda', dest='hda_id', help='Display HistoryDatasetAssociation info' ) diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/make_egg_packager.py --- a/scripts/make_egg_packager.py +++ b/scripts/make_egg_packager.py @@ -4,7 +4,7 @@ from optparse import OptionParser parser = OptionParser() -parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (universe_wsgi.ini)', default='universe_wsgi.ini' ) +parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (config/galaxy.ini)', default='config/galaxy.ini' ) parser.add_option( '-p', '--platform', dest='platform', help='Fetch for a specific platform (by default, eggs are fetched for *this* platform' ) ( options, args ) = parser.parse_args() diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/manage_tools.py --- a/scripts/manage_tools.py +++ b/scripts/manage_tools.py @@ -22,7 +22,7 @@ log = logging.getLogger( __name__ ) -config_file = read_config_file_arg( sys.argv, 'universe_wsgi.ini' ) +config_file = read_config_file_arg( sys.argv, 'config/galaxy.ini' ) if not os.path.exists( config_file ): print "Galaxy config file does not exist (hint: use '-c config.ini' for non-standard locations): %s" % config_file sys.exit( 1 ) @@ -31,7 +31,7 @@ cp = SafeConfigParser() cp.read( config_file ) -if config_file == 'universe_wsgi.ini.sample' and 'GALAXY_TEST_DBURI' in os.environ: +if config_file == 'config/galaxy.ini.sample' and 'GALAXY_TEST_DBURI' in os.environ: # Running functional tests. db_url = os.environ[ 'GALAXY_TEST_DBURI' ] elif cp.has_option( "app:main", "install_database_connection" ): diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/others/incorrect_gops_jobs.sh --- a/scripts/others/incorrect_gops_jobs.sh +++ b/scripts/others/incorrect_gops_jobs.sh @@ -1,4 +1,4 @@ #!/bin/sh cd `dirname $0`/../.. -python ./scripts/others/incorrect_gops_jobs.py ./universe_wsgi.ini >> ./scripts/others/incorrect_gops_jobs.log +python ./scripts/others/incorrect_gops_jobs.py ./config/galaxy.ini >> ./scripts/others/incorrect_gops_jobs.log diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/others/incorrect_gops_join_jobs.sh --- a/scripts/others/incorrect_gops_join_jobs.sh +++ b/scripts/others/incorrect_gops_join_jobs.sh @@ -1,4 +1,4 @@ #!/bin/sh cd `dirname $0`/../.. 
-python ./scripts/others/incorrect_gops_join_jobs.py ./universe_wsgi.ini >> ./scripts/others/incorrect_gops_join_jobs.log +python ./scripts/others/incorrect_gops_join_jobs.py ./config/galaxy.ini >> ./scripts/others/incorrect_gops_join_jobs.log diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/scramble.py --- a/scripts/scramble.py +++ b/scripts/scramble.py @@ -2,7 +2,7 @@ from optparse import OptionParser parser = OptionParser() -parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (universe_wsgi.ini)', default='universe_wsgi.ini' ) +parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (config/galaxy.ini)', default='config/galaxy.ini' ) parser.add_option( '-e', '--egg-name', dest='egg_name', help='Egg name (as defined in eggs.ini) to fetch, or "all" for all eggs, even those not needed by your configuration' ) ( options, args ) = parser.parse_args() @@ -36,7 +36,7 @@ sys.exit( 1 ) for dependency in egg.dependencies: config_arg = '' - if options.config != 'universe_wsgi.ini': + if options.config != 'config/galaxy.ini': config_arg = '-c %s' % options.config print "Checking %s dependency: %s" % ( egg.name, dependency ) try: diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/set_dataset_sizes.py --- a/scripts/set_dataset_sizes.py +++ b/scripts/set_dataset_sizes.py @@ -4,10 +4,10 @@ from ConfigParser import ConfigParser from optparse import OptionParser -default_config = os.path.abspath( os.path.join( os.path.dirname( __file__ ), '..', 'universe_wsgi.ini') ) +default_config = os.path.abspath( os.path.join( os.path.dirname( __file__ ), '..', 'config/galaxy.ini') ) parser = OptionParser() -parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (universe_wsgi.ini)', default=default_config ) +parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (config/galaxy.ini)', default=default_config ) ( options, args ) = parser.parse_args() def init(): diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/set_user_disk_usage.py --- a/scripts/set_user_disk_usage.py +++ b/scripts/set_user_disk_usage.py @@ -4,10 +4,10 @@ from ConfigParser import ConfigParser from optparse import OptionParser -default_config = os.path.abspath( os.path.join( os.path.dirname( __file__ ), '..', 'universe_wsgi.ini') ) +default_config = os.path.abspath( os.path.join( os.path.dirname( __file__ ), '..', 'config/galaxy.ini') ) parser = OptionParser() -parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (universe_wsgi.ini)', default=default_config ) +parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (config/galaxy.ini)', default=default_config ) parser.add_option( '-u', '--username', dest='username', help='Username of user to update', default='all' ) parser.add_option( '-e', '--email', dest='email', help='Email address of user to update', default='all' ) parser.add_option( '--dry-run', dest='dryrun', help='Dry run (show changes but do not save to database)', action='store_true', default=False ) diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/sync_reports_config.py --- a/scripts/sync_reports_config.py +++ b/scripts/sync_reports_config.py @@ -5,13 +5,13 @@ MAIN_SECTION = "app:main" def sync(): - # Add or replace the relevant properties from
universe_wsgi.ini + # Add or replace the relevant properties from galaxy.ini # into reports_wsgi.ini - reports_config_file = "reports_wsgi.ini" + reports_config_file = "config/reports_wsgi.ini" if len(argv) > 1: reports_config_file = argv[1] - universe_config_file = "universe_wsgi.ini" + universe_config_file = "config/galaxy.ini" if len(argv) > 2: universe_config_file = argv[2] diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb scripts/transfer.py --- a/scripts/transfer.py +++ b/scripts/transfer.py @@ -41,8 +41,8 @@ """ def __init__( self ): self.parser = optparse.OptionParser() - self.parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (universe_wsgi.ini)', - default=os.path.abspath( os.path.join( galaxy_root, 'universe_wsgi.ini' ) ) ) + self.parser.add_option( '-c', '--config', dest='config', help='Path to Galaxy config file (config/galaxy.ini)', + default=os.path.abspath( os.path.join( galaxy_root, 'config/galaxy.ini' ) ) ) self.parser.add_option( '-d', '--debug', action='store_true', dest='debug', help="Debug (don't detach)" ) self.parser.add_option( '-s', '--slow', action='store_true', dest='slow', help="Transfer slowly (for debugging)" ) self.opts = None diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb static/scripts/galaxy-app-base.js --- a/static/scripts/galaxy-app-base.js +++ b/static/scripts/galaxy-app-base.js @@ -11,7 +11,7 @@ * logger : the logger/metrics-logger * localize : the string localizer * config : the current configuration (any k/v in - * universe_wsgi.ini available from the configuration API) + * galaxy.ini available from the configuration API) * user : the current user (as a mvc/user/user-model) */ function GalaxyApp( options ){ diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb templates/galaxy_client_app.mako --- a/templates/galaxy_client_app.mako +++ b/templates/galaxy_client_app.mako @@ -45,7 +45,7 @@ ## ---------------------------------------------------------------------------- <%def name="get_config_dict()"> - ## Return a dictionary of universe_wsgi.ini settings + ## Return a dictionary of galaxy.ini settings <% config_dict = {} try: diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb test/casperjs/modules/user.js --- a/test/casperjs/modules/user.js +++ b/test/casperjs/modules/user.js @@ -220,13 +220,13 @@ }; // ------------------------------------------------------------------- Admin -/** Gets the admin user data from spaceghost if set and checks the universe_wsgi.ini file for the email. +/** Gets the admin user data from spaceghost if set and checks the galaxy.ini file for the email. * @returns {Object|null} the admin data object (email, password, username) - * or null if no admin is set in both the universe_wsgi.ini and spaceghost. + * or null if no admin is set in both the galaxy.ini and spaceghost. */ User.prototype.getAdminData = function getAdminData(){ //TODO: this might be better inside sg - // check for the setting in sg and the universe_wsgi.ini file + // check for the setting in sg and the galaxy.ini file var adminData = this.spaceghost.options.adminUser, iniAdminEmails = this.spaceghost.getUniverseSetting( 'admin_users' ); iniAdminEmails = ( iniAdminEmails )?
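The sync_reports_config.py hunk above only repoints the default paths; the copying itself is plain ConfigParser work. A minimal sketch of that pattern, with a hypothetical property list (the real script defines its own set of keys to carry over; function and variable names here are illustrative):

from ConfigParser import SafeConfigParser

MAIN_SECTION = "app:main"

def sync_properties( galaxy_ini, reports_ini, properties ):
    # Read both paste-style ini files, then copy each requested
    # [app:main] option from the Galaxy config into the reports config.
    galaxy_config = SafeConfigParser()
    galaxy_config.read( galaxy_ini )
    reports_config = SafeConfigParser()
    reports_config.read( reports_ini )
    for prop in properties:
        if galaxy_config.has_option( MAIN_SECTION, prop ):
            # raw=True keeps %(here)s-style values unexpanded on copy.
            value = galaxy_config.get( MAIN_SECTION, prop, raw=True )
            reports_config.set( MAIN_SECTION, prop, value )
    with open( reports_ini, 'w' ) as out:
        reports_config.write( out )

# Hypothetical invocation with the new default locations:
sync_properties( 'config/galaxy.ini', 'config/reports_wsgi.ini',
                 [ 'database_connection', 'file_path' ] )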
diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb test/casperjs/spaceghost.js --- a/test/casperjs/spaceghost.js +++ b/test/casperjs/spaceghost.js @@ -1141,12 +1141,12 @@ return read.match( regex ); }; -/** Read a configuration setting from the universe_wsgi.ini file. +/** Read a configuration setting from the galaxy.ini file. * @param {String} iniKey the setting key to find * @returns {String} value from file for iniKey (or null if not found or commented out) */ SpaceGhost.prototype.getUniverseSetting = function getUniverseSetting( iniKey ){ - var iniFilepath = '../../universe_wsgi.ini', + var iniFilepath = '../../config/galaxy.ini', regex = new RegExp( '^([#]*)\\\s*' + iniKey + '\\\s*=\\\s*(.*)$', 'm' ), match = this.searchFile( iniFilepath, regex ); this.debug( 'regex: ' + regex ); diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb test/functional/tools/README.txt --- a/test/functional/tools/README.txt +++ b/test/functional/tools/README.txt @@ -10,6 +10,6 @@ to ensure these tools get loaded during the testing process. Finally, to play around with these tools interactively - simply -replace the 'universe_wsgi.ini' option 'tool_config_file' with: +replace the 'galaxy.ini' option 'tool_config_file' with: tool_config_file = test/functional/tools/samples_tool_conf.xml diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb test/install_and_test_tool_shed_repositories/base/util.py --- a/test/install_and_test_tool_shed_repositories/base/util.py +++ b/test/install_and_test_tool_shed_repositories/base/util.py @@ -453,7 +453,7 @@ """ cwd = os.getcwd() static_dir = os.path.join( cwd, 'static' ) - #TODO: these should be copied from universe_wsgi.ini + #TODO: these should be copied from galaxy.ini #TODO: static_enabled needed here? return dict( static_enabled = True, static_cache_time = 360, diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py --- a/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py +++ b/test/install_and_test_tool_shed_repositories/repositories_with_tools/functional_tests.py @@ -495,8 +495,8 @@ for label in kwargs: config_tuple = label, kwargs[ label ] config_items.append( config_tuple ) - # Write a temporary file, based on universe_wsgi.ini.sample, using the configuration options defined above. - generate_config_file( 'universe_wsgi.ini.sample', galaxy_config_file, config_items ) + # Write a temporary file, based on galaxy.ini.sample, using the configuration options defined above. + generate_config_file( 'config/galaxy.ini.sample', galaxy_config_file, config_items ) # kwargs must be a list when passed to the Galaxy webapp (i.e., UniverseApplication). # The following is used to set metadata externally.
kwargs[ 'tool_config_file' ] = [ galaxy_tool_conf_file, galaxy_shed_tool_conf_file ] diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py --- a/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py +++ b/test/install_and_test_tool_shed_repositories/tool_dependency_definitions/functional_tests.py @@ -347,8 +347,8 @@ for label in kwargs: config_tuple = label, kwargs[ label ] config_items.append( config_tuple ) - # Write a temporary file, based on universe_wsgi.ini.sample, using the configuration options defined above. - generate_config_file( 'universe_wsgi.ini.sample', galaxy_config_file, config_items ) + # Write a temporary file, based on galaxy.ini.sample, using the configuration options defined above. + generate_config_file( 'config/galaxy.ini.sample', galaxy_config_file, config_items ) kwargs[ 'tool_config_file' ] = [ galaxy_tool_conf_file, galaxy_shed_tool_conf_file ] # Set the global_conf[ '__file__' ] option to the location of the temporary .ini file, which gets passed to set_metadata.sh. kwargs[ 'global_conf' ] = install_and_test_base_util.get_webapp_global_conf() diff -r bbf9c5f5fdaf96fd02639f3342e47d4eecae8a4f -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb test/tool_shed/functional_tests.py --- a/test/tool_shed/functional_tests.py +++ b/test/tool_shed/functional_tests.py @@ -94,7 +94,7 @@ """ cwd = os.getcwd() static_dir = os.path.join( cwd, 'static' ) - #TODO: these should be copied from universe_wsgi.ini + #TODO: these should be copied from galaxy.ini return dict( #TODO: static_enabled needed here? static_enabled = True, @@ -356,7 +356,7 @@ if not os.environ.get( 'GALAXY_SHED_DATA_MANAGER_CONF' ): open( galaxy_shed_data_manager_conf_file, 'wb' ).write( shed_data_manager_conf_xml_template ) galaxy_global_conf = get_webapp_global_conf() - galaxy_global_conf[ '__file__' ] = 'universe_wsgi.ini.sample' + galaxy_global_conf[ '__file__' ] = 'config/galaxy.ini.sample' kwargs = dict( allow_user_creation = True, allow_user_deletion = True, https://bitbucket.org/galaxy/galaxy-central/commits/aeca0d388e83/ Changeset: aeca0d388e83 User: natefoo Date: 2014-09-15 22:52:50 Summary: Bug fixes to scripts for config subdir. 
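The fixes below share one pattern: the scripts used to os.chdir() into the config file's directory and then rely on paths relative to it, which stopped working once the config moved into config/ (that directory is no longer the Galaxy root, so 'lib' and the config basename no longer resolve). A condensed before/after sketch of the idiom, paraphrasing the hunks that follow:

import os
import sys
from ConfigParser import ConfigParser
from optparse import OptionParser

parser = OptionParser()
parser.add_option( '-c', '--config', dest='config', default='config/galaxy.ini' )
( options, args ) = parser.parse_args()

options.config = os.path.abspath( options.config )
# Old (broken once configs moved): chdir to the config's directory and
# hope 'lib' is a sibling of the config file.
#   os.chdir( os.path.dirname( options.config ) )
#   sys.path.append( 'lib' )
# New: anchor lib/ on the script's own location and read the config by
# its absolute path instead of its basename.
sys.path.append( os.path.join( os.path.dirname( __file__ ), '..', 'lib' ) )
config = ConfigParser()
config.read( options.config )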
Affected #: 3 files diff -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 scripts/helper.py --- a/scripts/helper.py +++ b/scripts/helper.py @@ -26,8 +26,7 @@ sys.exit( 1 ) options.config = os.path.abspath( options.config ) -os.chdir( os.path.dirname( options.config ) ) -sys.path.append( 'lib' ) +sys.path.append( os.path.join( os.path.dirname( __file__ ), '..', 'lib' ) ) from galaxy import eggs import pkg_resources @@ -35,7 +34,7 @@ config = ConfigParser( dict( file_path = 'database/files', id_secret = 'USING THE DEFAULT IS NOT SECURE!', database_connection = 'sqlite:///database/universe.sqlite?isolation_level=IMMEDIATE' ) ) -config.read( os.path.basename( options.config ) ) +config.read( options.config ) from galaxy.web import security from galaxy.model import mapping diff -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 scripts/set_dataset_sizes.py --- a/scripts/set_dataset_sizes.py +++ b/scripts/set_dataset_sizes.py @@ -13,15 +13,14 @@ def init(): options.config = os.path.abspath( options.config ) - os.chdir( os.path.dirname( options.config ) ) - sys.path.append( 'lib' ) + sys.path.append( os.path.join( os.path.dirname( __file__ ), '..', 'lib' ) ) from galaxy import eggs import pkg_resources config = ConfigParser( dict( file_path = 'database/files', database_connection = 'sqlite:///database/universe.sqlite?isolation_level=IMMEDIATE' ) ) - config.read( os.path.basename( options.config ) ) + config.read( options.config ) from galaxy.model import mapping diff -r 70d2dff1297db4c5462c6c9889e290790d3ae5bb -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 scripts/set_user_disk_usage.py --- a/scripts/set_user_disk_usage.py +++ b/scripts/set_user_disk_usage.py @@ -21,8 +21,7 @@ if options.email == 'all': options.email = None - os.chdir( os.path.dirname( options.config ) ) - sys.path.append( 'lib' ) + sys.path.append( os.path.join( os.path.dirname( __file__ ), '..', 'lib' ) ) from galaxy import eggs import pkg_resources @@ -35,7 +34,7 @@ config_parser = ConfigParser( dict( here = os.getcwd(), database_connection = 'sqlite:///database/universe.sqlite?isolation_level=IMMEDIATE' ) ) - config_parser.read( os.path.basename( options.config ) ) + config_parser.read( options.config ) config_dict = {} for key, value in config_parser.items( "app:main" ): https://bitbucket.org/galaxy/galaxy-central/commits/14d243c7069e/ Changeset: 14d243c7069e User: natefoo Date: 2014-09-15 22:57:28 Summary: merge. Affected #: 34 files diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/galaxy/tools/parameters/basic.py --- a/lib/galaxy/tools/parameters/basic.py +++ b/lib/galaxy/tools/parameters/basic.py @@ -794,6 +794,8 @@ if value is not None: if not isinstance( value, list ): value = [ value ] + # We could have an unvalidated value here when e.g. running a workflow. 
+ value = [ val.value if isinstance( val, UnvalidatedValue ) else val for val in value ] field = form_builder.SelectField( self.name, self.multiple, self.display, self.refresh_on_change, refresh_on_change_values=self.refresh_on_change_values ) options = self.get_options( trans, context ) for text, optval, selected in options: diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py --- a/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py +++ b/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py @@ -417,12 +417,13 @@ query = irmm.get_query_for_setting_metadata_on_repositories( order=False ) # Now reset metadata on all remaining repositories. for repository in query: - repository_id = trans.security.encode_id( repository.id ) try: - invalid_file_tups, metadata_dict = irmm.reset_all_metadata_on_installed_repository( repository_id ) - if invalid_file_tups: + irmm.set_repository( repository ) + irmm.reset_all_metadata_on_installed_repository() + irmm_invalid_file_tups = irmm.get_invalid_file_tups() + if irmm_invalid_file_tups: message = tool_util.generate_message_for_invalid_tools( trans.app, - invalid_file_tups, + irmm_invalid_file_tups, repository, None, as_html=False ) diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py --- a/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py +++ b/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py @@ -1732,20 +1732,21 @@ tool_path, relative_install_dir = repository.get_tool_relative_path( trans.app ) if relative_install_dir: original_metadata_dict = repository.metadata - irmm = InstalledRepositoryMetadataManager( trans.app ) - metadata_dict, invalid_file_tups = \ - irmm.generate_metadata_for_changeset_revision( repository=repository, - changeset_revision=repository.changeset_revision, - repository_clone_url=repository_clone_url, - shed_config_dict = repository.get_shed_config_dict( trans.app ), - relative_install_dir=relative_install_dir, - repository_files_dir=None, - resetting_all_metadata_on_repository=False, - updating_installed_repository=False, - persist=False ) - repository.metadata = metadata_dict - if metadata_dict != original_metadata_dict: - irmm.update_in_shed_tool_config( repository ) + irmm = InstalledRepositoryMetadataManager( app=trans.app, + repository=repository, + changeset_revision=repository.changeset_revision, + repository_clone_url=repository_clone_url, + shed_config_dict = repository.get_shed_config_dict( trans.app ), + relative_install_dir=relative_install_dir, + repository_files_dir=None, + resetting_all_metadata_on_repository=False, + updating_installed_repository=False, + persist=False ) + irmm.generate_metadata_for_changeset_revision() + irmm_metadata_dict = irmm.get_metadata_dict() + if irmm_metadata_dict != original_metadata_dict: + repository.metadata = irmm_metadata_dict + irmm.update_in_shed_tool_config() trans.install_model.context.add( repository ) trans.install_model.context.flush() message = 'Metadata has been reset on repository <b>%s</b>.' % repository.name @@ -1938,22 +1939,24 @@ dmh.remove_from_data_manager( repository ) # Update the repository metadata. 
tpm = tool_panel_manager.ToolPanelManager( trans.app ) - irmm = InstalledRepositoryMetadataManager( trans.app, tpm ) - metadata_dict, invalid_file_tups = \ - irmm.generate_metadata_for_changeset_revision( repository=repository, - changeset_revision=latest_changeset_revision, - repository_clone_url=repository_clone_url, - shed_config_dict=repository.get_shed_config_dict( trans.app ), - relative_install_dir=relative_install_dir, - repository_files_dir=None, - resetting_all_metadata_on_repository=False, - updating_installed_repository=True, - persist=True ) - if 'tools' in metadata_dict: - tool_panel_dict = metadata_dict.get( 'tool_panel_section', None ) + irmm = InstalledRepositoryMetadataManager( app=trans.app, + tpm=tpm, + repository=repository, + changeset_revision=latest_changeset_revision, + repository_clone_url=repository_clone_url, + shed_config_dict=repository.get_shed_config_dict( trans.app ), + relative_install_dir=relative_install_dir, + repository_files_dir=None, + resetting_all_metadata_on_repository=False, + updating_installed_repository=True, + persist=True ) + irmm.generate_metadata_for_changeset_revision() + irmm_metadata_dict = irmm.get_metadata_dict() + if 'tools' in irmm_metadata_dict: + tool_panel_dict = irmm_metadata_dict.get( 'tool_panel_section', None ) if tool_panel_dict is None: tool_panel_dict = tpm.generate_tool_panel_dict_from_shed_tool_conf_entries( repository ) - repository_tools_tups = irmm.get_repository_tools_tups( metadata_dict ) + repository_tools_tups = irmm.get_repository_tools_tups() tpm.add_to_tool_panel( repository_name=str( repository.name ), repository_clone_url=repository_clone_url, changeset_revision=str( repository.installed_changeset_revision ), @@ -1963,18 +1966,18 @@ tool_panel_dict=tool_panel_dict, new_install=False ) # Add new Data Manager entries - if 'data_manager' in metadata_dict: + if 'data_manager' in irmm_metadata_dict: dmh = data_manager.DataManagerHandler( trans.app ) new_data_managers = dmh.install_data_managers( trans.app.config.shed_data_manager_config_file, - metadata_dict, + irmm_metadata_dict, repository.get_shed_config_dict( trans.app ), os.path.join( relative_install_dir, name ), repository, repository_tools_tups ) - if 'repository_dependencies' in metadata_dict or 'tool_dependencies' in metadata_dict: - new_repository_dependencies_dict = metadata_dict.get( 'repository_dependencies', {} ) + if 'repository_dependencies' in irmm_metadata_dict or 'tool_dependencies' in irmm_metadata_dict: + new_repository_dependencies_dict = irmm_metadata_dict.get( 'repository_dependencies', {} ) new_repository_dependencies = new_repository_dependencies_dict.get( 'repository_dependencies', [] ) - new_tool_dependencies_dict = metadata_dict.get( 'tool_dependencies', {} ) + new_tool_dependencies_dict = irmm_metadata_dict.get( 'tool_dependencies', {} ) if new_repository_dependencies: # [[http://localhost:9009', package_picard_1_56_0', devteam', 910b0b056666', False', False']] proceed_to_install = False @@ -2017,7 +2020,7 @@ updating_repository_id=trans.security.encode_id( repository.id ), updating_to_ctx_rev=latest_ctx_rev, updating_to_changeset_revision=latest_changeset_revision, - encoded_updated_metadata=encoding_util.tool_shed_encode( metadata_dict ), + encoded_updated_metadata=encoding_util.tool_shed_encode( irmm_metadata_dict ), updating=True ) return self.prepare_for_install( trans, **new_kwd ) # Updates received did not include any newly defined repository dependencies but did include @@ -2033,12 +2036,12 @@ proceed_to_install = True break 
if proceed_to_install: - encoded_tool_dependencies_dict = encoding_util.tool_shed_encode( metadata_dict.get( 'tool_dependencies', {} ) ) + encoded_tool_dependencies_dict = encoding_util.tool_shed_encode( irmm_metadata_dict.get( 'tool_dependencies', {} ) ) encoded_relative_install_dir = encoding_util.tool_shed_encode( relative_install_dir ) new_kwd = dict( updating_repository_id=trans.security.encode_id( repository.id ), updating_to_ctx_rev=latest_ctx_rev, updating_to_changeset_revision=latest_changeset_revision, - encoded_updated_metadata=encoding_util.tool_shed_encode( metadata_dict ), + encoded_updated_metadata=encoding_util.tool_shed_encode( irmm_metadata_dict ), encoded_relative_install_dir=encoded_relative_install_dir, encoded_tool_dependencies_dict=encoded_tool_dependencies_dict, message=message, @@ -2047,7 +2050,7 @@ # Updates received did not include any newly defined repository dependencies or newly defined # tool dependencies that need to be installed. repository = trans.app.update_repository_manager.update_repository_record( repository=repository, - updated_metadata_dict=metadata_dict, + updated_metadata_dict=irmm_metadata_dict, updated_changeset_revision=latest_changeset_revision, updated_ctx_rev=latest_ctx_rev ) message = "The installed repository named '%s' has been updated to change set revision '%s'. " % \ diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/galaxy/webapps/tool_shed/api/repositories.py --- a/lib/galaxy/webapps/tool_shed/api/repositories.py +++ b/lib/galaxy/webapps/tool_shed/api/repositories.py @@ -429,13 +429,13 @@ def handle_repository( trans, rmm, repository, results ): log.debug( "Resetting metadata on repository %s" % str( repository.name ) ) - repository_id = trans.security.encode_id( repository.id ) try: - invalid_file_tups, metadata_dict = \ - rmm.reset_all_metadata_on_repository_in_tool_shed( repository_id ) - if invalid_file_tups: + rmm.set_repository( repository ) + rmm.reset_all_metadata_on_repository_in_tool_shed() + rmm_invalid_file_tups = rmm.get_invalid_file_tups() + if rmm_invalid_file_tups: message = tool_util.generate_message_for_invalid_tools( trans.app, - invalid_file_tups, + rmm_invalid_file_tups, repository, None, as_html=False ) @@ -451,7 +451,11 @@ status = '%s : %s' % ( str( repository.name ), message ) results[ 'repository_status' ].append( status ) return results - rmm = repository_metadata_manager.RepositoryMetadataManager( trans.app, trans.user ) + rmm = repository_metadata_manager.RepositoryMetadataManager( app=trans.app, + user=trans.user, + resetting_all_metadata_on_repository=True, + updating_installed_repository=False, + persist=False ) start_time = strftime( "%Y-%m-%d %H:%M:%S" ) results = dict( start_time=start_time, repository_status=[], @@ -511,12 +515,17 @@ results = dict( start_time=start_time, repository_status=[] ) try: - rmm = repository_metadata_manager.RepositoryMetadataManager( trans.app, trans.user ) - invalid_file_tups, metadata_dict = \ - rmm.reset_all_metadata_on_repository_in_tool_shed( trans.security.encode_id( repository.id ) ) - if invalid_file_tups: + rmm = repository_metadata_manager.RepositoryMetadataManager( app=trans.app, + user=trans.user, + repository=repository, + resetting_all_metadata_on_repository=True, + updating_installed_repository=False, + persist=False ) + rmm.reset_all_metadata_on_repository_in_tool_shed() + rmm_invalid_file_tups = rmm.get_invalid_file_tups() + if rmm_invalid_file_tups: message = tool_util.generate_message_for_invalid_tools( 
trans.app, - invalid_file_tups, + rmm_invalid_file_tups, repository, None, as_html=False ) diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/galaxy/webapps/tool_shed/controllers/hg.py --- a/lib/galaxy/webapps/tool_shed/controllers/hg.py +++ b/lib/galaxy/webapps/tool_shed/controllers/hg.py @@ -1,6 +1,8 @@ import os, logging from galaxy import web from galaxy.web.base.controller import BaseUIController + +from tool_shed.util.common_util import generate_clone_url_for_repository_in_tool_shed from tool_shed.util.shed_util_common import get_repository_by_name_and_owner from tool_shed.util.hg_util import update_repository from tool_shed.metadata import repository_metadata_manager @@ -47,9 +49,19 @@ # interface will result in a new head being created. repo = hg.repository( ui.ui(), repository.repo_path( trans.app ) ) update_repository( repo, ctx_rev=None ) + repository_clone_url = generate_clone_url_for_repository_in_tool_shed( trans.user, repository ) # Set metadata using the repository files on disk. - rmm = repository_metadata_manager.RepositoryMetadataManager( trans.app, trans.user ) - error_message, status = rmm.set_repository_metadata( trans.request.host, repository ) + rmm = repository_metadata_manager.RepositoryMetadataManager( app=trans.app, + user=trans.user, + repository=repository, + changeset_revision=repository.tip( trans.app ), + repository_clone_url=repository_clone_url, + relative_install_dir=repository.repo_path( trans.app ), + repository_files_dir=None, + resetting_all_metadata_on_repository=False, + updating_installed_repository=False, + persist=False ) + error_message, status = rmm.set_repository_metadata( trans.request.host ) if status == 'ok' and error_message: log.debug( "Successfully reset metadata on repository %s owned by %s, but encountered problem: %s" % \ ( str( repository.name ), str( repository.user.username ), error_message ) ) diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/galaxy/webapps/tool_shed/controllers/repository.py --- a/lib/galaxy/webapps/tool_shed/controllers/repository.py +++ b/lib/galaxy/webapps/tool_shed/controllers/repository.py @@ -2759,15 +2759,18 @@ def reset_all_metadata( self, trans, id, **kwd ): """Reset all metadata on the complete changelog for a single repository in the tool shed.""" # This method is called only from the ~/templates/webapps/tool_shed/repository/manage_repository.mako template. - rmm = repository_metadata_manager.RepositoryMetadataManager( trans.app, trans.user ) - invalid_file_tups, metadata_dict = \ - rmm.reset_all_metadata_on_repository_in_tool_shed( id ) - if invalid_file_tups: - repository = suc.get_repository_in_tool_shed( trans.app, id ) + repository = suc.get_repository_in_tool_shed( trans.app, id ) + rmm = repository_metadata_manager.RepositoryMetadataManager( app=trans.app, + user=trans.user, + repository=repository ) + rmm.reset_all_metadata_on_repository_in_tool_shed() + rmm_metadata_dict = rmm.get_metadata_dict() + rmm_invalid_file_tups = rmm.get_invalid_file_tups() + if rmm_invalid_file_tups: message = tool_util.generate_message_for_invalid_tools( trans.app, - invalid_file_tups, + rmm_invalid_file_tups, repository, - metadata_dict ) + rmm_metadata_dict ) status = 'error' else: message = "All repository metadata has been reset. " @@ -2849,10 +2852,10 @@ if tip == repository.tip( trans.app ): message += 'No changes to repository. 
' else: - rmm = repository_metadata_manager.RepositoryMetadataManager( trans.app, trans.user ) - status, error_message = rmm.set_repository_metadata_due_to_new_tip( trans.request.host, - repository, - **kwd ) + rmm = repository_metadata_manager.RepositoryMetadataManager( app=trans.app, + user=trans.user, + repository=repository ) + status, error_message = rmm.set_repository_metadata_due_to_new_tip( trans.request.host, **kwd ) if error_message: message = error_message else: diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/galaxy/webapps/tool_shed/controllers/upload.py --- a/lib/galaxy/webapps/tool_shed/controllers/upload.py +++ b/lib/galaxy/webapps/tool_shed/controllers/upload.py @@ -263,10 +263,11 @@ ( len( files_to_remove ), upload_point ) else: message += " %d files were removed from the repository root. " % len( files_to_remove ) - rmm = repository_metadata_manager.RepositoryMetadataManager( trans.app, trans.user ) + rmm = repository_metadata_manager.RepositoryMetadataManager( app=trans.app, + user=trans.user, + repository=repository ) status, error_message = \ rmm.set_repository_metadata_due_to_new_tip( trans.request.host, - repository, content_alert_str=content_alert_str, **kwd ) if error_message: diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/tool_shed/capsule/capsule_manager.py --- a/lib/tool_shed/capsule/capsule_manager.py +++ b/lib/tool_shed/capsule/capsule_manager.py @@ -792,9 +792,10 @@ results_dict[ 'ok' ] = False results_dict[ 'error_message' ] += error_message try: - rmm = repository_metadata_manager.RepositoryMetadataManager( self.app, self.user ) + rmm = repository_metadata_manager.RepositoryMetadataManager( app=self.app, + user=self.user, + repository=repository ) status, error_message = rmm.set_repository_metadata_due_to_new_tip( self.host, - repository, content_alert_str=content_alert_str ) if error_message: results_dict[ 'ok' ] = False diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/tool_shed/galaxy_install/install_manager.py --- a/lib/tool_shed/galaxy_install/install_manager.py +++ b/lib/tool_shed/galaxy_install/install_manager.py @@ -510,31 +510,33 @@ """ shed_config_dict = self.app.toolbox.get_shed_config_dict_by_filename( shed_tool_conf ) tdtm = data_table_manager.ToolDataTableManager( self.app ) - irmm = InstalledRepositoryMetadataManager( self.app, self.tpm ) - metadata_dict, invalid_file_tups = \ - irmm.generate_metadata_for_changeset_revision( repository=tool_shed_repository, - changeset_revision=tool_shed_repository.changeset_revision, - repository_clone_url=repository_clone_url, - shed_config_dict=shed_config_dict, - relative_install_dir=relative_install_dir, - repository_files_dir=None, - resetting_all_metadata_on_repository=False, - updating_installed_repository=False, - persist=True ) - tool_shed_repository.metadata = metadata_dict + irmm = InstalledRepositoryMetadataManager( app=self.app, + tpm=self.tpm, + repository=tool_shed_repository, + changeset_revision=tool_shed_repository.changeset_revision, + repository_clone_url=repository_clone_url, + shed_config_dict=shed_config_dict, + relative_install_dir=relative_install_dir, + repository_files_dir=None, + resetting_all_metadata_on_repository=False, + updating_installed_repository=False, + persist=True ) + irmm.generate_metadata_for_changeset_revision() + irmm_metadata_dict = irmm.get_metadata_dict() + tool_shed_repository.metadata = irmm_metadata_dict # Update 
the tool_shed_repository.tool_shed_status column in the database. tool_shed_status_dict = suc.get_tool_shed_status_for_installed_repository( self.app, tool_shed_repository ) if tool_shed_status_dict: tool_shed_repository.tool_shed_status = tool_shed_status_dict self.install_model.context.add( tool_shed_repository ) self.install_model.context.flush() - if 'tool_dependencies' in metadata_dict and not reinstalling: + if 'tool_dependencies' in irmm_metadata_dict and not reinstalling: tool_dependencies = tool_dependency_util.create_tool_dependency_objects( self.app, tool_shed_repository, relative_install_dir, set_status=True ) - if 'sample_files' in metadata_dict: - sample_files = metadata_dict.get( 'sample_files', [] ) + if 'sample_files' in irmm_metadata_dict: + sample_files = irmm_metadata_dict.get( 'sample_files', [] ) tool_index_sample_files = tdtm.get_tool_index_sample_files( sample_files ) tool_data_table_conf_filename, tool_data_table_elems = \ tdtm.install_tool_data_tables( tool_shed_repository, tool_index_sample_files ) @@ -543,13 +545,13 @@ None, self.app.config.shed_tool_data_table_config, persist=True ) - if 'tools' in metadata_dict: - tool_panel_dict = self.tpm.generate_tool_panel_dict_for_new_install( metadata_dict[ 'tools' ], tool_section ) - sample_files = metadata_dict.get( 'sample_files', [] ) + if 'tools' in irmm_metadata_dict: + tool_panel_dict = self.tpm.generate_tool_panel_dict_for_new_install( irmm_metadata_dict[ 'tools' ], tool_section ) + sample_files = irmm_metadata_dict.get( 'sample_files', [] ) tool_index_sample_files = tdtm.get_tool_index_sample_files( sample_files ) tool_util.copy_sample_files( self.app, tool_index_sample_files, tool_path=tool_path ) sample_files_copied = [ str( s ) for s in tool_index_sample_files ] - repository_tools_tups = irmm.get_repository_tools_tups( metadata_dict ) + repository_tools_tups = irmm.get_repository_tools_tups() if repository_tools_tups: # Handle missing data table entries for tool parameters that are dynamically generated select lists. 
repository_tools_tups = tdtm.handle_missing_data_table_entry( relative_install_dir, @@ -575,15 +577,15 @@ shed_tool_conf=shed_tool_conf, tool_panel_dict=tool_panel_dict, new_install=True ) - if 'data_manager' in metadata_dict: + if 'data_manager' in irmm_metadata_dict: dmh = data_manager.DataManagerHandler( self.app ) new_data_managers = dmh.install_data_managers( self.app.config.shed_data_manager_config_file, - metadata_dict, + irmm_metadata_dict, shed_config_dict, relative_install_dir, tool_shed_repository, repository_tools_tups ) - if 'datatypes' in metadata_dict: + if 'datatypes' in irmm_metadata_dict: tool_shed_repository.status = self.install_model.ToolShedRepository.installation_status.LOADING_PROPRIETARY_DATATYPES if not tool_shed_repository.includes_datatypes: tool_shed_repository.includes_datatypes = True @@ -604,7 +606,7 @@ name=tool_shed_repository.name, owner=tool_shed_repository.owner, installed_changeset_revision=tool_shed_repository.installed_changeset_revision, - tool_dicts=metadata_dict.get( 'tools', [] ), + tool_dicts=irmm_metadata_dict.get( 'tools', [] ), converter_path=converter_path, display_path=display_path ) if converter_path: diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/tool_shed/galaxy_install/installed_repository_manager.py --- a/lib/tool_shed/galaxy_install/installed_repository_manager.py +++ b/lib/tool_shed/galaxy_install/installed_repository_manager.py @@ -82,9 +82,12 @@ repository.status = self.install_model.ToolShedRepository.installation_status.INSTALLED if repository.includes_tools_for_display_in_tool_panel: tpm = tool_panel_manager.ToolPanelManager( self.app ) - irmm = InstalledRepositoryMetadataManager( self.app, tpm ) - metadata = repository.metadata - repository_tools_tups = irmm.get_repository_tools_tups( metadata ) + irmm = InstalledRepositoryMetadataManager( app=self.app, + tpm=tpm, + repository=repository, + changeset_revision=repository.changeset_revision, + metadata_dict=repository.metadata ) + repository_tools_tups = irmm.get_repository_tools_tups() # Reload tools into the appropriate tool panel section. 
tool_panel_dict = repository.metadata[ 'tool_panel_section' ] tpm.add_to_tool_panel( repository.name, @@ -101,7 +104,7 @@ data_manager_relative_install_dir = os.path.join( data_manager_relative_install_dir, repository.name ) dmh = data_manager.DataManagerHandler( self.app ) new_data_managers = dmh.install_data_managers( self.app.config.shed_data_manager_config_file, - metadata, + repository.metadata, repository.get_shed_config_dict( self.app ), data_manager_relative_install_dir, repository, diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/tool_shed/galaxy_install/metadata/installed_repository_metadata_manager.py --- a/lib/tool_shed/galaxy_install/metadata/installed_repository_metadata_manager.py +++ b/lib/tool_shed/galaxy_install/metadata/installed_repository_metadata_manager.py @@ -19,9 +19,16 @@ class InstalledRepositoryMetadataManager( metadata_generator.MetadataGenerator ): - def __init__( self, app, tpm=None ): - super( InstalledRepositoryMetadataManager, self ).__init__( app ) - self.app = app + def __init__( self, app, tpm=None, repository=None, changeset_revision=None, repository_clone_url=None, + shed_config_dict=None, relative_install_dir=None, repository_files_dir=None, + resetting_all_metadata_on_repository=False, updating_installed_repository=False, + persist=False, metadata_dict=None ): + super( InstalledRepositoryMetadataManager, self ).__init__( app, repository, changeset_revision, + repository_clone_url, shed_config_dict, + relative_install_dir, repository_files_dir, + resetting_all_metadata_on_repository, + updating_installed_repository, persist, + metadata_dict=metadata_dict, user=None ) if tpm is None: self.tpm = tool_panel_manager.ToolPanelManager( self.app ) else: @@ -53,15 +60,15 @@ return self.app.install_model.context.query( self.app.install_model.ToolShedRepository ) \ .filter( self.app.install_model.ToolShedRepository.table.c.uninstalled == False ) - def get_repository_tools_tups( self, metadata_dict ): + def get_repository_tools_tups( self ): """ Return a list of tuples of the form (relative_path, guid, tool) for each tool defined in the received tool shed repository metadata. 
""" repository_tools_tups = [] - index, shed_conf_dict = self.tpm.get_shed_tool_conf_dict( metadata_dict.get( 'shed_config_filename' ) ) - if 'tools' in metadata_dict: - for tool_dict in metadata_dict[ 'tools' ]: + index, shed_conf_dict = self.tpm.get_shed_tool_conf_dict( self.metadata_dict.get( 'shed_config_filename' ) ) + if 'tools' in self.metadata_dict: + for tool_dict in self.metadata_dict[ 'tools' ]: load_relative_path = relative_path = tool_dict.get( 'tool_config', None ) if shed_conf_dict.get( 'tool_path' ): load_relative_path = os.path.join( shed_conf_dict.get( 'tool_path' ), relative_path ) @@ -74,36 +81,21 @@ repository_tools_tups.append( ( relative_path, guid, tool ) ) return repository_tools_tups - def reset_all_metadata_on_installed_repository( self, id ): + def reset_all_metadata_on_installed_repository( self ): """Reset all metadata on a single tool shed repository installed into a Galaxy instance.""" - invalid_file_tups = [] - metadata_dict = {} - repository = repository_util.get_installed_tool_shed_repository( self.app, id ) - repository_clone_url = common_util.generate_clone_url_for_installed_repository( self.app, repository ) - tool_path, relative_install_dir = repository.get_tool_relative_path( self.app ) - if relative_install_dir: - original_metadata_dict = repository.metadata - metadata_dict, invalid_file_tups = \ - self.generate_metadata_for_changeset_revision( repository=repository, - changeset_revision=repository.changeset_revision, - repository_clone_url=repository_clone_url, - shed_config_dict = repository.get_shed_config_dict( self.app ), - relative_install_dir=relative_install_dir, - repository_files_dir=None, - resetting_all_metadata_on_repository=False, - updating_installed_repository=False, - persist=False ) - repository.metadata = metadata_dict - if metadata_dict != original_metadata_dict: - self.update_in_shed_tool_config( repository ) - self.app.install_model.context.add( repository ) + if self.relative_install_dir: + original_metadata_dict = self.repository.metadata + self.generate_metadata_for_changeset_revision() + if self.metadata_dict != original_metadata_dict: + self.repository.metadata = self.metadata_dict + self.update_in_shed_tool_config() + self.app.install_model.context.add( self.repository ) self.app.install_model.context.flush() - log.debug( 'Metadata has been reset on repository %s.' % repository.name ) + log.debug( 'Metadata has been reset on repository %s.' % self.repository.name ) else: - log.debug( 'Metadata did not need to be reset on repository %s.' % repository.name ) + log.debug( 'Metadata did not need to be reset on repository %s.' % self.repository.name ) else: - log.debug( 'Error locating installation directory for repository %s.' % repository.name ) - return invalid_file_tups, metadata_dict + log.debug( 'Error locating installation directory for repository %s.' 
% self.repository.name ) def reset_metadata_on_selected_repositories( self, user, **kwd ): """ @@ -119,19 +111,19 @@ for repository_id in repository_ids: try: repository = repository_util.get_installed_tool_shed_repository( self.app, repository_id ) - owner = str( repository.owner ) - invalid_file_tups, metadata_dict = \ - self.reset_all_metadata_on_installed_repository( repository_id ) - if invalid_file_tups: + self.set_repository( repository ) + self.reset_all_metadata_on_installed_repository() + if self.invalid_file_tups: message = tool_util.generate_message_for_invalid_tools( self.app, - invalid_file_tups, + self.invalid_file_tups, repository, None, as_html=False ) log.debug( message ) unsuccessful_count += 1 else: - log.debug( "Successfully reset metadata on repository %s owned by %s" % ( str( repository.name ), owner ) ) + log.debug( "Successfully reset metadata on repository %s owned by %s" % \ + ( str( repository.name ), str( repository.owner ) ) ) successful_count += 1 except: log.exception( "Error attempting to reset metadata on repository %s", str( repository.name ) ) @@ -146,32 +138,37 @@ status = 'error' return message, status - def tool_shed_from_repository_clone_url( self, repository_clone_url ): + def set_repository( self, repository ): + super( InstalledRepositoryMetadataManager, self ).set_repository( repository ) + self.repository_clone_url = common_util.generate_clone_url_for_installed_repository( self.app, repository ) + + def tool_shed_from_repository_clone_url( self ): """Given a repository clone URL, return the tool shed that contains the repository.""" - return common_util.remove_protocol_and_user_from_clone_url( repository_clone_url ).split( '/repos/' )[ 0 ].rstrip( '/' ) + cleaned_repository_clone_url = common_util.remove_protocol_and_user_from_clone_url( self.repository_clone_url ) + return common_util.remove_protocol_and_user_from_clone_url( cleaned_repository_clone_url ).split( '/repos/' )[ 0 ].rstrip( '/' ) - def update_in_shed_tool_config( self, repository ): + def update_in_shed_tool_config( self ): """ A tool shed repository is being updated so change the shed_tool_conf file. Parse the config file to generate the entire list of config_elems instead of using the in-memory list. 
""" - shed_conf_dict = repository.get_shed_config_dict( self.app ) + shed_conf_dict = self.repository.get_shed_config_dict( self.app ) shed_tool_conf = shed_conf_dict[ 'config_filename' ] tool_path = shed_conf_dict[ 'tool_path' ] - tool_panel_dict = self.tpm.generate_tool_panel_dict_from_shed_tool_conf_entries( repository ) - repository_tools_tups = self.get_repository_tools_tups( repository.metadata ) - clone_url = common_util.generate_clone_url_for_installed_repository( self.app, repository ) - cleaned_repository_clone_url = common_util.remove_protocol_and_user_from_clone_url( clone_url ) - tool_shed = self.tool_shed_from_repository_clone_url( cleaned_repository_clone_url ) - owner = repository.owner + tool_panel_dict = self.tpm.generate_tool_panel_dict_from_shed_tool_conf_entries( self.repository ) + repository_tools_tups = self.get_repository_tools_tups() + clone_url = common_util.generate_clone_url_for_installed_repository( self.app, self.repository ) + tool_shed = self.tool_shed_from_repository_clone_url() + owner = self.repository.owner if not owner: + cleaned_repository_clone_url = common_util.remove_protocol_and_user_from_clone_url( clone_url ) owner = suc.get_repository_owner( cleaned_repository_clone_url ) guid_to_tool_elem_dict = {} for tool_config_filename, guid, tool in repository_tools_tups: guid_to_tool_elem_dict[ guid ] = self.tpm.generate_tool_elem( tool_shed, - repository.name, - repository.changeset_revision, - repository.owner or '', + self.repository.name, + self.repository.changeset_revision, + self.repository.owner or '', tool_config_filename, tool, None ) diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/tool_shed/galaxy_install/tool_migration_manager.py --- a/lib/tool_shed/galaxy_install/tool_migration_manager.py +++ b/lib/tool_shed/galaxy_install/tool_migration_manager.py @@ -416,21 +416,23 @@ log.exception( "Exception attempting to filter and persist non-shed-related tool panel configs:\n%s" % str( e ) ) finally: lock.release() - irmm = InstalledRepositoryMetadataManager( self.app, self.tpm ) - metadata_dict, invalid_file_tups = \ - irmm.generate_metadata_for_changeset_revision( repository=tool_shed_repository, - changeset_revision=tool_shed_repository.changeset_revision, - repository_clone_url=repository_clone_url, - shed_config_dict = self.shed_config_dict, - relative_install_dir=relative_install_dir, - repository_files_dir=None, - resetting_all_metadata_on_repository=False, - updating_installed_repository=False, - persist=True ) - tool_shed_repository.metadata = metadata_dict + irmm = InstalledRepositoryMetadataManager( app=self.app, + tpm=self.tpm, + repository=tool_shed_repository, + changeset_revision=tool_shed_repository.changeset_revision, + repository_clone_url=repository_clone_url, + shed_config_dict = self.shed_config_dict, + relative_install_dir=relative_install_dir, + repository_files_dir=None, + resetting_all_metadata_on_repository=False, + updating_installed_repository=False, + persist=True ) + irmm.generate_metadata_for_changeset_revision() + irmm_metadata_dict = irmm.get_metadata_dict() + tool_shed_repository.metadata = irmm_metadata_dict self.app.install_model.context.add( tool_shed_repository ) self.app.install_model.context.flush() - has_tool_dependencies = self.__has_tool_dependencies( metadata_dict ) + has_tool_dependencies = self.__has_tool_dependencies( irmm_metadata_dict ) if has_tool_dependencies: # All tool_dependency objects must be created before the tools are processed even if no # tool 
dependencies will be installed. @@ -440,14 +442,14 @@ set_status=True ) else: tool_dependencies = None - if 'tools' in metadata_dict: + if 'tools' in irmm_metadata_dict: tdtm = data_table_manager.ToolDataTableManager( self.app ) - sample_files = metadata_dict.get( 'sample_files', [] ) + sample_files = irmm_metadata_dict.get( 'sample_files', [] ) sample_files = [ str( s ) for s in sample_files ] tool_index_sample_files = tdtm.get_tool_index_sample_files( sample_files ) tool_util.copy_sample_files( self.app, tool_index_sample_files, tool_path=self.tool_path ) sample_files_copied = [ s for s in tool_index_sample_files ] - repository_tools_tups = irmm.get_repository_tools_tups( metadata_dict ) + repository_tools_tups = irmm.get_repository_tools_tups() if repository_tools_tups: # Handle missing data table entries for tool parameters that are dynamically # generated select lists. @@ -491,7 +493,7 @@ if installed_tool_dependency.status == self.app.install_model.ToolDependency.installation_status.ERROR: print '\nThe ToolMigrationManager returned the following error while installing tool dependency ', installed_tool_dependency.name, ':' print installed_tool_dependency.error_message, '\n\n' - if 'datatypes' in metadata_dict: + if 'datatypes' in irmm_metadata_dict: cdl = custom_datatype_manager.CustomDatatypeLoader( self.app ) tool_shed_repository.status = self.app.install_model.ToolShedRepository.installation_status.LOADING_PROPRIETARY_DATATYPES if not tool_shed_repository.includes_datatypes: @@ -514,7 +516,7 @@ name=tool_shed_repository.name, owner=self.repository_owner, installed_changeset_revision=tool_shed_repository.installed_changeset_revision, - tool_dicts=metadata_dict.get( 'tools', [] ), + tool_dicts=irmm_metadata_dict.get( 'tools', [] ), converter_path=converter_path, display_path=display_path ) if converter_path: diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/tool_shed/metadata/metadata_generator.py --- a/lib/tool_shed/metadata/metadata_generator.py +++ b/lib/tool_shed/metadata/metadata_generator.py @@ -27,31 +27,89 @@ class MetadataGenerator( object ): - def __init__( self, app ): + def __init__( self, app, repository=None, changeset_revision=None, repository_clone_url=None, + shed_config_dict=None, relative_install_dir=None, repository_files_dir=None, + resetting_all_metadata_on_repository=False, updating_installed_repository=False, + persist=False, metadata_dict=None, user=None ): self.app = app + self.user = user + self.repository = repository + if self.app.name == 'galaxy': + if changeset_revision is None and self.repository is not None: + self.changeset_revision = self.repository.changeset_revision + else: + self.changeset_revision = changeset_revision + + if repository_clone_url is None and self.repository is not None: + self.repository_clone_url = common_util.generate_clone_url_for_installed_repository( self.app, self.repository ) + else: + self.repository_clone_url = repository_clone_url + if shed_config_dict is None: + if self.repository is not None: + self.shed_config_dict = self.repository.get_shed_config_dict( self.app ) + else: + self.shed_config_dict = {} + else: + self.shed_config_dict = shed_config_dict + if relative_install_dir is None and self.repository is not None: + tool_path, relative_install_dir = self.repository.get_tool_relative_path( self.app ) + if repository_files_dir is None and self.repository is not None: + repository_files_dir = self.repository.repo_files_directory( self.app ) + if metadata_dict is None: + 
# Shed related tool panel configs are only relevant to Galaxy. + self.metadata_dict = { 'shed_config_filename' : self.shed_config_dict.get( 'config_filename', None ) } + else: + self.metadata_dict = metadata_dict + else: + # We're in the Tool Shed. + if changeset_revision is None and self.repository is not None: + self.changeset_revision = self.repository.tip( self.app ) + else: + self.changeset_revision = changeset_revision + if repository_clone_url is None and self.repository is not None: + self.repository_clone_url = \ + common_util.generate_clone_url_for_repository_in_tool_shed( self.user, self.repository ) + else: + self.repository_clone_url = repository_clone_url + if shed_config_dict is None: + self.shed_config_dict = {} + else: + self.shed_config_dict = shed_config_dict + if relative_install_dir is None and self.repository is not None: + relative_install_dir = self.repository.repo_path( self.app ) + if repository_files_dir is None and self.repository is not None: + repository_files_dir = self.repository.repo_path( self.app ) + if metadata_dict is None: + self.metadata_dict = {} + else: + self.metadata_dict = metadata_dict + self.relative_install_dir = relative_install_dir + self.repository_files_dir = repository_files_dir + self.resetting_all_metadata_on_repository = resetting_all_metadata_on_repository + self.updating_installed_repository = updating_installed_repository + self.persist = persist + self.invalid_file_tups = [] self.sa_session = app.model.context.current self.NOT_TOOL_CONFIGS = [ suc.DATATYPES_CONFIG_FILENAME, - rt_util.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME, - rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME, - suc.REPOSITORY_DATA_MANAGER_CONFIG_FILENAME ] + rt_util.REPOSITORY_DEPENDENCY_DEFINITION_FILENAME, + rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME, + suc.REPOSITORY_DATA_MANAGER_CONFIG_FILENAME ] - def generate_data_manager_metadata( self, repository, repo_dir, data_manager_config_filename, metadata_dict, + def generate_data_manager_metadata( self, repo_dir, data_manager_config_filename, metadata_dict, shed_config_dict=None ): """ Update the received metadata_dict with information from the parsed data_manager_config_filename. """ if data_manager_config_filename is None: return metadata_dict - repo_path = repository.repo_path( self.app ) + repo_path = self.repository.repo_path( self.app ) try: # Galaxy Side. - repo_files_directory = repository.repo_files_directory( self.app ) + repo_files_directory = self.repository.repo_files_directory( self.app ) repo_dir = repo_files_directory - repository_clone_url = common_util.generate_clone_url_for_installed_repository( self.app, repository ) except AttributeError: # Tool Shed side. repo_files_directory = repo_path - repository_clone_url = common_util.generate_clone_url_for_repository_in_tool_shed( None, repository ) relative_data_manager_dir = util.relpath( os.path.split( data_manager_config_filename )[0], repo_dir ) rel_data_manager_config_filename = os.path.join( relative_data_manager_dir, os.path.split( data_manager_config_filename )[1] ) @@ -91,7 +149,7 @@ # FIXME: default behavior is to fall back to tool.name. 
data_manager_name = data_manager_elem.get( 'name', data_manager_id ) version = data_manager_elem.get( 'version', DataManager.DEFAULT_VERSION ) - guid = self.generate_guid_for_object( repository_clone_url, DataManager.GUID_TYPE, data_manager_id, version ) + guid = self.generate_guid_for_object( DataManager.GUID_TYPE, data_manager_id, version ) data_tables = [] if tool_file is None: log.error( 'Data Manager entry is missing tool_file attribute in "%s".' % ( data_manager_config_filename ) ) @@ -130,8 +188,7 @@ log.debug( 'Loaded Data Manager tool_files: %s' % ( tool_file ) ) return metadata_dict - def generate_datatypes_metadata( self, tv, repository, repository_clone_url, repository_files_dir, datatypes_config, - metadata_dict ): + def generate_datatypes_metadata( self, tv, repository_files_dir, datatypes_config, metadata_dict ): """Update the received metadata_dict with information from the parsed datatypes_config.""" tree, error_message = xml_util.parse_xml( datatypes_config ) if tree is None: @@ -180,11 +237,11 @@ tool_config_path = hg_util.get_config_from_disk( tool_config, repository_files_dir ) full_path = os.path.abspath( tool_config_path ) tool, valid, error_message = \ - tv.load_tool_from_config( self.app.security.encode_id( repository.id ), full_path ) + tv.load_tool_from_config( self.app.security.encode_id( self.repository.id ), full_path ) if tool is None: guid = None else: - guid = suc.generate_tool_guid( repository_clone_url, tool ) + guid = suc.generate_tool_guid( self.repository_clone_url, tool ) converter_dict = dict( tool_config=tool_config, guid=guid, target_datatype=target_datatype ) @@ -226,76 +283,70 @@ valid_tool_dependencies_dict[ 'set_environment' ] = [ requirements_dict ] return valid_tool_dependencies_dict - def generate_guid_for_object( self, repository_clone_url, guid_type, obj_id, version ): - tmp_url = common_util.remove_protocol_and_user_from_clone_url( repository_clone_url ) + def generate_guid_for_object( self, guid_type, obj_id, version ): + tmp_url = common_util.remove_protocol_and_user_from_clone_url( self.repository_clone_url ) return '%s/%s/%s/%s' % ( tmp_url, guid_type, obj_id, version ) - def generate_metadata_for_changeset_revision( self, repository, changeset_revision, repository_clone_url, - shed_config_dict=None, relative_install_dir=None, repository_files_dir=None, - resetting_all_metadata_on_repository=False, updating_installed_repository=False, - persist=False ): + def generate_metadata_for_changeset_revision( self ): """ Generate metadata for a repository using its files on disk. To generate metadata for changeset revisions older than the repository tip, the repository will have been cloned to a temporary location and updated to a specified changeset revision to access - that changeset revision's disk files, so the value of repository_files_dir will not - always be repository.repo_path( self.app ) (it could be an absolute path to a temporary - directory containing a clone). If it is an absolute path, the value of relative_install_dir + that changeset revision's disk files, so the value of self.repository_files_dir will not + always be self.repository.repo_path( self.app ) (it could be an absolute path to a temporary + directory containing a clone). If it is an absolute path, the value of self.relative_install_dir must contain repository.repo_path( self.app ). 
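An aside on the GUID scheme used by generate_guid_for_object() above: it joins the protocol-stripped clone URL with the object's type, id and version. A minimal standalone sketch of the same idea (the strip helper and the example URL are illustrative stand-ins, not the actual common_util.remove_protocol_and_user_from_clone_url implementation):

    def strip_protocol_and_user(clone_url):
        # Drop the protocol and any user@ prefix from a clone URL.
        url = clone_url.split('://')[-1]
        if '@' in url:
            url = url.split('@')[-1]
        return url.rstrip('/')

    def generate_guid(clone_url, guid_type, obj_id, version):
        # Mirrors the '%s/%s/%s/%s' layout used above.
        tmp_url = strip_protocol_and_user(clone_url)
        return '%s/%s/%s/%s' % (tmp_url, guid_type, obj_id, version)

    # generate_guid('http://user@toolshed.example.org/repos/owner/repo',
    #               'data_manager', 'my_dm', '1.0')
    # -> 'toolshed.example.org/repos/owner/repo/data_manager/my_dm/1.0'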
- The value of persist will be True when the installed repository contains a valid + The value of self.persist will be True when the installed repository contains a valid tool_data_table_conf.xml.sample file, in which case the entries should ultimately be persisted to the file referred to by self.app.config.shed_tool_data_table_config. """ tv = tool_validator.ToolValidator( self.app ) - if shed_config_dict is None: - shed_config_dict = {} - if updating_installed_repository: + if self.shed_config_dict is None: + self.shed_config_dict = {} + if self.updating_installed_repository: # Keep the original tool shed repository metadata if setting metadata on a repository # installed into a local Galaxy instance for which we have pulled updates. - original_repository_metadata = repository.metadata + original_repository_metadata = self.repository.metadata else: original_repository_metadata = None - readme_file_names = readme_util.get_readme_file_names( str( repository.name ) ) + readme_file_names = readme_util.get_readme_file_names( str( self.repository.name ) ) if self.app.name == 'galaxy': # Shed related tool panel configs are only relevant to Galaxy. - metadata_dict = { 'shed_config_filename' : shed_config_dict.get( 'config_filename' ) } + metadata_dict = { 'shed_config_filename' : self.shed_config_dict.get( 'config_filename' ) } else: metadata_dict = {} readme_files = [] - invalid_file_tups = [] invalid_tool_configs = [] tool_dependencies_config = None original_tool_data_path = self.app.config.tool_data_path original_tool_data_table_config_path = self.app.config.tool_data_table_config_path - if resetting_all_metadata_on_repository: - if not relative_install_dir: - raise Exception( "The value of repository.repo_path must be sent when resetting all metadata on a repository." ) + if self.resetting_all_metadata_on_repository: + if not self.relative_install_dir: + raise Exception( "The value of self.repository.repo_path must be set when resetting all metadata on a repository." ) # Keep track of the location where the repository is temporarily cloned so that we can - # strip the path when setting metadata. The value of repository_files_dir is the full - # path to the temporary directory to which the repository was cloned. - work_dir = repository_files_dir - files_dir = repository_files_dir + # strip the path when setting metadata. The value of self.repository_files_dir is the + # full path to the temporary directory to which self.repository was cloned. + work_dir = self.repository_files_dir + files_dir = self.repository_files_dir # Since we're working from a temporary directory, we can safely copy sample files included # in the repository to the repository root. - self.app.config.tool_data_path = repository_files_dir - self.app.config.tool_data_table_config_path = repository_files_dir + self.app.config.tool_data_path = self.repository_files_dir + self.app.config.tool_data_table_config_path = self.repository_files_dir else: # Use a temporary working directory to copy all sample files. work_dir = tempfile.mkdtemp( prefix="tmp-toolshed-gmfcr" ) # All other files are on disk in the repository's repo_path, which is the value of - # relative_install_dir. - files_dir = relative_install_dir - if shed_config_dict.get( 'tool_path' ): - files_dir = os.path.join( shed_config_dict[ 'tool_path' ], files_dir ) + # self.relative_install_dir. 
+ files_dir = self.relative_install_dir + if self.shed_config_dict.get( 'tool_path' ): + files_dir = os.path.join( self.shed_config_dict[ 'tool_path' ], files_dir ) self.app.config.tool_data_path = work_dir #FIXME: Thread safe? self.app.config.tool_data_table_config_path = work_dir # Handle proprietary datatypes, if any. datatypes_config = hg_util.get_config_from_disk( suc.DATATYPES_CONFIG_FILENAME, files_dir ) if datatypes_config: metadata_dict = self.generate_datatypes_metadata( tv, - repository, - repository_clone_url, files_dir, datatypes_config, metadata_dict ) @@ -303,9 +354,8 @@ # the repository's metadata. sample_file_metadata_paths, sample_file_copy_paths = \ self.get_sample_files_from_disk( repository_files_dir=files_dir, - tool_path=shed_config_dict.get( 'tool_path' ), - relative_install_dir=relative_install_dir, - resetting_all_metadata_on_repository=resetting_all_metadata_on_repository ) + tool_path=self.shed_config_dict.get( 'tool_path' ), + relative_install_dir=self.relative_install_dir ) if sample_file_metadata_paths: metadata_dict[ 'sample_files' ] = sample_file_metadata_paths # Copy all sample files included in the repository to a single directory location so we @@ -322,7 +372,7 @@ shed_tool_data_table_config=self.app.config.shed_tool_data_table_config, persist=False ) if error_message: - invalid_file_tups.append( ( filename, error_message ) ) + self.invalid_file_tups.append( ( filename, error_message ) ) for root, dirs, files in os.walk( files_dir ): if root.find( '.hg' ) < 0 and root.find( 'hgrc' ) < 0: if '.hg' in dirs: @@ -333,18 +383,16 @@ path_to_repository_dependencies_config = os.path.join( root, name ) metadata_dict, error_message = \ self.generate_repository_dependency_metadata( path_to_repository_dependencies_config, - metadata_dict, - updating_installed_repository=updating_installed_repository ) + metadata_dict ) if error_message: - invalid_file_tups.append( ( name, error_message ) ) + self.invalid_file_tups.append( ( name, error_message ) ) # See if we have one or more READ_ME files. elif name.lower() in readme_file_names: relative_path_to_readme = self.get_relative_path_to_repository_file( root, name, - relative_install_dir, + self.relative_install_dir, work_dir, - shed_config_dict, - resetting_all_metadata_on_repository ) + self.shed_config_dict ) readme_files.append( relative_path_to_readme ) # See if we have a tool config. elif name not in self.NOT_TOOL_CONFIGS and name.endswith( '.xml' ): @@ -365,12 +413,12 @@ is_tool = element_tree_root.tag == 'tool' if is_tool: tool, valid, error_message = \ - tv.load_tool_from_config( self.app.security.encode_id( repository.id ), + tv.load_tool_from_config( self.app.security.encode_id( self.repository.id ), full_path ) if tool is None: if not valid: invalid_tool_configs.append( name ) - invalid_file_tups.append( ( name, error_message ) ) + self.invalid_file_tups.append( ( name, error_message ) ) else: invalid_files_and_errors_tups = \ tv.check_tool_input_params( files_dir, @@ -387,17 +435,15 @@ relative_path_to_tool_config = \ self.get_relative_path_to_repository_file( root, name, - relative_install_dir, + self.relative_install_dir, work_dir, - shed_config_dict, - resetting_all_metadata_on_repository ) + self.shed_config_dict ) metadata_dict = self.generate_tool_metadata( relative_path_to_tool_config, tool, - repository_clone_url, metadata_dict ) else: for tup in invalid_files_and_errors_tups: - invalid_file_tups.append( tup ) + self.invalid_file_tups.append( tup ) # Find all exported workflows. 
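Note the '#FIXME: Thread safe?' above: generate_metadata_for_changeset_revision() temporarily repoints app.config.tool_data_path and tool_data_table_config_path at a scratch directory and restores the originals at the end of the method. A context manager is one way to make that swap-and-restore pattern exception-safe; this is only an illustrative sketch, not code from the changeset:

    from contextlib import contextmanager

    @contextmanager
    def swapped_attrs(obj, **temp_values):
        # Temporarily set attributes on obj, restoring the originals
        # even if the body raises.
        originals = dict((name, getattr(obj, name)) for name in temp_values)
        try:
            for name, value in temp_values.items():
                setattr(obj, name, value)
            yield obj
        finally:
            for name, value in originals.items():
                setattr(obj, name, value)

    # with swapped_attrs(app.config, tool_data_path=work_dir,
    #                    tool_data_table_config_path=work_dir):
    #     ...  # generate metadata against the scratch directory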
elif name.endswith( '.ga' ): relative_path = os.path.join( root, name ) @@ -421,11 +467,10 @@ metadata_dict ) # Handle any data manager entries data_manager_config = hg_util.get_config_from_disk( suc.REPOSITORY_DATA_MANAGER_CONFIG_FILENAME, files_dir ) - metadata_dict = self.generate_data_manager_metadata( repository, - files_dir, + metadata_dict = self.generate_data_manager_metadata( files_dir, data_manager_config, metadata_dict, - shed_config_dict=shed_config_dict ) + shed_config_dict=self.shed_config_dict ) if readme_files: metadata_dict[ 'readme_files' ] = readme_files @@ -433,21 +478,18 @@ tool_dependencies_config = hg_util.get_config_from_disk( rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME, files_dir ) if tool_dependencies_config: metadata_dict, error_message = \ - self.generate_tool_dependency_metadata( repository, - changeset_revision, - repository_clone_url, - tool_dependencies_config, + self.generate_tool_dependency_metadata( tool_dependencies_config, metadata_dict, original_repository_metadata=original_repository_metadata ) if error_message: - invalid_file_tups.append( ( rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME, error_message ) ) + self.invalid_file_tups.append( ( rt_util.TOOL_DEPENDENCY_DEFINITION_FILENAME, error_message ) ) if invalid_tool_configs: metadata_dict [ 'invalid_tools' ] = invalid_tool_configs + self.metadata_dict = metadata_dict # Reset the value of the app's tool_data_path and tool_data_table_config_path to their respective original values. self.app.config.tool_data_path = original_tool_data_path self.app.config.tool_data_table_config_path = original_tool_data_table_config_path basic_util.remove_dir( work_dir ) - return metadata_dict, invalid_file_tups def generate_package_dependency_metadata( self, elem, valid_tool_dependencies_dict, invalid_tool_dependencies_dict ): """ @@ -475,8 +517,7 @@ # where a tool dependency definition is considered invalid. repository_dependency_tup, repository_dependency_is_valid, error_message = \ self.handle_repository_elem( repository_elem=sub_elem, - only_if_compiling_contained_td=False, - updating_installed_repository=False ) + only_if_compiling_contained_td=False ) elif sub_elem.tag == 'install': package_install_version = sub_elem.get( 'version', '1.0' ) if package_install_version == '1.0': @@ -509,8 +550,7 @@ # We have a complex repository dependency. repository_dependency_tup, repository_dependency_is_valid, error_message = \ self.handle_repository_elem( repository_elem=sub_action_elem, - only_if_compiling_contained_td=True, - updating_installed_repository=False ) + only_if_compiling_contained_td=True ) elif action_elem.tag == 'action': # <action type="set_environment_for_install"> # <repository changeset_revision="b107b91b3574" name="package_readline_6_2" owner="devteam" prior_installation_required="True" toolshed="http://localhost:9009"> @@ -522,8 +562,7 @@ # We have a complex repository dependency. 
repository_dependency_tup, repository_dependency_is_valid, error_message = \ self.handle_repository_elem( repository_elem=sub_action_elem, - only_if_compiling_contained_td=True, - updating_installed_repository=False ) + only_if_compiling_contained_td=True ) if requirements_dict: dependency_key = '%s/%s' % ( package_name, package_version ) if repository_dependency_is_valid: @@ -538,8 +577,7 @@ repository_dependency_is_valid, \ error_message - def generate_repository_dependency_metadata( self, repository_dependencies_config, metadata_dict, - updating_installed_repository=False ): + def generate_repository_dependency_metadata( self, repository_dependencies_config, metadata_dict ): """ Generate a repository dependencies dictionary based on valid information defined in the received repository_dependencies_config. This method is called from the tool shed as well as from Galaxy. @@ -560,8 +598,7 @@ for repository_elem in root.findall( 'repository' ): repository_dependency_tup, repository_dependency_is_valid, err_msg = \ self.handle_repository_elem( repository_elem, - only_if_compiling_contained_td=False, - updating_installed_repository=updating_installed_repository ) + only_if_compiling_contained_td=False ) if repository_dependency_is_valid: valid_repository_dependency_tups.append( repository_dependency_tup ) else: @@ -585,10 +622,10 @@ metadata_dict[ 'repository_dependencies' ] = valid_repository_dependencies_dict return metadata_dict, error_message - def generate_tool_metadata( self, tool_config, tool, repository_clone_url, metadata_dict ): + def generate_tool_metadata( self, tool_config, tool, metadata_dict ): """Update the received metadata_dict with changes that have been applied to the received tool.""" # Generate the guid. - guid = suc.generate_tool_guid( repository_clone_url, tool ) + guid = suc.generate_tool_guid( self.repository_clone_url, tool ) # Handle tool.requirements. tool_requirements = [] for tool_requirement in tool.requirements: @@ -667,11 +704,10 @@ metadata_dict[ 'tools' ] = [ tool_dict ] return metadata_dict - def generate_tool_dependency_metadata( self, repository, changeset_revision, repository_clone_url, tool_dependencies_config, - metadata_dict, original_repository_metadata=None ): + def generate_tool_dependency_metadata( self, tool_dependencies_config, metadata_dict, original_repository_metadata=None ): """ If the combination of name, version and type of each element is defined in the <requirement> tag for - at least one tool in the repository, then update the received metadata_dict with information from the + at least one tool in self.repository, then update the received metadata_dict with information from the parsed tool_dependencies_config. """ error_message = '' @@ -738,7 +774,7 @@ # into a Galaxy instance, so handle changes to tool dependencies appropriately. 
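The rule stated in the generate_tool_dependency_metadata() docstring above — a tool dependency is recorded only when some tool in the repository declares a matching <requirement> — boils down to comparing (name, version, type) triples. A simplified sketch, under the assumption that the dictionaries are shaped like the 'tools'/'requirements' metadata entries generated above:

    def dependency_is_required(dependency, tools_metadata):
        # dependency: dict with 'name', 'version' and 'type' keys.
        # tools_metadata: the 'tools' list from a metadata_dict; each
        # tool dict carries a 'requirements' list of the same shape.
        key = (dependency['name'], dependency['version'], dependency['type'])
        for tool_dict in tools_metadata:
            for req in tool_dict.get('requirements', []):
                if (req['name'], req['version'], req['type']) == key:
                    return True
        return False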
irm = self.app.installed_repository_manager updated_tool_dependency_names, deleted_tool_dependency_names = \ - irm.handle_existing_tool_dependencies_that_changed_in_update( repository, + irm.handle_existing_tool_dependencies_that_changed_in_update( self.repository, original_valid_tool_dependencies_dict, valid_tool_dependencies_dict ) metadata_dict[ 'tool_dependencies' ] = valid_tool_dependencies_dict @@ -769,9 +805,14 @@ metadata_dict[ 'workflows' ] = [ ( relative_path, exported_workflow_dict ) ] return metadata_dict - def get_relative_path_to_repository_file( self, root, name, relative_install_dir, work_dir, shed_config_dict, - resetting_all_metadata_on_repository ): - if resetting_all_metadata_on_repository: + def get_invalid_file_tups( self ): + return self.invalid_file_tups + + def get_metadata_dict( self ): + return self.metadata_dict + + def get_relative_path_to_repository_file( self, root, name, relative_install_dir, work_dir, shed_config_dict ): + if self.resetting_all_metadata_on_repository: full_path_to_file = os.path.join( root, name ) stripped_path_to_file = full_path_to_file.replace( work_dir, '' ) if stripped_path_to_file.startswith( '/' ): @@ -785,9 +826,8 @@ relative_path_to_file = relative_path_to_file[ len( shed_config_dict.get( 'tool_path' ) ) + 1: ] return relative_path_to_file - def get_sample_files_from_disk( self, repository_files_dir, tool_path=None, relative_install_dir=None, - resetting_all_metadata_on_repository=False ): - if resetting_all_metadata_on_repository: + def get_sample_files_from_disk( self, repository_files_dir, tool_path=None, relative_install_dir=None ): + if self.resetting_all_metadata_on_repository: # Keep track of the location where the repository is temporarily cloned so that we can strip # it when setting metadata. work_dir = repository_files_dir @@ -797,7 +837,7 @@ if root.find( '.hg' ) < 0: for name in files: if name.endswith( '.sample' ): - if resetting_all_metadata_on_repository: + if self.resetting_all_metadata_on_repository: full_path_to_sample_file = os.path.join( root, name ) stripped_path_to_sample_file = full_path_to_sample_file.replace( work_dir, '' ) if stripped_path_to_sample_file.startswith( '/' ): @@ -816,7 +856,7 @@ sample_file_metadata_paths.append( relative_path_to_sample_file ) return sample_file_metadata_paths, sample_file_copy_paths - def handle_repository_elem( self, repository_elem, only_if_compiling_contained_td=False, updating_installed_repository=False ): + def handle_repository_elem( self, repository_elem, only_if_compiling_contained_td=False ): """ Process the received repository_elem which is a <repository> tag either from a repository_dependencies.xml file or a tool_dependencies.xml file. If the former, @@ -832,7 +872,7 @@ changeset_revision = repository_elem.get( 'changeset_revision', None ) prior_installation_required = str( repository_elem.get( 'prior_installation_required', False ) ) if self.app.name == 'galaxy': - if updating_installed_repository: + if self.updating_installed_repository: pass else: # We're installing a repository into Galaxy, so make sure its contained repository @@ -889,12 +929,12 @@ updated_changeset_revision ) if repository: return repository_dependency_tup, is_valid, error_message - if updating_installed_repository: + if self.updating_installed_repository: # The repository dependency was included in an update to the installed # repository, so it will not yet be installed. Return the tuple for later # installation. 
return repository_dependency_tup, is_valid, error_message - if updating_installed_repository: + if self.updating_installed_repository: # The repository dependency was included in an update to the installed repository, # so it will not yet be installed. Return the tuple for later installation. return repository_dependency_tup, is_valid, error_message @@ -1006,6 +1046,46 @@ return False return True + def set_changeset_revision( self, changeset_revision ): + self.changeset_revision = changeset_revision + + def set_relative_install_dir( self, relative_install_dir ): + self.relative_install_dir = relative_install_dir + + def set_repository( self, repository, relative_install_dir=None, changeset_revision=None ): + self.repository = repository + # Shed related tool panel configs are only relevant to Galaxy. + if self.app.name == 'galaxy': + if relative_install_dir is None and self.repository is not None: + tool_path, relative_install_dir = self.repository.get_tool_relative_path( self.app ) + if changeset_revision is None and self.repository is not None: + self.set_changeset_revision( self.repository.changeset_revision ) + else: + self.set_changeset_revision( changeset_revision ) + self.shed_config_dict = repository.get_shed_config_dict( self.app ) + self.metadata_dict = { 'shed_config_filename' : self.shed_config_dict.get( 'config_filename', None ) } + else: + if relative_install_dir is None and self.repository is not None: + relative_install_dir = repository.repo_path( self.app ) + if changeset_revision is None and self.repository is not None: + self.set_changeset_revision( self.repository.tip( self.app ) ) + else: + self.set_changeset_revision( changeset_revision ) + self.shed_config_dict = {} + self.metadata_dict = {} + self.set_relative_install_dir( relative_install_dir ) + self.set_repository_files_dir() + self.resetting_all_metadata_on_repository = False + self.updating_installed_repository = False + self.persist = False + self.invalid_file_tups = [] + + def set_repository_clone_url( self, repository_clone_url ): + self.repository_clone_url = repository_clone_url + + def set_repository_files_dir( self, repository_files_dir=None ): + self.repository_files_dir = repository_files_dir + def update_repository_dependencies_metadata( self, metadata, repository_dependency_tups, is_valid, description ): if is_valid: repository_dependencies_dict = metadata.get( 'repository_dependencies', None ) diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c lib/tool_shed/metadata/repository_metadata_manager.py --- a/lib/tool_shed/metadata/repository_metadata_manager.py +++ b/lib/tool_shed/metadata/repository_metadata_manager.py @@ -22,8 +22,16 @@ class RepositoryMetadataManager( metadata_generator.MetadataGenerator ): - def __init__( self, app, user ): - super( RepositoryMetadataManager, self ).__init__( app ) + def __init__( self, app, user, repository=None, changeset_revision=None, repository_clone_url=None, + shed_config_dict=None, relative_install_dir=None, repository_files_dir=None, + resetting_all_metadata_on_repository=False, updating_installed_repository=False, + persist=False, metadata_dict=None ): + super( RepositoryMetadataManager, self ).__init__( app, repository, changeset_revision, + repository_clone_url, shed_config_dict, + relative_install_dir, repository_files_dir, + resetting_all_metadata_on_repository, + updating_installed_repository, persist, + metadata_dict=metadata_dict, user=user ) self.app = app self.user = user # Repository metadata comparisons 
for changeset revisions. @@ -62,7 +70,7 @@ repositories_select_field.add_option( option_label, option_value ) return repositories_select_field - def clean_repository_metadata( self, id, changeset_revisions ): + def clean_repository_metadata( self, changeset_revisions ): # Delete all repository_metadata records associated with the repository that have # a changeset_revision that is not in changeset_revisions. We sometimes see multiple # records with the same changeset revision value - no idea how this happens. We'll @@ -71,7 +79,7 @@ changeset_revisions_checked = [] for repository_metadata in \ self.sa_session.query( self.app.model.RepositoryMetadata ) \ - .filter( self.app.model.RepositoryMetadata.table.c.repository_id == self.app.security.decode_id( id ) ) \ + .filter( self.app.model.RepositoryMetadata.table.c.repository_id == self.repository.id ) \ .order_by( self.app.model.RepositoryMetadata.table.c.changeset_revision, self.app.model.RepositoryMetadata.table.c.update_time.desc() ): changeset_revision = repository_metadata.changeset_revision @@ -79,15 +87,14 @@ self.sa_session.delete( repository_metadata ) self.sa_session.flush() - def compare_changeset_revisions( self, ancestor_changeset_revision, ancestor_metadata_dict, - current_changeset_revision, current_metadata_dict ): + def compare_changeset_revisions( self, ancestor_changeset_revision, ancestor_metadata_dict ): """ Compare the contents of two changeset revisions to determine if a new repository metadata revision should be created. """ # The metadata associated with ancestor_changeset_revision is ancestor_metadata_dict. - # This changeset_revision is an ancestor of current_changeset_revision which is associated - # with current_metadata_dict. A new repository_metadata record will be created only + # This changeset_revision is an ancestor of self.changeset_revision which is associated + # with self.metadata_dict. A new repository_metadata record will be created only # when this method returns the constant value self.NOT_EQUAL_AND_NOT_SUBSET. 
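The four-way result described above is computed per utility category (datatypes, tools, readme files, and so on) and then combined. Reduced to a single category, the test is essentially a subset check over lists of dictionaries; a minimal sketch of that idea, with the constant names borrowed from the class and the full method's GUID and readme special-casing omitted:

    def compare_lists(ancestor_items, current_items):
        # Ancestor metadata fully contained in current metadata means
        # EQUAL (same size) or SUBSET; anything missing means neither.
        missing = [item for item in ancestor_items if item not in current_items]
        if missing:
            return 'not equal and not subset'   # self.NOT_EQUAL_AND_NOT_SUBSET
        if len(ancestor_items) == len(current_items):
            return 'equal'                      # self.EQUAL
        return 'subset'                         # self.SUBSET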
ancestor_datatypes = ancestor_metadata_dict.get( 'datatypes', [] ) ancestor_tools = ancestor_metadata_dict.get( 'tools', [] ) @@ -99,16 +106,16 @@ ancestor_tool_dependencies = ancestor_metadata_dict.get( 'tool_dependencies', {} ) ancestor_workflows = ancestor_metadata_dict.get( 'workflows', [] ) ancestor_data_manager = ancestor_metadata_dict.get( 'data_manager', {} ) - current_datatypes = current_metadata_dict.get( 'datatypes', [] ) - current_tools = current_metadata_dict.get( 'tools', [] ) + current_datatypes = self.metadata_dict.get( 'datatypes', [] ) + current_tools = self.metadata_dict.get( 'tools', [] ) current_guids = [ tool_dict[ 'guid' ] for tool_dict in current_tools ] current_guids.sort() - current_readme_files = current_metadata_dict.get( 'readme_files', [] ) - current_repository_dependencies_dict = current_metadata_dict.get( 'repository_dependencies', {} ) + current_readme_files = self.metadata_dict.get( 'readme_files', [] ) + current_repository_dependencies_dict = self.metadata_dict.get( 'repository_dependencies', {} ) current_repository_dependencies = current_repository_dependencies_dict.get( 'repository_dependencies', [] ) - current_tool_dependencies = current_metadata_dict.get( 'tool_dependencies', {} ) - current_workflows = current_metadata_dict.get( 'workflows', [] ) - current_data_manager = current_metadata_dict.get( 'data_manager', {} ) + current_tool_dependencies = self.metadata_dict.get( 'tool_dependencies', {} ) + current_workflows = self.metadata_dict.get( 'workflows', [] ) + current_data_manager = self.metadata_dict.get( 'data_manager', {} ) # Handle case where no metadata exists for either changeset. no_datatypes = not ancestor_datatypes and not current_datatypes no_readme_files = not ancestor_readme_files and not current_readme_files @@ -334,7 +341,7 @@ return self.SUBSET return self.NOT_EQUAL_AND_NOT_SUBSET - def create_or_update_repository_metadata( self, id, repository, changeset_revision, metadata_dict ): + def create_or_update_repository_metadata( self, changeset_revision, metadata_dict ): """Create or update a repository_metadata record in the tool shed.""" has_repository_dependencies = False has_repository_dependencies_only_if_compiling_contained_td = False @@ -364,7 +371,9 @@ downloadable = True else: downloadable = False - repository_metadata = suc.get_repository_metadata_by_changeset_revision( self.app, id, changeset_revision ) + repository_metadata = suc.get_repository_metadata_by_changeset_revision( self.app, + self.app.security.encode_id( self.repository.id ), + changeset_revision ) if repository_metadata: # A repository metadata record already exists with the received changeset_revision, # so we don't need to check the skip_tool_test table. @@ -381,7 +390,7 @@ # need to update the skip_tool_test table. check_skip_tool_test = True repository_metadata = \ - self.app.model.RepositoryMetadata( repository_id=repository.id, + self.app.model.RepositoryMetadata( repository_id=self.repository.id, changeset_revision=changeset_revision, metadata=metadata_dict, downloadable=downloadable, @@ -407,7 +416,7 @@ # if it is contained in the skip_tool_test table. If it is, but is not associated # with a repository_metadata record, reset that skip_tool_test record to the newly # created repository_metadata record.
- repo = hg_util.get_repo_for_repository( self.app, repository=repository, repo_path=None, create=False ) + repo = hg_util.get_repo_for_repository( self.app, repository=self.repository, repo_path=None, create=False ) for changeset in repo.changelog: changeset_hash = str( repo.changectx( changeset ) ) skip_tool_test = self.get_skip_tool_test_by_changeset_revision( changeset_hash ) @@ -531,16 +540,16 @@ .filter( self.app.model.SkipToolTest.table.c.initial_changeset_revision == changeset_revision ) \ .first() - def new_datatypes_metadata_required( self, repository_metadata, metadata_dict ): + def new_datatypes_metadata_required( self, repository_metadata ): """ Compare the last saved metadata for each datatype in the repository with the new metadata - in metadata_dict to determine if a new repository_metadata table record is required or if - the last saved metadata record can be updated for datatypes instead. + in self.metadata_dict to determine if a new repository_metadata table record is required + or if the last saved metadata record can be updated for datatypes instead. """ # Datatypes are stored in metadata as a list of dictionaries that looks like: # [{'dtype': 'galaxy.datatypes.data:Text', 'subclass': 'True', 'extension': 'acedb'}] - if 'datatypes' in metadata_dict: - current_datatypes = metadata_dict[ 'datatypes' ] + if 'datatypes' in self.metadata_dict: + current_datatypes = self.metadata_dict[ 'datatypes' ] if repository_metadata: metadata = repository_metadata.metadata if metadata: @@ -564,37 +573,31 @@ # There is no stored repository metadata, so we need to create a new repository_metadata # table record. return True - # The received metadata_dict includes no metadata for datatypes, so a new repository_metadata + # self.metadata_dict includes no metadata for datatypes, so a new repository_metadata # table record is not needed. return False - def new_metadata_required_for_utilities( self, repository, new_tip_metadata_dict ): + def new_metadata_required_for_utilities( self ): """ - Galaxy utilities currently consist of datatypes, repository_dependency definitions, - tools, tool_dependency definitions and exported Galaxy workflows. This method compares - the last stored repository_metadata record associated with the received repository against - the contents of the received new_tip_metadata_dict and returns True or False for the union - set of Galaxy utilities contained in both metadata dictionaries. The metadata contained - in new_tip_metadata_dict may not be a subset of that contained in the last stored - repository_metadata record associated with the received repository because one or more - Galaxy utilities may have been deleted from the repository in the new tip. + This method compares the last stored repository_metadata record associated with self.repository + against the contents of self.metadata_dict and returns True or False for the union set of Galaxy + utilities contained in both metadata dictionaries. The metadata contained in self.metadata_dict + may not be a subset of that contained in the last stored repository_metadata record associated with + self.repository because one or more Galaxy utilities may have been deleted from self.repository in + the new tip. 
""" repository_metadata = metadata_util.get_latest_repository_metadata( self.app, - repository.id, + self.repository.id, downloadable=False ) - datatypes_required = self.new_datatypes_metadata_required( repository_metadata, - new_tip_metadata_dict ) + datatypes_required = self.new_datatypes_metadata_required( repository_metadata ) # Uncomment the following if we decide that README files should affect how installable # repository revisions are defined. See the NOTE in the compare_readme_files() method. - # readme_files_required = sewlf.new_readme_files_metadata_required( repository_metadata, - # new_tip_metadata_dict ) + # readme_files_required = sewlf.new_readme_files_metadata_required( repository_metadata ) repository_dependencies_required = \ - self.new_repository_dependency_metadata_required( repository_metadata, - new_tip_metadata_dict ) - tools_required = self.new_tool_metadata_required( repository_metadata, new_tip_metadata_dict ) - tool_dependencies_required = self.new_tool_dependency_metadata_required( repository_metadata, - new_tip_metadata_dict ) - workflows_required = self.new_workflow_metadata_required( repository_metadata, new_tip_metadata_dict ) + self.new_repository_dependency_metadata_required( repository_metadata ) + tools_required = self.new_tool_metadata_required( repository_metadata ) + tool_dependencies_required = self.new_tool_dependency_metadata_required( repository_metadata ) + workflows_required = self.new_workflow_metadata_required( repository_metadata ) if datatypes_required or \ repository_dependencies_required or \ tools_required or \ @@ -603,17 +606,17 @@ return True return False - def new_readme_files_metadata_required( self, repository_metadata, metadata_dict ): + def new_readme_files_metadata_required( self, repository_metadata ): """ Compare the last saved metadata for each readme file in the repository with the new metadata - in metadata_dict to determine if a new repository_metadata table record is required or if the - last saved metadata record can be updated for readme files instead. + in self.metadata_dict to determine if a new repository_metadata table record is required or + if the last saved metadata record can be updated for readme files instead. """ # Repository README files are kind of a special case because they have no effect on reproducibility. # We'll simply inspect the file names to determine if any that exist in the saved metadata are - # eliminated from the new metadata in the received metadata_dict. - if 'readme_files' in metadata_dict: - current_readme_files = metadata_dict[ 'readme_files' ] + # eliminated from the new metadata in self.metadata_dict. + if 'readme_files' in self.metadata_dict: + current_readme_files = self.metadata_dict[ 'readme_files' ] if repository_metadata: metadata = repository_metadata.metadata if metadata: @@ -638,14 +641,14 @@ # There is no stored repository metadata, so we need to create a new repository_metadata # table record. return True - # The received metadata_dict includes no metadata for readme_files, so a new repository_metadata + # self.metadata_dict includes no metadata for readme_files, so a new repository_metadata # table record is not needed. 
return False - def new_repository_dependency_metadata_required( self, repository_metadata, metadata_dict ): + def new_repository_dependency_metadata_required( self, repository_metadata ): """ Compare the last saved metadata for each repository dependency in the repository - with the new metadata in metadata_dict to determine if a new repository_metadata + with the new metadata in self.metadata_dict to determine if a new repository_metadata table record is required or if the last saved metadata record can be updated for repository_dependencies instead. """ @@ -653,9 +656,9 @@ metadata = repository_metadata.metadata if 'repository_dependencies' in metadata: saved_repository_dependencies = metadata[ 'repository_dependencies' ][ 'repository_dependencies' ] - new_repository_dependencies_metadata = metadata_dict.get( 'repository_dependencies', None ) + new_repository_dependencies_metadata = self.metadata_dict.get( 'repository_dependencies', None ) if new_repository_dependencies_metadata: - new_repository_dependencies = metadata_dict[ 'repository_dependencies' ][ 'repository_dependencies' ] + new_repository_dependencies = self.metadata_dict[ 'repository_dependencies' ][ 'repository_dependencies' ] # TODO: We used to include the following here to handle the case where repository # dependency definitions were deleted. However, this erroneously returned True in # cases where it should not have done so. This usually occurred where multiple single @@ -682,21 +685,21 @@ else: return False else: - if 'repository_dependencies' in metadata_dict: + if 'repository_dependencies' in self.metadata_dict: # There is no saved repository metadata, so we need to create a new repository_metadata record. return True else: - # The received metadata_dict includes no metadata for repository dependencies, so - # a new repository_metadata record is not needed. + # self.metadata_dict includes no metadata for repository dependencies, so a new repository_metadata + # record is not needed. return False - def new_tool_metadata_required( self, repository_metadata, metadata_dict ): + def new_tool_metadata_required( self, repository_metadata ): """ Compare the last saved metadata for each tool in the repository with the new metadata in - metadata_dict to determine if a new repository_metadata table record is required, or if + self.metadata_dict to determine if a new repository_metadata table record is required, or if the last saved metadata record can be updated instead. """ - if 'tools' in metadata_dict: + if 'tools' in self.metadata_dict: if repository_metadata: metadata = repository_metadata.metadata if metadata: @@ -704,9 +707,9 @@ saved_tool_ids = [] # The metadata for one or more tools was successfully generated in the past # for this repository, so we first compare the version string for each tool id - # in metadata_dict with what was previously saved to see if we need to create + # in self.metadata_dict with what was previously saved to see if we need to create # a new table record or if we can simply update the existing record.
- for new_tool_metadata_dict in metadata_dict[ 'tools' ]: + for new_tool_metadata_dict in self.metadata_dict[ 'tools' ]: for saved_tool_metadata_dict in metadata[ 'tools' ]: if saved_tool_metadata_dict[ 'id' ] not in saved_tool_ids: saved_tool_ids.append( saved_tool_metadata_dict[ 'id' ] ) @@ -714,10 +717,10 @@ if new_tool_metadata_dict[ 'version' ] != saved_tool_metadata_dict[ 'version' ]: return True # So far, a new metadata record is not required, but we still have to check to see if - # any new tool ids exist in metadata_dict that are not in the saved metadata. We do + # any new tool ids exist in self.metadata_dict that are not in the saved metadata. We do # this because if a new tarball was uploaded to a repository that included tools, it # may have removed existing tool files if they were not included in the uploaded tarball. - for new_tool_metadata_dict in metadata_dict[ 'tools' ]: + for new_tool_metadata_dict in self.metadata_dict[ 'tools' ]: if new_tool_metadata_dict[ 'id' ] not in saved_tool_ids: return True return False @@ -733,22 +736,22 @@ # There is no stored repository metadata, so we need to create a new repository_metadata # table record. return True - # The received metadata_dict includes no metadata for tools, so a new repository_metadata table + # self.metadata_dict includes no metadata for tools, so a new repository_metadata table # record is not needed. return False - def new_tool_dependency_metadata_required( self, repository_metadata, metadata_dict ): + def new_tool_dependency_metadata_required( self, repository_metadata ): """ Compare the last saved metadata for each tool dependency in the repository with the new - metadata in metadata_dict to determine if a new repository_metadata table record is required - or if the last saved metadata record can be updated for tool_dependencies instead. + metadata in self.metadata_dict to determine if a new repository_metadata table record is + required or if the last saved metadata record can be updated for tool_dependencies instead. """ if repository_metadata: metadata = repository_metadata.metadata if metadata: if 'tool_dependencies' in metadata: saved_tool_dependencies = metadata[ 'tool_dependencies' ] - new_tool_dependencies = metadata_dict.get( 'tool_dependencies', None ) + new_tool_dependencies = self.metadata_dict.get( 'tool_dependencies', None ) if new_tool_dependencies: # TODO: We used to include the following here to handle the case where # tool dependency definitions were deleted. However, this erroneously @@ -777,22 +780,22 @@ # in the repository, so we can update the existing repository metadata. return False else: - if 'tool_dependencies' in metadata_dict: + if 'tool_dependencies' in self.metadata_dict: # There is no saved repository metadata, so we need to create a new repository_metadata # record. return True else: - # The received metadata_dict includes no metadata for tool dependencies, so a new - # repository_metadata record is not needed. + # self.metadata_dict includes no metadata for tool dependencies, so a new repository_metadata + # record is not needed. return False - def new_workflow_metadata_required( self, repository_metadata, metadata_dict ): + def new_workflow_metadata_required( self, repository_metadata ): """ Currently everything about an exported workflow except the name is hard-coded, so there's no real way to differentiate versions of exported workflows. If this changes at some future time, this method should be enhanced accordingly. 
""" - if 'workflows' in metadata_dict: + if 'workflows' in self.metadata_dict: if repository_metadata: # The repository has metadata, so update the workflows value - # no new record is needed. @@ -801,17 +804,17 @@ # There is no saved repository metadata, so we need to create a # new repository_metadata table record. return True - # The received metadata_dict includes no metadata for workflows, so a new + # self.metadata_dict includes no metadata for workflows, so a new # repository_metadata table record is not needed. return False - def reset_all_metadata_on_repository_in_tool_shed( self, id ): + def reset_all_metadata_on_repository_in_tool_shed( self ): """Reset all metadata on a single repository in a tool shed.""" - repository = suc.get_repository_in_tool_shed( self.app, id ) - log.debug( "Resetting all metadata on repository: %s" % repository.name ) - repo_dir = repository.repo_path( self.app ) - repo = hg_util.get_repo_for_repository( self.app, repository=None, repo_path=repo_dir, create=False ) - repository_clone_url = common_util.generate_clone_url_for_repository_in_tool_shed( self.user, repository ) + log.debug( "Resetting all metadata on repository: %s" % self.repository.name ) + repo = hg_util.get_repo_for_repository( self.app, + repository=None, + repo_path=self.repository.repo_path( self.app ), + create=False ) # The list of changeset_revisions refers to repository_metadata records that have been created # or updated. When the following loop completes, we'll delete all repository_metadata records # for this repository that do not have a changeset_revision value in this list. @@ -822,33 +825,22 @@ metadata_dict = None ancestor_changeset_revision = None ancestor_metadata_dict = None - invalid_file_tups = [] - for changeset in repository.get_changesets_for_setting_metadata( self.app ): + for changeset in self.repository.get_changesets_for_setting_metadata( self.app ): work_dir = tempfile.mkdtemp( prefix="tmp-toolshed-ramorits" ) - current_changeset_revision = str( repo.changectx( changeset ) ) ctx = repo.changectx( changeset ) log.debug( "Cloning repository changeset revision: %s", str( ctx.rev() ) ) - cloned_ok, error_message = hg_util.clone_repository( repository_clone_url, work_dir, str( ctx.rev() ) ) + cloned_ok, error_message = hg_util.clone_repository( self.repository_clone_url, work_dir, str( ctx.rev() ) ) if cloned_ok: log.debug( "Generating metadata for changset revision: %s", str( ctx.rev() ) ) - current_metadata_dict, invalid_tups = \ - self.generate_metadata_for_changeset_revision( repository=repository, - changeset_revision=current_changeset_revision, - repository_clone_url=repository_clone_url, - relative_install_dir=repo_dir, - repository_files_dir=work_dir, - resetting_all_metadata_on_repository=True, - updating_installed_repository=False, - persist=False ) - # We'll only display error messages for the repository tip (it may be better to display error - # messages for each installable changeset revision). - if current_changeset_revision == repository.tip( self.app ): - invalid_file_tups.extend( invalid_tups ) - if current_metadata_dict: + self.set_changeset_revision( str( repo.changectx( changeset ) ) ) + self.set_relative_install_dir( work_dir ) + self.set_repository_files_dir( work_dir ) + self.generate_metadata_for_changeset_revision() + if self.metadata_dict: if metadata_changeset_revision is None and metadata_dict is None: # We're at the first change set in the change log. 
- metadata_changeset_revision = current_changeset_revision - metadata_dict = current_metadata_dict + metadata_changeset_revision = self.changeset_revision + metadata_dict = self.metadata_dict if ancestor_changeset_revision: # Compare metadata from ancestor and current. The value of comparison will be one of: # self.NO_METADATA - no metadata for either ancestor or current, so continue from current @@ -856,66 +848,56 @@ # self.SUBSET - ancestor metadata is a subset of current metadata, so continue from current # self.NOT_EQUAL_AND_NOT_SUBSET - ancestor metadata is neither equal to nor a subset of current # metadata, so persist ancestor metadata. - comparison = self.compare_changeset_revisions( ancestor_changeset_revision, - ancestor_metadata_dict, - current_changeset_revision, - current_metadata_dict ) + comparison = self.compare_changeset_revisions( ancestor_changeset_revision, ancestor_metadata_dict ) if comparison in [ self.NO_METADATA, self.EQUAL, self.SUBSET ]: - ancestor_changeset_revision = current_changeset_revision - ancestor_metadata_dict = current_metadata_dict + ancestor_changeset_revision = self.changeset_revision + ancestor_metadata_dict = self.metadata_dict elif comparison == self.NOT_EQUAL_AND_NOT_SUBSET: metadata_changeset_revision = ancestor_changeset_revision metadata_dict = ancestor_metadata_dict - repository_metadata = self.create_or_update_repository_metadata( id, - repository, - metadata_changeset_revision, - metadata_dict ) + repository_metadata = self.create_or_update_repository_metadata( metadata_changeset_revision, metadata_dict ) changeset_revisions.append( metadata_changeset_revision ) - ancestor_changeset_revision = current_changeset_revision - ancestor_metadata_dict = current_metadata_dict + ancestor_changeset_revision = self.changeset_revision + ancestor_metadata_dict = self.metadata_dict else: # We're at the beginning of the change log. - ancestor_changeset_revision = current_changeset_revision - ancestor_metadata_dict = current_metadata_dict + ancestor_changeset_revision = self.changeset_revision + ancestor_metadata_dict = self.metadata_dict if not ctx.children(): - metadata_changeset_revision = current_changeset_revision - metadata_dict = current_metadata_dict + metadata_changeset_revision = self.changeset_revision + metadata_dict = self.metadata_dict # We're at the end of the change log. - repository_metadata = self.create_or_update_repository_metadata( id, - repository, - metadata_changeset_revision, - metadata_dict ) + repository_metadata = self.create_or_update_repository_metadata( metadata_changeset_revision, metadata_dict ) changeset_revisions.append( metadata_changeset_revision ) ancestor_changeset_revision = None ancestor_metadata_dict = None elif ancestor_metadata_dict: - # We reach here only if current_metadata_dict is empty and ancestor_metadata_dict is not. + # We reach here only if self.metadata_dict is empty and ancestor_metadata_dict is not. if not ctx.children(): # We're at the end of the change log. - repository_metadata = self.create_or_update_repository_metadata( id, - repository, - metadata_changeset_revision, - metadata_dict ) + repository_metadata = self.create_or_update_repository_metadata( metadata_changeset_revision, metadata_dict ) changeset_revisions.append( metadata_changeset_revision ) ancestor_changeset_revision = None ancestor_metadata_dict = None basic_util.remove_dir( work_dir ) # Delete all repository_metadata records for this repository that do not have a changeset_revision # value in changeset_revisions. 
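Taken together, this refactoring replaces long per-call argument lists with state carried on the manager itself, so a caller now follows roughly the pattern visible in reset_metadata_on_selected_repositories() below. A hypothetical caller, assuming app, user and repository objects are already in hand:

    # Construct the manager around a repository, run the reset, then read
    # results back through the new accessors instead of return values.
    rmm = RepositoryMetadataManager(app, user, repository=repository)
    rmm.reset_all_metadata_on_repository_in_tool_shed()
    for filename, error_message in rmm.get_invalid_file_tups():
        log.debug("invalid file %s: %s" % (filename, error_message))
    metadata = rmm.get_metadata_dict()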
- self.clean_repository_metadata( id, changeset_revisions ) + self.clean_repository_metadata( changeset_revisions ) # Set tool version information for all downloadable changeset revisions. Get the list of changeset # revisions from the changelog. - self.reset_all_tool_versions( id, repo ) + self.reset_all_tool_versions( repo ) # Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file. self.app.tool_data_tables.data_tables = {} - return invalid_file_tups, metadata_dict - def reset_all_tool_versions( self, id, repo ): + def reset_all_tool_versions( self, repo ): """Reset tool version lineage for those changeset revisions that include valid tools.""" + encoded_repository_id = self.app.security.encode_id( self.repository.id ) changeset_revisions_that_contain_tools = [] for changeset in repo.changelog: changeset_revision = str( repo.changectx( changeset ) ) - repository_metadata = suc.get_repository_metadata_by_changeset_revision( self.app, id, changeset_revision ) + repository_metadata = suc.get_repository_metadata_by_changeset_revision( self.app, + encoded_repository_id, + changeset_revision ) if repository_metadata: metadata = repository_metadata.metadata if metadata: @@ -926,7 +908,9 @@ # { 'tool id' : 'parent tool id' } pairs for each tool in each changeset revision. for index, changeset_revision in enumerate( changeset_revisions_that_contain_tools ): tool_versions_dict = {} - repository_metadata = suc.get_repository_metadata_by_changeset_revision( self.app, id, changeset_revision ) + repository_metadata = suc.get_repository_metadata_by_changeset_revision( self.app, + encoded_repository_id, + changeset_revision ) metadata = repository_metadata.metadata tool_dicts = metadata[ 'tools' ] if index == 0: @@ -937,7 +921,7 @@ tool_versions_dict[ tool_dict[ 'guid' ] ] = tool_dict[ 'id' ] else: for tool_dict in tool_dicts: - parent_id = self.get_parent_id( id, + parent_id = self.get_parent_id( encoded_repository_id, tool_dict[ 'id' ], tool_dict[ 'version' ], tool_dict[ 'guid' ], @@ -962,11 +946,11 @@ for repository_id in repository_ids: try: repository = suc.get_repository_in_tool_shed( self.app, repository_id ) - invalid_file_tups, metadata_dict = \ - self.reset_all_metadata_on_repository_in_tool_shed( repository_id ) - if invalid_file_tups: + self.set_repository( repository ) + self.reset_all_metadata_on_repository_in_tool_shed() + if self.invalid_file_tups: message = tool_util.generate_message_for_invalid_tools( self.app, - invalid_file_tups, + self.invalid_file_tups, repository, None, as_html=False ) @@ -974,7 +958,7 @@ unsuccessful_count += 1 else: log.debug( "Successfully reset metadata on repository %s owned by %s" % \ - ( str( repository.name ), str( repository.user.username ) ) ) + ( str( repository.name ), str( repository.user.username ) ) ) successful_count += 1 except: log.exception( "Error attempting to reset metadata on repository %s" % str( repository.name ) ) @@ -989,75 +973,68 @@ status = 'error' return message, status - def set_repository_metadata( self, host, repository, content_alert_str='', **kwd ): + def set_repository( self, repository ): + super( RepositoryMetadataManager, self ).set_repository( repository ) + self.repository_clone_url = common_util.generate_clone_url_for_repository_in_tool_shed( self.user, repository ) + + def set_repository_metadata( self, host, content_alert_str='', **kwd ): """ - Set metadata using the repository's current disk files, returning specific error + Set metadata using the self.repository's current disk files, returning 
specific error messages (if any) to alert the repository owner that the changeset has problems. """ message = '' status = 'done' - encoded_id = self.app.security.encode_id( repository.id ) - repository_clone_url = common_util.generate_clone_url_for_repository_in_tool_shed( self.user, repository ) - repo_dir = repository.repo_path( self.app ) + encoded_id = self.app.security.encode_id( self.repository.id ) + repo_dir = self.repository.repo_path( self.app ) repo = hg_util.get_repo_for_repository( self.app, repository=None, repo_path=repo_dir, create=False ) - metadata_dict, invalid_file_tups = \ - self.generate_metadata_for_changeset_revision( repository=repository, - changeset_revision=repository.tip( self.app ), - repository_clone_url=repository_clone_url, - relative_install_dir=repo_dir, - repository_files_dir=None, - resetting_all_metadata_on_repository=False, - updating_installed_repository=False, - persist=False ) - if metadata_dict: + self.generate_metadata_for_changeset_revision() + if self.metadata_dict: repository_metadata = None - repository_type_class = self.app.repository_types_registry.get_class_by_label( repository.type ) + repository_type_class = self.app.repository_types_registry.get_class_by_label( self.repository.type ) tip_only = isinstance( repository_type_class, TipOnly ) - if not tip_only and self.new_metadata_required_for_utilities( repository, metadata_dict ): + if not tip_only and self.new_metadata_required_for_utilities(): # Create a new repository_metadata table row. - repository_metadata = self.create_or_update_repository_metadata( encoded_id, - repository, - repository.tip( self.app ), - metadata_dict ) + repository_metadata = self.create_or_update_repository_metadata( self.repository.tip( self.app ), + self.metadata_dict ) # If this is the first record stored for this repository, see if we need to send any email alerts. - if len( repository.downloadable_revisions ) == 1: + if len( self.repository.downloadable_revisions ) == 1: suc.handle_email_alerts( self.app, host, - repository, + self.repository, content_alert_str='', new_repo_alert=True, admin_only=False ) else: - # Update the latest stored repository metadata with the contents and attributes of metadata_dict. + # Update the latest stored repository metadata with the contents and attributes of self.metadata_dict. repository_metadata = metadata_util.get_latest_repository_metadata( self.app, - repository.id, + self.repository.id, downloadable=False ) if repository_metadata: - downloadable = metadata_util.is_downloadable( metadata_dict ) + downloadable = metadata_util.is_downloadable( self.metadata_dict ) # Update the last saved repository_metadata table row. - repository_metadata.changeset_revision = repository.tip( self.app ) - repository_metadata.metadata = metadata_dict + repository_metadata.changeset_revision = self.repository.tip( self.app ) + repository_metadata.metadata = self.metadata_dict repository_metadata.downloadable = downloadable - if 'datatypes' in metadata_dict: + if 'datatypes' in self.metadata_dict: repository_metadata.includes_datatypes = True else: repository_metadata.includes_datatypes = False # We don't store information about the special type of repository dependency that is needed only for # compiling a tool dependency defined for the dependent repository. 
- repository_dependencies_dict = metadata_dict.get( 'repository_dependencies', {} ) + repository_dependencies_dict = self.metadata_dict.get( 'repository_dependencies', {} ) repository_dependencies = repository_dependencies_dict.get( 'repository_dependencies', [] ) has_repository_dependencies, has_repository_dependencies_only_if_compiling_contained_td = \ suc.get_repository_dependency_types( repository_dependencies ) repository_metadata.has_repository_dependencies = has_repository_dependencies - if 'tool_dependencies' in metadata_dict: + if 'tool_dependencies' in self.metadata_dict: repository_metadata.includes_tool_dependencies = True else: repository_metadata.includes_tool_dependencies = False - if 'tools' in metadata_dict: + if 'tools' in self.metadata_dict: repository_metadata.includes_tools = True else: repository_metadata.includes_tools = False - if 'workflows' in metadata_dict: + if 'workflows' in self.metadata_dict: repository_metadata.includes_workflows = True else: repository_metadata.includes_workflows = False @@ -1070,11 +1047,9 @@ self.sa_session.flush() else: # There are no metadata records associated with the repository. - repository_metadata = self.create_or_update_repository_metadata( encoded_id, - repository, - repository.tip( self.app ), - metadata_dict ) - if 'tools' in metadata_dict and repository_metadata and status != 'error': + repository_metadata = self.create_or_update_repository_metadata( self.repository.tip( self.app ), + self.metadata_dict ) + if 'tools' in self.metadata_dict and repository_metadata and status != 'error': # Set tool versions on the new downloadable change set. The order of the list of changesets is # critical, so we use the repo's changelog. changeset_revisions = [] @@ -1083,25 +1058,22 @@ if suc.get_repository_metadata_by_changeset_revision( self.app, encoded_id, changeset_revision ): changeset_revisions.append( changeset_revision ) self.add_tool_versions( encoded_id, repository_metadata, changeset_revisions ) - elif len( repo ) == 1 and not invalid_file_tups: + elif len( repo ) == 1 and not self.invalid_file_tups: message = "Revision <b>%s</b> includes no Galaxy utilities for which metadata can " % \ - str( repository.tip( self.app ) ) + str( self.repository.tip( self.app ) ) message += "be defined so this revision cannot be automatically installed into a local Galaxy instance." status = "error" - if invalid_file_tups: + if self.invalid_file_tups: message = tool_util.generate_message_for_invalid_tools( self.app, - invalid_file_tups, - repository, - metadata_dict ) + self.invalid_file_tups, + self.repository, + self.metadata_dict ) status = 'error' # Reset the tool_data_tables by loading the empty tool_data_table_conf.xml file. 
self.app.tool_data_tables.data_tables = {} return message, status - def set_repository_metadata_due_to_new_tip( self, host, repository, content_alert_str=None, **kwd ): - """Set metadata on the repository tip in the tool shed.""" - error_message, status = self.set_repository_metadata( host, - repository, - content_alert_str=content_alert_str, - **kwd ) + def set_repository_metadata_due_to_new_tip( self, host, content_alert_str=None, **kwd ): + """Set metadata on the tip of self.repository in the tool shed.""" + error_message, status = self.set_repository_metadata( host, content_alert_str=content_alert_str, **kwd ) return status, error_message diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c run_galaxy_listener.sh --- a/run_galaxy_listener.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -cd `dirname $0` -python scripts/galaxy_messaging/server/amqp_consumer.py --config-file=universe_wsgi.ini --http-server-section=server:main 2>&1 \ No newline at end of file diff -r aeca0d388e83f87ca74bbbb093746b2f74f93b86 -r 14d243c7069ea33df70635bb4208b39970db951c scripts/cleanup_datasets/delete_datasets_main.sh --- a/scripts/cleanup_datasets/delete_datasets_main.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -cd `dirname $0`/../.. -python ./scripts/cleanup_datasets/cleanup_datasets.py ./universe_wsgi.ini -d 60 -6 -r $@ >> ./scripts/cleanup_datasets/delete_datasets.log This diff is so big that we needed to truncate the remainder. https://bitbucket.org/galaxy/galaxy-central/commits/87a5903ec2c4/ Changeset: 87a5903ec2c4 User: natefoo Date: 2014-09-16 04:12:18 Summary: Don't create galaxy.ini, just use the sample if galaxy.ini does not exist. Also fix startup when only galaxy.ini.sample exists. Fixes the universe_wsgi.ini skipping problem Dannon found. Affected #: 4 files
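In effect, the run.sh and common_startup.sh changes below give the startup scripts a three-step lookup: an already-exported $GALAXY_CONFIG_FILE wins; otherwise a legacy universe_wsgi.ini is preferred, then config/galaxy.ini, and finally the shipped config/galaxy.ini.sample is used as-is. A rough Python equivalent of that lookup order (the function is illustrative only; the shell in the diffs below is the actual change):

import os

def resolve_galaxy_config():
    # Mirror the fallback order in run.sh: an explicit environment
    # variable wins; otherwise prefer a legacy universe_wsgi.ini, then
    # config/galaxy.ini, and finally fall back to the shipped sample so
    # a fresh clone can start without any config file being created.
    explicit = os.environ.get("GALAXY_CONFIG_FILE")
    if explicit:
        return explicit
    for candidate in ("universe_wsgi.ini", "config/galaxy.ini"):
        if os.path.exists(candidate):
            return candidate
    return "config/galaxy.ini.sample"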
diff -r 14d243c7069ea33df70635bb4208b39970db951c -r 87a5903ec2c4fafbee4b75c903de6ab594f91384 lib/galaxy/config.py --- a/lib/galaxy/config.py +++ b/lib/galaxy/config.py @@ -408,7 +408,6 @@ break else: path = defaults[-1] - print( "Using config '%s' at: %s" % ( var, path ) ) setattr( self, var, resolve_path( path, self.root ) ) for var, defaults in listify_defaults.items(): @@ -425,7 +424,6 @@ break else: paths = listify( defaults[-1] ) - print( "Using config '%s' at: %s" % ( var, ', '.join( paths ) ) ) setattr( self, var, [ resolve_path( x, self.root ) for x in paths ] ) # Backwards compatibility for names used in too many places to fix diff -r 14d243c7069ea33df70635bb4208b39970db951c -r 87a5903ec2c4fafbee4b75c903de6ab594f91384 rolling_restart.sh --- a/rolling_restart.sh +++ b/rolling_restart.sh @@ -17,12 +17,12 @@ } -CONFIG_FILE=config/galaxy.ini -if [ ! -f $CONFIG_FILE ]; then - CONFIG_FILE=universe_wsgi.ini +GALAXY_CONFIG_FILE=config/galaxy.ini +if [ ! -f $GALAXY_CONFIG_FILE ]; then + GALAXY_CONFIG_FILE=universe_wsgi.ini fi -servers=`sed -n 's/^\[server:\(.*\)\]/\1/ p' $CONFIG_FILE | xargs echo` +servers=`sed -n 's/^\[server:\(.*\)\]/\1/ p' $GALAXY_CONFIG_FILE | xargs echo` for server in $servers; do # If there's a pid @@ -37,7 +37,7 @@ echo "$server not running" fi # Start the server (and background) (should this be nohup'd?) - python ./scripts/paster.py serve $CONFIG_FILE --server-name=$server --pid-file=$server.pid --log-file=$server.log --daemon $@ + python ./scripts/paster.py serve $GALAXY_CONFIG_FILE --server-name=$server --pid-file=$server.pid --log-file=$server.log --daemon $@ # Wait for the server to start sleep 1 # Grab the new pid diff -r 14d243c7069ea33df70635bb4208b39970db951c -r 87a5903ec2c4fafbee4b75c903de6ab594f91384 run.sh --- a/run.sh +++ b/run.sh @@ -18,13 +18,19 @@ python ./scripts/build_universe_config.py "$GALAXY_UNIVERSE_CONFIG_DIR" fi -CONFIG_FILE=config/galaxy.ini -if [ ! -f $CONFIG_FILE ]; then - CONFIG_FILE=universe_wsgi.ini +if [ -z "$GALAXY_CONFIG_FILE" ]; then + if [ -f universe_wsgi.ini ]; then + GALAXY_CONFIG_FILE=universe_wsgi.ini + elif [ -f config/galaxy.ini ]; then + GALAXY_CONFIG_FILE=config/galaxy.ini + else + GALAXY_CONFIG_FILE=config/galaxy.ini.sample + fi + export GALAXY_CONFIG_FILE fi if [ -n "$GALAXY_RUN_ALL" ]; then - servers=`sed -n 's/^\[server:\(.*\)\]/\1/ p' $CONFIG_FILE | xargs echo` + servers=`sed -n 's/^\[server:\(.*\)\]/\1/ p' $GALAXY_CONFIG_FILE | xargs echo` daemon=`echo "$@" | grep -q daemon` if [ $? -ne 0 ]; then echo 'ERROR: $GALAXY_RUN_ALL cannot be used without the `--daemon` or `--stop-daemon` arguments to run.sh' @@ -32,8 +38,8 @@ fi for server in $servers; do echo "Handling $server with log file $server.log..." - python ./scripts/paster.py serve $CONFIG_FILE --server-name=$server --pid-file=$server.pid --log-file=$server.log $@ + python ./scripts/paster.py serve $GALAXY_CONFIG_FILE --server-name=$server --pid-file=$server.pid --log-file=$server.log $@ done else - python ./scripts/paster.py serve $CONFIG_FILE $@ + python ./scripts/paster.py serve $GALAXY_CONFIG_FILE $@ fi diff -r 14d243c7069ea33df70635bb4208b39970db951c -r 87a5903ec2c4fafbee4b75c903de6ab594f91384 scripts/common_startup.sh --- a/scripts/common_startup.sh +++ b/scripts/common_startup.sh @@ -10,7 +10,6 @@ done SAMPLES=" - config/galaxy.ini.sample config/migrated_tools_conf.xml.sample config/shed_tool_conf.xml.sample config/shed_tool_data_table_conf.xml.sample @@ -35,11 +34,13 @@ done fi +: ${GALAXY_CONFIG_FILE:=config/galaxy.ini.sample} + if [ $FETCH_EGGS -eq 1 ]; then - python ./scripts/check_eggs.py -q + python ./scripts/check_eggs.py -q -c $GALAXY_CONFIG_FILE if [ $? -ne 0 ]; then echo "Some eggs are out of date, attempting to fetch..." - python ./scripts/fetch_eggs.py + python ./scripts/fetch_eggs.py -c $GALAXY_CONFIG_FILE if [ $? -eq 0 ]; then echo "Fetch successful." else https://bitbucket.org/galaxy/galaxy-central/commits/5f90a490e997/ Changeset: 5f90a490e997 User: dannon Date: 2014-09-16 15:08:35 Summary: Merged in natefoo/galaxy-central (pull request #495) Clean up the root directory and rename universe_wsgi.ini Affected #: 116 files diff -r 76a4156aefd3e7076571f18e9199dde1fb05b0f3 -r 5f90a490e9971685945081cbed681c961932a4fd README.txt --- a/README.txt +++ b/README.txt @@ -22,13 +22,13 @@ http://localhost:8080 You may wish to make changes from the default configuration. This can be done -in the universe_wsgi.ini file. Tools can be either installed from the Tool Shed +in the config/galaxy.ini file. Tools can be either installed from the Tool Shed or added manually. For details please see the Galaxy wiki: -https://wiki.galaxyproject.org/Admin/Tools/AddToolFromToolShedTutorial. +https://wiki.galaxyproject.org/Admin/Tools/AddToolFromToolShedTutorial Not all dependencies are included for the tools provided in the sample tool_conf.xml.
A full list of external dependencies is available at: -https://wiki.galaxyproject.org/Admin/Tools/ToolDependencies \ No newline at end of file +https://wiki.galaxyproject.org/Admin/Tools/ToolDependencies diff -r 76a4156aefd3e7076571f18e9199dde1fb05b0f3 -r 5f90a490e9971685945081cbed681c961932a4fd buildbot_setup.sh --- a/buildbot_setup.sh +++ b/buildbot_setup.sh @@ -43,15 +43,10 @@ " SAMPLES=" -tool_conf.xml.sample -datatypes_conf.xml.sample -universe_wsgi.ini.sample -tool_data_table_conf.xml.sample -tool_sheds_conf.xml.sample -shed_tool_data_table_conf.xml.sample -migrated_tools_conf.xml.sample -data_manager_conf.xml.sample -shed_data_manager_conf.xml.sample +config/galaxy.ini.sample +config/shed_tool_data_table_conf.xml.sample +config/migrated_tools_conf.xml.sample +config/shed_data_manager_conf.xml.sample tool-data/shared/igv/igv_build_sites.txt.sample tool-data/shared/rviewer/rviewer_build_sites.txt.sample tool-data/shared/ucsc/builds.txt.sample diff -r 76a4156aefd3e7076571f18e9199dde1fb05b0f3 -r 5f90a490e9971685945081cbed681c961932a4fd config/data_manager_conf.xml.sample --- /dev/null +++ b/config/data_manager_conf.xml.sample @@ -0,0 +1,3 @@ +<?xml version="1.0"?> +<data_managers> +</data_managers> diff -r 76a4156aefd3e7076571f18e9199dde1fb05b0f3 -r 5f90a490e9971685945081cbed681c961932a4fd config/datatypes_conf.xml.sample --- /dev/null +++ b/config/datatypes_conf.xml.sample @@ -0,0 +1,333 @@ +<?xml version="1.0"?> +<datatypes> + <registration converters_path="lib/galaxy/datatypes/converters" display_path="display_applications"> + <datatype extension="ab1" type="galaxy.datatypes.binary:Ab1" mimetype="application/octet-stream" display_in_upload="true" description="A binary sequence file in 'ab1' format with a '.ab1' file extension. You must manually select this 'File Format' when uploading the file." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Ab1"/> + <datatype extension="afg" type="galaxy.datatypes.assembly:Amos" display_in_upload="false" /> + <datatype extension="asn1" type="galaxy.datatypes.data:GenericAsn1" mimetype="text/plain" display_in_upload="true" /> + <datatype extension="asn1-binary" type="galaxy.datatypes.binary:GenericAsn1Binary" mimetype="application/octet-stream" display_in_upload="true" /> + <datatype extension="axt" type="galaxy.datatypes.sequence:Axt" display_in_upload="true" description="blastz pairwise alignment format. Each alignment block in an axt file contains three lines: a summary line and 2 sequence lines. Blocks are separated from one another by blank lines. The summary line contains chromosomal position and size information about the alignment. It consists of 9 required fields." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Axt"/> + <datatype extension="fli" type="galaxy.datatypes.tabular:FeatureLocationIndex" display_in_upload="false"/> + <datatype extension="bam" type="galaxy.datatypes.binary:Bam" mimetype="application/octet-stream" display_in_upload="true" description="A binary file compressed in the BGZF format with a '.bam' file extension." 
description_url="https://wiki.galaxyproject.org/Learn/Datatypes#BAM"> + <converter file="bam_to_bai.xml" target_datatype="bai"/> + <converter file="bam_to_bigwig_converter.xml" target_datatype="bigwig"/> + <display file="ucsc/bam.xml" /> + <display file="ensembl/ensembl_bam.xml" /> + <display file="igv/bam.xml" /> + <display file="igb/bam.xml" /> + </datatype> + <datatype extension="bed" type="galaxy.datatypes.interval:Bed" display_in_upload="true" description="BED format provides a flexible way to define the data lines that are displayed in an annotation track. BED lines have three required columns and nine additional optional columns. The three required columns are chrom, chromStart and chromEnd." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Bed"> + <converter file="bed_to_gff_converter.xml" target_datatype="gff"/> + <converter file="bed_to_bgzip_converter.xml" target_datatype="bgzip"/> + <converter file="bed_to_tabix_converter.xml" target_datatype="tabix" depends_on="bgzip"/> + <converter file="bed_gff_or_vcf_to_bigwig_converter.xml" target_datatype="bigwig"/> + <converter file="bed_to_fli_converter.xml" target_datatype="fli"/> + <!-- <display file="ucsc/interval_as_bed.xml" /> --> + <display file="igb/bed.xml" /> + </datatype> + <datatype extension="bedgraph" type="galaxy.datatypes.interval:BedGraph" display_in_upload="true"> + <converter file="bedgraph_to_bigwig_converter.xml" target_datatype="bigwig"/> + <display file="igb/bedgraph.xml" /> + </datatype> + <datatype extension="bedstrict" type="galaxy.datatypes.interval:BedStrict" /> + <datatype extension="bed6" type="galaxy.datatypes.interval:Bed6"> + </datatype> + <datatype extension="bed12" type="galaxy.datatypes.interval:Bed12" /> + <datatype extension="len" type="galaxy.datatypes.chrominfo:ChromInfo" display_in_upload="true"> + <converter file="len_to_linecount.xml" target_datatype="linecount" /> + </datatype> + <datatype extension="bigbed" type="galaxy.datatypes.binary:BigBed" mimetype="application/octet-stream" display_in_upload="true"> + <display file="ucsc/bigbed.xml" /> + <display file="igb/bb.xml" /> + </datatype> + <datatype extension="bigwig" type="galaxy.datatypes.binary:BigWig" mimetype="application/octet-stream" display_in_upload="true"> + <display file="ucsc/bigwig.xml" /> + <display file="igb/bigwig.xml" /> + </datatype> + <datatype extension="chrint" type="galaxy.datatypes.interval:ChromatinInteractions" display_in_upload="True"> + <converter file="interval_to_bgzip_converter.xml" target_datatype="bgzip"/> + <converter file="interval_to_tabix_converter.xml" target_datatype="tabix" depends_on="bgzip"/> + <converter file="bed_gff_or_vcf_to_bigwig_converter.xml" target_datatype="bigwig"/> + </datatype> + <!-- MSI added Datatypes --> + <datatype extension="csv" type="galaxy.datatypes.tabular:Tabular" subclass="True" display_in_upload="true" /><!-- FIXME: csv is 'tabular'ized data, but not 'tab-delimited'; the class used here is intended for 'tab-delimited' --> + <!-- End MSI added Datatypes --> + <datatype extension="customtrack" type="galaxy.datatypes.interval:CustomTrack"/> + <datatype extension="bowtie_color_index" type="galaxy.datatypes.ngsindex:BowtieColorIndex" mimetype="text/html" display_in_upload="False"/> + <datatype extension="bowtie_base_index" type="galaxy.datatypes.ngsindex:BowtieBaseIndex" mimetype="text/html" display_in_upload="False"/> + <datatype extension="csfasta" type="galaxy.datatypes.sequence:csFasta" display_in_upload="true"/> + <datatype extension="data" 
type="galaxy.datatypes.data:Data" mimetype="application/octet-stream" max_optional_metadata_filesize="1048576" /> + <datatype extension="data_manager_json" type="galaxy.datatypes.data:Text" mimetype="application/json" subclass="True" display_in_upload="False"/> + <datatype extension="fasta" type="galaxy.datatypes.sequence:Fasta" display_in_upload="true" description="A sequence in FASTA format consists of a single-line description, followed by lines of sequence data. The first character of the description line is a greater-than ('>') symbol in the first column. All lines should be shorter than 80 characters." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Fasta"> + <converter file="fasta_to_tabular_converter.xml" target_datatype="tabular"/> + <converter file="fasta_to_bowtie_base_index_converter.xml" target_datatype="bowtie_base_index"/> + <converter file="fasta_to_bowtie_color_index_converter.xml" target_datatype="bowtie_color_index"/> + <converter file="fasta_to_2bit.xml" target_datatype="twobit"/> + <converter file="fasta_to_len.xml" target_datatype="len"/> + </datatype> + <datatype extension="fastq" type="galaxy.datatypes.sequence:Fastq" display_in_upload="true" description="FASTQ format is a text-based format for storing both a biological sequence (usually nucleotide sequence) and its corresponding quality scores." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Fastq"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqsanger" type="galaxy.datatypes.sequence:FastqSanger" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqsolexa" type="galaxy.datatypes.sequence:FastqSolexa" display_in_upload="true" description="FastqSolexa is the Illumina (Solexa) variant of the Fastq format, which stores sequences and quality scores in a single file." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#FastqSolexa"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqcssanger" type="galaxy.datatypes.sequence:FastqCSSanger" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fastqillumina" type="galaxy.datatypes.sequence:FastqIllumina" display_in_upload="true"> + <converter file="fastq_to_fqtoc.xml" target_datatype="fqtoc"/> + </datatype> + <datatype extension="fqtoc" type="galaxy.datatypes.sequence:SequenceSplitLocations" display_in_upload="true"/> + <datatype extension="eland" type="galaxy.datatypes.tabular:Eland" display_in_upload="true"/> + <datatype extension="elandmulti" type="galaxy.datatypes.tabular:ElandMulti" display_in_upload="true"/> + <datatype extension="genetrack" type="galaxy.datatypes.tracks:GeneTrack"> + <!-- <display file="genetrack.xml" /> --> + </datatype> + <datatype extension="gff" type="galaxy.datatypes.interval:Gff" display_in_upload="true" description="GFF lines have nine required fields that must be tab-separated." 
description_url="https://wiki.galaxyproject.org/Learn/Datatypes#GFF"> + <converter file="gff_to_bed_converter.xml" target_datatype="bed"/> + <converter file="gff_to_interval_index_converter.xml" target_datatype="interval_index"/> + <converter file="bed_gff_or_vcf_to_bigwig_converter.xml" target_datatype="bigwig"/> + <converter file="gff_to_fli_converter.xml" target_datatype="fli"/> + <display file="ensembl/ensembl_gff.xml" inherit="True"/> + <!-- <display file="gbrowse/gbrowse_gff.xml" inherit="True" /> --> + </datatype> + <datatype extension="gff3" type="galaxy.datatypes.interval:Gff3" display_in_upload="true" description="The GFF3 format addresses the most common extensions to GFF, while preserving backward compatibility with previous formats." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#GFF3"/> + <datatype extension="gif" type="galaxy.datatypes.images:Gif" mimetype="image/gif"/> + <datatype extension="gmaj.zip" type="galaxy.datatypes.images:Gmaj" mimetype="application/zip"/> + <datatype extension="gtf" type="galaxy.datatypes.interval:Gtf" display_in_upload="true"> + <converter file="gff_to_interval_index_converter.xml" target_datatype="interval_index"/> + <converter file="bed_gff_or_vcf_to_bigwig_converter.xml" target_datatype="bigwig"/> + <display file="igb/gtf.xml" /> + </datatype> + <datatype extension="toolshed.gz" type="galaxy.datatypes.binary:Binary" mimetype="multipart/x-gzip" subclass="True" /> + <datatype extension="h5" type="galaxy.datatypes.binary:Binary" mimetype="application/octet-stream" subclass="True" /> + <datatype extension="html" type="galaxy.datatypes.images:Html" mimetype="text/html"/> + <datatype extension="interval" type="galaxy.datatypes.interval:Interval" display_in_upload="true" description="File must start with definition line in the following format (columns may be in any order)." 
> + <converter file="interval_to_bed_converter.xml" target_datatype="bed"/> + <converter file="interval_to_bedstrict_converter.xml" target_datatype="bedstrict"/> + <converter file="interval_to_bed6_converter.xml" target_datatype="bed6"/> + <converter file="interval_to_bed12_converter.xml" target_datatype="bed12"/> + <converter file="interval_to_bgzip_converter.xml" target_datatype="bgzip"/> + <converter file="interval_to_tabix_converter.xml" target_datatype="tabix" depends_on="bgzip"/> + <converter file="interval_to_bigwig_converter.xml" target_datatype="bigwig"/> + <!-- <display file="ucsc/interval_as_bed.xml" inherit="True" /> --> + <display file="ensembl/ensembl_interval_as_bed.xml" inherit="True"/> + <display file="gbrowse/gbrowse_interval_as_bed.xml" inherit="True"/> + <display file="rviewer/bed.xml" inherit="True"/> + </datatype> + <datatype extension="picard_interval_list" type="galaxy.datatypes.tabular:Tabular" subclass="True" display_in_upload="True"> + <converter file="picard_interval_list_to_bed6_converter.xml" target_datatype="bed6"/> + </datatype> + <datatype extension="gatk_interval" type="galaxy.datatypes.data:Text" subclass="True" display_in_upload="True"/> + <datatype extension="gatk_report" type="galaxy.datatypes.data:Text" subclass="True" display_in_upload="True"/> + <datatype extension="gatk_dbsnp" type="galaxy.datatypes.tabular:Tabular" subclass="True" display_in_upload="True"/> + <datatype extension="gatk_tranche" type="galaxy.datatypes.tabular:Tabular" subclass="True" display_in_upload="True"/> + <datatype extension="gatk_recal" type="galaxy.datatypes.tabular:Tabular" subclass="True" display_in_upload="True"/> + <datatype extension="jpg" type="galaxy.datatypes.images:Jpg" mimetype="image/jpeg"/> + <datatype extension="tiff" type="galaxy.datatypes.images:Tiff" mimetype="image/tiff"/> + <datatype extension="bmp" type="galaxy.datatypes.images:Bmp" mimetype="image/bmp"/> + <datatype extension="im" type="galaxy.datatypes.images:Im" mimetype="image/im"/> + <datatype extension="pcd" type="galaxy.datatypes.images:Pcd" mimetype="image/pcd"/> + <datatype extension="pcx" type="galaxy.datatypes.images:Pcx" mimetype="image/pcx"/> + <datatype extension="ppm" type="galaxy.datatypes.images:Ppm" mimetype="image/ppm"/> + <datatype extension="psd" type="galaxy.datatypes.images:Psd" mimetype="image/psd"/> + <datatype extension="xbm" type="galaxy.datatypes.images:Xbm" mimetype="image/xbm"/> + <datatype extension="xpm" type="galaxy.datatypes.images:Xpm" mimetype="image/xpm"/> + <datatype extension="rgb" type="galaxy.datatypes.images:Rgb" mimetype="image/rgb"/> + <datatype extension="pbm" type="galaxy.datatypes.images:Pbm" mimetype="image/pbm"/> + <datatype extension="pgm" type="galaxy.datatypes.images:Pgm" mimetype="image/pgm"/> + <datatype extension="eps" type="galaxy.datatypes.images:Eps" mimetype="image/eps"/> + <datatype extension="rast" type="galaxy.datatypes.images:Rast" mimetype="image/rast"/> + <datatype extension="laj" type="galaxy.datatypes.images:Laj"/> + <datatype extension="lav" type="galaxy.datatypes.sequence:Lav" display_in_upload="true" description="Lav is the primary output format for BLASTZ. The first line of a .lav file begins with #:lav.." /> + <datatype extension="maf" type="galaxy.datatypes.sequence:Maf" display_in_upload="true" description="TBA and multiz multiple alignment format. The first line of a .maf file begins with ##maf. This word is followed by white-space-separated 'variable=value' pairs. There should be no white space surrounding the '='." 
description_url="https://wiki.galaxyproject.org/Learn/Datatypes#MAF"> + <converter file="maf_to_fasta_converter.xml" target_datatype="fasta"/> + <converter file="maf_to_interval_converter.xml" target_datatype="interval"/> + </datatype> + <datatype extension="mafcustomtrack" type="galaxy.datatypes.sequence:MafCustomTrack"> + <display file="ucsc/maf_customtrack.xml" /> + </datatype> + <datatype extension="encodepeak" type="galaxy.datatypes.interval:ENCODEPeak" display_in_upload="True"> + <converter file="encodepeak_to_tabix_converter.xml" target_datatype="tabix" depends_on="bgzip"/> + <converter file="encodepeak_to_bgzip_converter.xml" target_datatype="bgzip"/> + <converter file="bed_gff_or_vcf_to_bigwig_converter.xml" target_datatype="bigwig"/> + </datatype> + <datatype extension="pdf" type="galaxy.datatypes.images:Pdf" mimetype="application/pdf"/> + <datatype extension="pileup" type="galaxy.datatypes.tabular:Pileup" display_in_upload="true"> + <converter file="interval_to_bgzip_converter.xml" target_datatype="bgzip"/> + <converter file="interval_to_tabix_converter.xml" target_datatype="tabix" depends_on="bgzip"/> + </datatype> + <datatype extension="png" type="galaxy.datatypes.images:Png" mimetype="image/png"/> + <datatype extension="qual" type="galaxy.datatypes.qualityscore:QualityScore" /> + <datatype extension="qualsolexa" type="galaxy.datatypes.qualityscore:QualityScoreSolexa" display_in_upload="true"/> + <datatype extension="qualillumina" type="galaxy.datatypes.qualityscore:QualityScoreIllumina" display_in_upload="true"/> + <datatype extension="qualsolid" type="galaxy.datatypes.qualityscore:QualityScoreSOLiD" display_in_upload="true"/> + <datatype extension="qual454" type="galaxy.datatypes.qualityscore:QualityScore454" display_in_upload="true"/> + <datatype extension="Roadmaps" type="galaxy.datatypes.assembly:Roadmaps" display_in_upload="false"/> + <datatype extension="sam" type="galaxy.datatypes.tabular:Sam" display_in_upload="true"> + <converter file="sam_to_bam.xml" target_datatype="bam"/> + <converter file="sam_to_bigwig_converter.xml" target_datatype="bigwig"/> + </datatype> + <datatype extension="scf" type="galaxy.datatypes.binary:Scf" mimetype="application/octet-stream" display_in_upload="true" description="A binary sequence file in 'scf' format with a '.scf' file extension. You must manually select this 'File Format' when uploading the file." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Scf"/> + <datatype extension="Sequences" type="galaxy.datatypes.assembly:Sequences" display_in_upload="false"/> + <datatype extension="sff" type="galaxy.datatypes.binary:Sff" mimetype="application/octet-stream" display_in_upload="true" description="A binary file in 'Standard Flowgram Format' with a '.sff' file extension." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Sff"/> + <datatype extension="svg" type="galaxy.datatypes.images:Image" mimetype="image/svg+xml"/> + <datatype extension="taxonomy" type="galaxy.datatypes.tabular:Taxonomy" display_in_upload="true"/> + <datatype extension="tabular" type="galaxy.datatypes.tabular:Tabular" display_in_upload="true" description="Any data in tab delimited format (tabular)." 
description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Tabular_.28tab_delimited.29"/> + <datatype extension="twobit" type="galaxy.datatypes.binary:TwoBit" mimetype="application/octet-stream" display_in_upload="true"/> + <datatype extension="sqlite" type="galaxy.datatypes.binary:SQlite" mimetype="application/octet-stream" display_in_upload="true"/> + <datatype extension="txt" type="galaxy.datatypes.data:Text" display_in_upload="true" description="Any text file." description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Plain_text"/> + <datatype extension="linecount" type="galaxy.datatypes.data:LineCount" display_in_upload="false"/> + <datatype extension="memexml" type="galaxy.datatypes.xml:MEMEXml" mimetype="application/xml" display_in_upload="true"/> + <datatype extension="cisml" type="galaxy.datatypes.xml:CisML" mimetype="application/xml" display_in_upload="true"/> + <datatype extension="xml" type="galaxy.datatypes.xml:GenericXml" mimetype="application/xml" display_in_upload="true"/> + <datatype extension="vcf" type="galaxy.datatypes.tabular:Vcf" display_in_upload="true"> + <converter file="vcf_to_bgzip_converter.xml" target_datatype="bgzip"/> + <converter file="vcf_to_vcf_bgzip_converter.xml" target_datatype="vcf_bgzip"/> + <converter file="vcf_to_tabix_converter.xml" target_datatype="tabix" depends_on="bgzip"/> + <converter file="bed_gff_or_vcf_to_bigwig_converter.xml" target_datatype="bigwig"/> + <display file="ucsc/vcf.xml" /> + <display file="igv/vcf.xml" /> + <display file="rviewer/vcf.xml" inherit="True"/> + </datatype> + <datatype extension="bcf" type="galaxy.datatypes.binary:Bcf" mimetype="application/octet-stream" display_in_upload="True"/> + <datatype extension="velvet" type="galaxy.datatypes.assembly:Velvet" display_in_upload="false"/> + <datatype extension="wig" type="galaxy.datatypes.interval:Wiggle" display_in_upload="true" description="The wiggle format is line-oriented. Wiggle data is preceded by a track definition line, which adds a number of options for controlling the default display of this track." 
description_url="https://wiki.galaxyproject.org/Learn/Datatypes#Wig"> + <converter file="wig_to_bigwig_converter.xml" target_datatype="bigwig"/> + <converter file="wiggle_to_simple_converter.xml" target_datatype="interval"/> + <!-- <display file="gbrowse/gbrowse_wig.xml" /> --> + <display file="igb/wig.xml" /> + </datatype> + <datatype extension="interval_index" type="galaxy.datatypes.binary:Binary" subclass="True" /> + <datatype extension="tabix" type="galaxy.datatypes.binary:Binary" subclass="True" /> + <datatype extension="bgzip" type="galaxy.datatypes.binary:Binary" subclass="True" /> + <datatype extension="vcf_bgzip" type_extension="bgzip" subclass="True" > + <display file="igv/vcf.xml" /> + <converter file="vcf_bgzip_to_tabix_converter.xml" target_datatype="tabix"/> + </datatype> + <!-- Phylogenetic tree datatypes --> + <datatype extension="phyloxml" type="galaxy.datatypes.xml:Phyloxml" display_in_upload="true" /> + <datatype extension="nhx" type="galaxy.datatypes.data:Newick" display_in_upload="true" /> + <datatype extension="nex" type="galaxy.datatypes.data:Nexus" display_in_upload="true" /> + <!-- Start RGenetics Datatypes --> + <datatype extension="affybatch" type="galaxy.datatypes.genetics:Affybatch" display_in_upload="true"/> + <!-- eigenstrat pedigree input file --> + <datatype extension="eigenstratgeno" type="galaxy.datatypes.genetics:Eigenstratgeno"/> + <!-- eigenstrat pca output file for adjusted eigenQTL eg --> + <datatype extension="eigenstratpca" type="galaxy.datatypes.genetics:Eigenstratpca"/> + <datatype extension="eset" type="galaxy.datatypes.genetics:Eset" display_in_upload="true" /> + <!-- fbat/pbat format pedigree (header row of marker names) --> + <datatype extension="fped" type="galaxy.datatypes.genetics:Fped" display_in_upload="true"/> + <!-- phenotype file - fbat format --> + <datatype extension="fphe" type="galaxy.datatypes.genetics:Fphe" display_in_upload="true" mimetype="text/html"/> + <!-- genome graphs ucsc file - first col is always marker then numeric values to plot --> + <datatype extension="gg" type="galaxy.datatypes.genetics:GenomeGraphs"/> + <!-- part of linkage format pedigree --> + <!-- information redundancy (LD) filtered plink pbed --> + <datatype extension="ldindep" type="galaxy.datatypes.genetics:ldIndep" display_in_upload="true"> + </datatype> + <datatype extension="malist" type="galaxy.datatypes.genetics:MAlist" display_in_upload="true"/> + <!-- linkage format pedigree (separate .map file) --> + <datatype extension="lped" type="galaxy.datatypes.genetics:Lped" display_in_upload="true"> + <converter file="lped_to_fped_converter.xml" target_datatype="fped"/> + <converter file="lped_to_pbed_converter.xml" target_datatype="pbed"/> + </datatype> + <!-- plink compressed file - has bed extension unfortunately --> + <datatype extension="pbed" type="galaxy.datatypes.genetics:Pbed" display_in_upload="true"> + <converter file="pbed_to_lped_converter.xml" target_datatype="lped"/> + <converter file="pbed_ldreduced_converter.xml" target_datatype="ldindep"/> + </datatype> + <datatype extension="pheno" type="galaxy.datatypes.genetics:Pheno"/> + <!-- phenotype file - plink format --> + <datatype extension="pphe" type="galaxy.datatypes.genetics:Pphe" display_in_upload="true" mimetype="text/html"/> + <datatype extension="rexpbase" type="galaxy.datatypes.genetics:RexpBase"/> + <datatype extension="rgenetics" type="galaxy.datatypes.genetics:Rgenetics"/> + <datatype extension="snptest" type="galaxy.datatypes.genetics:Snptest" display_in_upload="true"/> + <datatype 
extension="snpmatrix" type="galaxy.datatypes.genetics:SNPMatrix" display_in_upload="true"/> + <datatype extension="xls" type="galaxy.datatypes.tabular:Tabular"/> + <!-- End RGenetics Datatypes --> + <!-- graph datatypes --> + <datatype extension="xgmml" type="galaxy.datatypes.graph:Xgmml" display_in_upload="true"/> + <datatype extension="sif" type="galaxy.datatypes.graph:Sif" display_in_upload="true"/> + <datatype extension="rdf" type="galaxy.datatypes.graph:Rdf" display_in_upload="true"/> + </registration> + <sniffers> + <!-- + The order in which Galaxy attempts to determine data types is + important because some formats are much more loosely defined + than others. The following list should be the most rigidly + defined format first, followed by next-most rigidly defined, + and so on. + --> + <sniffer type="galaxy.datatypes.tabular:Vcf"/> + <sniffer type="galaxy.datatypes.binary:TwoBit"/> + <sniffer type="galaxy.datatypes.binary:SQlite"/> + <sniffer type="galaxy.datatypes.binary:Bam"/> + <sniffer type="galaxy.datatypes.binary:Sff"/> + <sniffer type="galaxy.datatypes.xml:Phyloxml"/> + <sniffer type="galaxy.datatypes.xml:GenericXml"/> + <sniffer type="galaxy.datatypes.sequence:Maf"/> + <sniffer type="galaxy.datatypes.sequence:Lav"/> + <sniffer type="galaxy.datatypes.sequence:csFasta"/> + <sniffer type="galaxy.datatypes.qualityscore:QualityScoreSOLiD"/> + <sniffer type="galaxy.datatypes.qualityscore:QualityScore454"/> + <sniffer type="galaxy.datatypes.sequence:Fasta"/> + <sniffer type="galaxy.datatypes.sequence:Fastq"/> + <sniffer type="galaxy.datatypes.interval:Wiggle"/> + <sniffer type="galaxy.datatypes.images:Html"/> + <sniffer type="galaxy.datatypes.images:Pdf"/> + <sniffer type="galaxy.datatypes.sequence:Axt"/> + <sniffer type="galaxy.datatypes.interval:Bed"/> + <sniffer type="galaxy.datatypes.interval:CustomTrack"/> + <sniffer type="galaxy.datatypes.interval:Gtf"/> + <sniffer type="galaxy.datatypes.interval:Gff"/> + <sniffer type="galaxy.datatypes.interval:Gff3"/> + <sniffer type="galaxy.datatypes.tabular:Pileup"/> + <sniffer type="galaxy.datatypes.interval:Interval"/> + <sniffer type="galaxy.datatypes.tabular:Sam"/> + <sniffer type="galaxy.datatypes.data:Newick"/> + <sniffer type="galaxy.datatypes.data:Nexus"/> + <sniffer type="galaxy.datatypes.images:Jpg"/> + <sniffer type="galaxy.datatypes.images:Png"/> + <sniffer type="galaxy.datatypes.images:Tiff"/> + <sniffer type="galaxy.datatypes.images:Bmp"/> + <sniffer type="galaxy.datatypes.images:Gif"/> + <sniffer type="galaxy.datatypes.images:Im"/> + <sniffer type="galaxy.datatypes.images:Pcd"/> + <sniffer type="galaxy.datatypes.images:Pcx"/> + <sniffer type="galaxy.datatypes.images:Ppm"/> + <sniffer type="galaxy.datatypes.images:Psd"/> + <sniffer type="galaxy.datatypes.images:Xbm"/> + <sniffer type="galaxy.datatypes.images:Rgb"/> + <sniffer type="galaxy.datatypes.images:Pbm"/> + <sniffer type="galaxy.datatypes.images:Pgm"/> + <sniffer type="galaxy.datatypes.images:Xpm"/> + <sniffer type="galaxy.datatypes.images:Eps"/> + <sniffer type="galaxy.datatypes.images:Rast"/> + <!-- + Keep this commented until the sniff method in the assembly.py + module is fixed to not read the entire file. + <sniffer type="galaxy.datatypes.assembly:Amos"/> + --> + </sniffers> + <build_sites> + <!-- + Build sites define the builds (dbkeys) available at sites used by display + applications and the URL to those sites. + + The `display` attributes on the `ucsc` and `gbrowse` sites replace the + `ucsc_display_sites` and `gbrowse_display_sites` options in galaxy.ini. 
+ Because these are used by "old-style" display applications, their types + cannot change if you want the old-style display links for these sites to + work. + --> + <site type="ucsc" file="tool-data/shared/ucsc/ucsc_build_sites.txt" display="main,test,archaea,ucla"/> + <site type="gbrowse" file="tool-data/shared/gbrowse/gbrowse_build_sites.txt" display="modencode,sgd_yeast,tair,wormbase,wormbase_ws120,wormbase_ws140,wormbase_ws170,wormbase_ws180,wormbase_ws190,wormbase_ws200,wormbase_ws204,wormbase_ws210,wormbase_ws220,wormbase_ws225"/> + <site type="ensembl" file="tool-data/shared/ensembl/ensembl_sites.txt"/> + <site type="ensembl_data_url" file="tool-data/shared/ensembl/ensembl_sites_data_URL.txt"/> + <site type="igv" file="tool-data/shared/igv/igv_build_sites.txt"/> + <site type="rviewer" file="tool-data/shared/rviewer/rviewer_build_sites.txt"/> + </build_sites> +</datatypes> diff -r 76a4156aefd3e7076571f18e9199dde1fb05b0f3 -r 5f90a490e9971685945081cbed681c961932a4fd config/demo_sequencer_wsgi.ini.sample --- /dev/null +++ b/config/demo_sequencer_wsgi.ini.sample @@ -0,0 +1,66 @@ +# ---- HTTP Server ---------------------------------------------------------- + +[server:main] + +use = egg:Paste#http +port = 9011 +host = 0.0.0.0 +use_threadpool = true +threadpool_workers = 10 + +# ---- Galaxy Demo Sequencer Emulator Interface ------------------------------------------------- + +[app:main] + +# Specifies the factory for the universe WSGI application +paste.app_factory = galaxy.webapps.demo_sequencer.buildapp:app_factory +log_level = DEBUG + +# Where dataset files are saved +file_path = database/demo_sequencer_files +# Temporary storage for additional datasets, this should be shared through the cluster +new_file_path = database/tmp + +# Sequencer emulator actions +sequencer_actions_config_file = %(here)s/lib/galaxy/webapps/demo_sequencer/sequencer_actions.xml + +# Session support (beaker) +use_beaker_session = True +session_type = memory +session_data_dir = %(here)s/database/beaker_sessions +session_key = galaxysessions +session_secret = changethisinproduction + +# Galaxy session security +id_secret = changethisinproductiontoo + +# Configuration for debugging middleware +debug = true +use_lint = false + +# NEVER enable this on a public site (even test or QA) +# use_interactive = true + +# this should be a comma-separated list of valid Galaxy users +#admin_users = test@bx.psu.edu + +# Force everyone to log in (disable anonymous access) +require_login = False + +# Write thread status periodically to 'heartbeat.log' (careful, uses disk space rapidly!) 
+## use_heartbeat = True + +# Profiling middleware (cProfile based) +## use_profile = True + +# Use the new iframe / javascript based layout +use_new_layout = true + +# Serving static files (needed if running standalone) +static_enabled = True +static_cache_time = 360 +static_dir = %(here)s/static/ +static_images_dir = %(here)s/static/images +static_favicon_dir = %(here)s/static/favicon.ico +static_scripts_dir = %(here)s/static/scripts/ +static_style_dir = %(here)s/static/june_2007_style/blue diff -r 76a4156aefd3e7076571f18e9199dde1fb05b0f3 -r 5f90a490e9971685945081cbed681c961932a4fd config/external_service_types_conf.xml.sample --- /dev/null +++ b/config/external_service_types_conf.xml.sample @@ -0,0 +1,5 @@ +<?xml version="1.0"?> +<external_service_types> + <external_service_type file="simple_unknown_sequencer.xml" visible="False"/> + <external_service_type file="applied_biosystems_solid.xml" visible="True"/> +</external_service_types> diff -r 76a4156aefd3e7076571f18e9199dde1fb05b0f3 -r 5f90a490e9971685945081cbed681c961932a4fd config/galaxy.ini.sample --- /dev/null +++ b/config/galaxy.ini.sample @@ -0,0 +1,889 @@ +# +# Galaxy is configured by default to be useable in a single-user development +# environment. To tune the application for a multi-user production +# environment, see the documentation at: +# +# http://usegalaxy.org/production +# + +# Throughout this sample configuration file, except where stated otherwise, +# uncommented values override the default if left unset, whereas commented +# values are set to the default value. Relative paths are relative to the root +# Galaxy directory. +# +# Examples of many of these options are explained in more detail in the wiki: +# +# https://wiki.galaxyproject.org/Admin/Config +# +# Config hackers are encouraged to check there before asking for help. + +# ---- HTTP Server ---------------------------------------------------------- + +# Configuration of the internal HTTP server. + +[server:main] + +# The internal HTTP server to use. Currently only Paste is provided. This +# option is required. +use = egg:Paste#http + +# The port on which to listen. +#port = 8080 + +# The address on which to listen. By default, only listen to localhost (Galaxy +# will not be accessible over the network). Use '0.0.0.0' to listen on all +# available network interfaces. +#host = 127.0.0.1 + +# Use a threadpool for the web server instead of creating a thread for each +# request. +use_threadpool = True + +# Number of threads in the web server thread pool. +#threadpool_workers = 10 + +# Set the number of seconds a thread can work before you should kill it (assuming it will never finish) to 3 hours. +threadpool_kill_thread_limit = 10800 + +# ---- Filters -------------------------------------------------------------- + +# Filters sit between Galaxy and the HTTP server. + +# These filters are disabled by default. They can be enabled with +# 'filter-with' in the [app:main] section below. + +# Define the gzip filter. +[filter:gzip] +use = egg:Paste#gzip + +# Define the proxy-prefix filter. +[filter:proxy-prefix] +use = egg:PasteDeploy#prefix +prefix = /galaxy + +# ---- Galaxy --------------------------------------------------------------- + +# Configuration of the Galaxy application. + +[app:main] + +# -- Application and filtering + +# The factory for the WSGI application. This should not be changed. 
+paste.app_factory = galaxy.web.buildapp:app_factory + +# If not running behind a proxy server, you may want to enable gzip compression +# to decrease the size of data transferred over the network. If using a proxy +# server, please enable gzip compression there instead. +#filter-with = gzip + +# If running behind a proxy server and Galaxy is served from a subdirectory, +# enable the proxy-prefix filter and set the prefix in the +# [filter:proxy-prefix] section above. +#filter-with = proxy-prefix + +# If proxy-prefix is enabled and you're running more than one Galaxy instance +# behind one hostname, you will want to set this to the same path as the prefix +# in the filter above. This value becomes the "path" attribute set in the +# cookie so the cookies from each instance will not clobber each other. +#cookie_path = None + +# -- Database + +# By default, Galaxy uses a SQLite database at 'database/universe.sqlite'. You +# may use a SQLAlchemy connection string to specify an external database +# instead. This string takes many options which are explained in detail in the +# config file documentation. +#database_connection = sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE + +# If the server logs errors about not having enough database pool connections, +# you will want to increase these values, or consider running more Galaxy +# processes. +#database_engine_option_pool_size = 5 +#database_engine_option_max_overflow = 10 + +# If using MySQL and the server logs the error "MySQL server has gone away", +# you will want to set this to some positive value (7200 should work). +#database_engine_option_pool_recycle = -1 + +# If large database query results are causing memory or response time issues in +# the Galaxy process, leave the result on the server instead. This option is +# only available for PostgreSQL and is highly recommended. +#database_engine_option_server_side_cursors = False + +# Log all database transactions; this can be useful for debugging and performance +# profiling. Logging is done via Python's 'logging' module under the qualname +# 'galaxy.model.orm.logging_connection_proxy' +#database_query_profiling_proxy = False + +# By default, Galaxy will use the same database to track user data and +# tool shed install data. There are many situations in which it is +# valuable to separate these - for instance bootstrapping fresh Galaxy +# instances with pretested installs. The following option can be used to +# separate the tool shed install database (all other options listed above +# but prefixed with install_ are also available). +#install_database_connection = sqlite:///./database/universe.sqlite?isolation_level=IMMEDIATE + +# -- Files and directories + +# Dataset files are stored in this directory. +#file_path = database/files + +# Temporary files are stored in this directory. +#new_file_path = database/tmp + +# Tool config files define what tools are available in Galaxy. +# Tools can be locally developed or installed from Galaxy tool sheds. +# (config/tool_conf.xml.sample will be used if left unset and +# config/tool_conf.xml does not exist). +#tool_config_file = config/tool_conf.xml,shed_tool_conf.xml + +# Enable / disable checking if any tools defined in the above non-shed tool_config_files +# (i.e., tool_conf.xml) have been migrated from the Galaxy code distribution to the Tool +# Shed.
This setting should generally be set to False only for development Galaxy environments +# that are often rebuilt from scratch where migrated tools do not need to be available in the +# Galaxy tool panel. If the following setting remains commented, the default setting will be True. +#check_migrate_tools = True + +# Tool config maintained by tool migration scripts. If you use the migration +# scripts to install tools that have been migrated to the tool shed upon a new +# release, they will be added to this tool config file. +#migrated_tools_config = migrated_tools_conf.xml + +# File that contains the XML section and tool tags from all tool panel config +# files integrated into a single file that defines the tool panel layout. This +# file can be changed by the Galaxy administrator to alter the layout of the +# tool panel. If not present, Galaxy will create it. +#integrated_tool_panel_config = integrated_tool_panel.xml + +# Default path to the directory containing the tools defined in tool_conf.xml. +# Other tool config files must include the tool_path as an attribute in the <toolbox> tag. +#tool_path = tools + +# Path to the directory in which tool dependencies are placed. This is used by +# the tool shed to install dependencies and can also be used by administrators +# to manually install or link to dependencies. For details, see: +# https://wiki.galaxyproject.org/Admin/Config/ToolDependencies +# If this option is not set to a valid path, installing tools with dependencies +# from the Tool Shed will fail. +#tool_dependency_dir = None + +# File containing the Galaxy Tool Sheds that should be made available to +# install from in the admin interface (.sample used if default does not exist). +#tool_sheds_config_file = config/tool_sheds_conf.xml + +# Enable automatic polling of relative tool sheds to see if any updates +# are available for installed repositories. Ideally only one Galaxy +# server process should be able to check for repository updates. The +# setting for hours_between_check should be an integer between 1 and 24. +#enable_tool_shed_check = False +#hours_between_check = 12 + +# Enable use of an in-memory registry with bi-directional relationships +# between repositories (i.e., in addition to lists of dependencies for a +# repository, keep an in-memory registry of dependent items for each repository). +#manage_dependency_relationships = False + +# XML config file that contains data table entries for the +# ToolDataTableManager. This file is manually +# maintained by the Galaxy +# administrator (.sample used if default does not exist). +#tool_data_table_config_path = config/tool_data_table_conf.xml + +# XML config file that contains additional data table entries for the ToolDataTableManager. This file +# is automatically generated based on the current installed tool shed repositories that contain valid +# tool_data_table_conf.xml.sample files. At the time of installation, these entries are automatically +# added to the following file, which is parsed and applied to the ToolDataTableManager at server start up.
+#shed_tool_data_table_config = config/shed_tool_data_table_conf.xml + +# Directory where data used by tools is located; see the samples in that +# directory and the wiki for help: +# https://wiki.galaxyproject.org/Admin/DataIntegration +#tool_data_path = tool-data + +# File containing old-style genome builds +#builds_file_path = tool-data/shared/ucsc/builds.txt + +# Directory where chrom len files are kept, currently mainly used by trackster +#len_file_path = tool-data/shared/ucsc/chrom + +# Datatypes config file defines what data (file) types are available in +# Galaxy (.sample is used if default does not exist). +#datatypes_config_file = config/datatypes_conf.xml + +# Disable the 'Auto-detect' option for file uploads +#datatypes_disable_auto = False + +# Visualizations config directory: where to look for individual visualization plugins. +# The path is relative to the Galaxy root dir. To use an absolute path, begin the path +# with '/'. Defaults to "config/plugins/visualizations". +#visualization_plugins_directory = config/plugins/visualizations + +# Each job is given a unique empty directory as its current working directory. +# This option defines in what parent directory those directories will be +# created. +#job_working_directory = database/job_working_directory + +# If using a cluster, Galaxy will write job scripts and stdout/stderr to this +# directory. +#cluster_files_directory = database/pbs + +# Mako templates are compiled as needed and cached for reuse; this directory is +# used for the cache +#template_cache_path = database/compiled_templates + +# Citation related caching. Tool citation information may be fetched from +# external sources such as http://dx.doi.org/ by Galaxy - the following parameters +# can be used to control the caching used to store this information. +#citation_cache_type = file +#citation_cache_data_dir = database/citations/data +#citation_cache_lock_dir = database/citations/lock + +# External service types config file defines what types of external_services configurations +# are available in Galaxy (.sample is used if default does not exist). +#external_service_type_config_file = config/external_service_types_conf.xml + +# Path to the directory containing the external_service_types defined in the config. +#external_service_type_path = external_service_types + +# Tools with a number of outputs not known until runtime can write these +# outputs to a directory for collection by Galaxy when the job is done. +# Previously, this directory was new_file_path, but using one global directory +# can cause performance problems, so using job_working_directory ('.' or cwd +# when a job is run) is encouraged. By default, both are checked to avoid +# breaking existing tools. +#collect_outputs_from = new_file_path,job_working_directory + +# -- Data Storage (Object Store) +# +# Configuration file for the object store +# If this is set and exists, it overrides any other object store settings. +# object_store_config_file = config/object_store_conf.xml + +# Object store backend module (valid options are: disk, s3, swift, irods, +# distributed, hierarchical) +#object_store = disk + +# *Extremely* old Galaxy instances created datasets at the root of the +# `file_path` defined above. If your Galaxy instance has datasets at the root +# (instead of in directories composed by hashing the dataset id), you should +# enable this option to allow Galaxy to find them. +#object_store_check_old_style = False
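As an illustration of the "directories composed by hashing the dataset id" wording above, here is one plausible hashed layout; this is a sketch only, not Galaxy's exact object store scheme (which lives in its object store code):

import os

def hashed_dataset_path(file_path, dataset_id):
    # Spread datasets across subdirectories derived from the zero-padded
    # id so no single directory accumulates every dataset. Very old
    # instances wrote everything to the root of file_path instead, which
    # is the case object_store_check_old_style accommodates.
    prefix = "%06d" % dataset_id
    return os.path.join(file_path, prefix[:3], "dataset_%d.dat" % dataset_id)

print(hashed_dataset_path("database/files", 12345))  # database/files/012/dataset_12345.dat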
+ +# Credentials used by certain (s3, swift) object store backends +#os_access_key = <your cloud object store access key> +#os_secret_key = <your cloud object store secret key> +#os_bucket_name = <name of an existing object store bucket or container> + +# If using 'swift' object store, you must specify the following connection +# properties +#os_host = swift.rc.nectar.org.au +#os_port = 8888 +#os_is_secure = False +#os_conn_path = / + +# Reduced redundancy can be used only with the 's3' object store +#os_use_reduced_redundancy = False + +# Path to cache directory for object store backends that utilize a cache (s3, +# swift, irods) +#object_store_cache_path = database/files/ + +# Size (in GB) that the cache used by object store should be limited to. +# If the value is not specified, the cache size will be limited only by the +# file system size. +#object_store_cache_size = 100 + +# Configuration file for the distributed object store, if object_store = +# distributed. See the sample at distributed_object_store_conf.xml.sample +#distributed_object_store_config_file = None + + +# -- Mail and notification + +# Galaxy sends mail for various things: Subscribing users to the mailing list +# if they request it, emailing password resets, notifications from the Galaxy +# Sample Tracking system, reporting dataset errors, and sending activation emails. +# To do this, it needs to send mail through an SMTP server, which you may define here (host:port). +# Galaxy will automatically try STARTTLS but will continue upon failure. +#smtp_server = None + +# If your SMTP server requires a username and password, you can provide them +# here (password in cleartext here, but if your server supports STARTTLS it +# will be sent over the network encrypted). +#smtp_username = None +#smtp_password = None + +# If your SMTP server requires SSL from the beginning of the connection +# smtp_ssl = False + +# On the user registration form, users may choose to join the mailing list. +# This is the address of the list they'll be subscribed to. +#mailing_join_addr = galaxy-announce-join@bx.psu.edu + +# Datasets in an error state include a link to report the error. Those reports +# will be sent to this address. Error reports are disabled if no address is set. +# This email is also shown as a contact to the user in case of Galaxy misconfiguration and other events the user may encounter. +#error_email_to = None + +# Activation email is used as a sender ('from' field) for the account activation mail. +# We recommend using a string in the following format: Galaxy Project <galaxy-no-reply@example.com> +#activation_email = None + +# URL of the support resource for the galaxy instance. Used in activation emails. +#instance_resource_url = http://wiki.galaxyproject.org/ + +# The e-mail domain blacklist is used to filter out users registering with disposable email +# addresses. If their address domain matches any domain on the blacklist, they are refused registration. +#blacklist_file = config/disposable_email_blacklist.conf + +# Registration warning message is used to discourage people from registering multiple accounts. Applies mostly to the main Galaxy instance. +# If no message is specified, the warning box will not be shown. +#registration_warning_message = Please register only one account - we provide this service free of charge and have limited computational resources. Multi-accounts are tracked and will be subjected to account termination and data deletion.
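The smtp_server comment above notes that Galaxy "will automatically try STARTTLS but will continue upon failure". A minimal sketch of that behavior using Python's smtplib; the helper and its host:port parsing are illustrative, not Galaxy's actual mail code:

import smtplib

def connect_smtp(smtp_server, username=None, password=None):
    # smtp_server is a "host:port" string as in the config above.
    host, _, port = smtp_server.partition(":")
    conn = smtplib.SMTP(host, int(port or 25))
    try:
        # Try to upgrade the connection to TLS; if the server refuses,
        # continue in plaintext rather than failing the send outright.
        conn.starttls()
    except smtplib.SMTPException:
        pass
    if username and password:
        conn.login(username, password)
    return conn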
+ +# When users opt to reset passwords, new ones are created; this option +# specifies the length of these passwords. +#reset_password_length = 15 + + +# -- Account activation + +# This is the global flag for the user account activation feature. If set to "False" the rest of the Account +# activation configuration is ignored and user activation is disabled (i.e., accounts are active from registration). +# Note that activation also does not work if the SMTP server is not defined. +#user_activation_on = False + +# Activation grace period. Activation is not forced (login is not disabled) until +# grace period has passed. Users under grace period can't run jobs (see inactivity_box_content). +# In hours. Default is 3. Enter 0 to disable grace period. +# Users with OpenID logins have an unlimited grace period. +#activation_grace_period = 0 + +# Used for the warning box shown for inactive accounts (unable to run jobs). +# In use only if activation_grace_period is set. +#inactivity_box_content = Your account has not been activated yet. Feel free to browse around and see what's available, but you won't be able to upload data or run jobs until you have verified your email address. + + +# -- Display sites + +# Galaxy can display data in various external browsers. These options specify +# which browsers should be available. URLs and builds available at these +# browsers are defined in the specified files. + +# If use_remote_user = True, display application servers will be denied access +# to Galaxy and so displaying datasets in these sites will fail. +# display_servers contains a list of hostnames which should be allowed to +# bypass security to display datasets. Please be aware that there are security +# implications if this is allowed. More details (including required changes to +# the proxy server config) are available in the Apache proxy documentation on +# the wiki. +# +# The list of servers in this sample config is for the UCSC Main, Test and +# Archaea browsers, but the default if left commented is to not allow any +# display sites to bypass security (you must uncomment the line below to allow +# them). +#display_servers = hgw1.cse.ucsc.edu,hgw2.cse.ucsc.edu,hgw3.cse.ucsc.edu,hgw4.cse.ucsc.edu,hgw5.cse.ucsc.edu,hgw6.cse.ucsc.edu,hgw7.cse.ucsc.edu,hgw8.cse.ucsc.edu,lowepub.cse.ucsc.edu + +# To disable the old-style display applications that are hardcoded into datatype classes, +# set enable_old_display_applications = False. +# This may be desirable due to using the new-style, XML-defined, display applications that +# have been defined for many of the datatypes that have the old-style. +# There is also a potential security concern with the old-style applications, +# where a malicious party could provide a link that appears to reference the Galaxy server, +# but contains a redirect to a third-party server, tricking a Galaxy user into accessing said +# site. +#enable_old_display_applications = True + +# -- Next gen LIMS interface on top of existing Galaxy Sample/Request management code. + +use_nglims = False +nglims_config_file = tool-data/nglims.yaml + +# -- UI Localization + +# Show a message box under the masthead. +#message_box_visible = False +#message_box_content = None +#message_box_class = info + +# Append "/{brand}" to the "Galaxy" text in the masthead. +#brand = None + +# Format string used when showing date and time information.
+# Format string used when showing date and time information.
+# The string may contain:
+# - the directives used by the Python time.strftime() function (see
+#   http://docs.python.org/2/library/time.html#time.strftime ),
+# - $locale (complete format string for the server locale),
+# - $iso8601 (complete format string as specified by the ISO 8601
+#   international standard).
+# pretty_datetime_format = $locale (UTC)
+
+# The URL of the page to display in Galaxy's middle pane when loaded. This
+# can be an absolute or relative URL.
+#welcome_url = /static/welcome.html
+
+# The URL linked by the "Galaxy/brand" text.
+#logo_url = /
+
+# The URL linked by the "Wiki" link in the "Help" menu.
+#wiki_url = http://wiki.galaxyproject.org/
+
+# The URL linked by the "Support" link in the "Help" menu.
+#support_url = http://wiki.galaxyproject.org/Support
+
+# The URL linked by the "How to Cite Galaxy" link in the "Help" menu.
+#citation_url = http://wiki.galaxyproject.org/CitingGalaxy
+
+# The URL linked by the "Search" link in the "Help" menu.
+#search_url = http://galaxyproject.org/search/usegalaxy/
+
+# The URL linked by the "Mailing Lists" link in the "Help" menu.
+#mailing_lists_url = http://wiki.galaxyproject.org/MailingLists
+
+# The URL linked by the "Videos" link in the "Help" menu.
+#videos_url = http://vimeo.com/galaxyproject
+
+# The URL linked by the "Terms and Conditions" link in the "Help" menu, as
+# well as on the user registration and login forms and in the activation
+# emails.
+#terms_url = None
+
+# The URL linked by the "Galaxy Q&A" link in the "Help" menu.
+# The Galaxy Q&A site is under development; when the site is done, this URL
+# will be set and uncommented.
+#qa_url =
+
+# Serve static content, which must be enabled if you're not serving it via a
+# proxy server. These options should be self-explanatory and so are not
+# documented individually. You can use these paths (or ones in the proxy
+# server) to point to your own styles.
+#static_enabled = True
+#static_cache_time = 360
+#static_dir = static/
+#static_images_dir = static/images
+#static_favicon_dir = static/favicon.ico
+#static_scripts_dir = static/scripts/
+#static_style_dir = static/june_2007_style/blue
+#static_robots_txt = static/robots.txt
+
+# Pack JavaScript at launch (/static/scripts/*.js)
+# This only happens if the modified timestamp of the source .js is newer
+# than the version (if it exists) in /static/scripts/packed/
+# Note that this requires Java > 1.4 for executing yuicompressor.jar
+#pack_scripts = False
+
+# Cloud Launch
+
+#enable_cloud_launch = False
+#cloudlaunch_default_ami = ami-a7dbf6ce
+
+# Incremental Display Options
+
+#display_chunk_size = 65536
+
+# -- Advanced proxy features
+
+# For help on configuring the Advanced proxy features, see:
+# http://usegalaxy.org/production
+
+# Apache can handle file downloads (Galaxy-to-user) via mod_xsendfile. Set
+# this to True to inform Galaxy that mod_xsendfile is enabled upstream.
+#apache_xsendfile = False
+
+# The same download handling can be done by nginx using X-Accel-Redirect.
+# This should be set to the path defined in the nginx config as an internal
+# redirect with access to Galaxy's data files (see documentation linked
+# above).
+#nginx_x_accel_redirect_base = False
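To make the nginx download offloading concrete, here is a sketch assuming a '/_x_accel_redirect' prefix (the path is your choice and must match an 'internal' location configured in your nginx proxy, per the documentation linked above):

    nginx_x_accel_redirect_base = /_x_accel_redirect

    # and on the nginx side (not part of this file), something like:
    #     location /_x_accel_redirect/ {
    #         internal;
    #         alias /;
    #     }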
+# nginx can make use of mod_zip to create zip files containing multiple
+# library files. If using X-Accel-Redirect, this can be the same value as
+# that option.
+#nginx_x_archive_files_base = False
+
+# If using compression in the upstream proxy server, use this option to
+# disable gzipping of library .tar.gz and .zip archives, since the proxy
+# server will do it faster on the fly.
+#upstream_gzip = False
+
+# nginx can also handle file uploads (user-to-Galaxy) via nginx_upload_module.
+# Configuration for this is complex and explained in detail in the
+# documentation linked above. The upload store is a temporary directory in
+# which files uploaded by the upload module will be placed.
+#nginx_upload_store = False
+
+# This value overrides the action set on the file upload form, e.g. the web
+# path where the nginx_upload_module has been configured to intercept upload
+# requests.
+#nginx_upload_path = False
+
+# -- Logging and Debugging
+
+# Verbosity of console log messages. Acceptable values can be found here:
+# http://docs.python.org/library/logging.html#logging-levels
+#log_level = DEBUG
+
+# Print database operations to the server log (warning, quite verbose!).
+#database_engine_option_echo = False
+
+# Print database pool operations to the server log (warning, quite verbose!).
+#database_engine_option_echo_pool = False
+
+# Turn on logging of application events and some user events to the database.
+#log_events = True
+
+# Turn on logging of user actions to the database. Actions currently logged
+# are grid views, tool searches, and use of the "recently used tools" menu.
+# The log_events and log_actions functionality will eventually be merged.
+#log_actions = True
+
+# Sanitize All HTML Tool Output
+# By default, all tool output served as 'text/html' will be sanitized
+# thoroughly. This can be disabled if you have special tools that require
+# unaltered output.
+#sanitize_all_html = True
+
+# By default Galaxy will serve non-HTML tool output that may potentially
+# contain browser-executable JavaScript content as plain text. This will,
+# for instance, cause SVG datasets not to render properly; it may be
+# disabled by setting the following option to True.
+#serve_xss_vulnerable_mimetypes = False
+
+# Debug enables access to various config options useful for development and
+# debugging: use_lint, use_profile, use_printdebug and use_interactive. It
+# also causes the files used by PBS/SGE (submission script, output, and
+# error) to remain on disk after the job is complete. Debug mode is disabled
+# if commented, but is uncommented by default in the sample config.
+debug = True
+
+# Check for WSGI compliance.
+#use_lint = False
+
+# Run the Python profiler on each request.
+#use_profile = False
+
+# Intercept print statements and show them on the returned page.
+#use_printdebug = True
+
+# Enable live debugging in your browser. This should NEVER be enabled on a
+# public site. Enabled in the sample config for development.
+use_interactive = True
+
+# Write thread status periodically to 'heartbeat.log' (careful, uses disk
+# space rapidly!). Useful to determine why your processes may be consuming a
+# lot of CPU.
+#use_heartbeat = False
+
+# Enable the memory debugging interface (careful, negatively impacts server
+# performance).
+#use_memdump = False
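Since debug and use_interactive are shipped enabled in this sample, a production deployment would typically invert the development-oriented flags above; a minimal sketch:

    log_level = INFO
    debug = False
    use_interactive = False
    # leave sanitize_all_html = True unless specific tools need raw HTML output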
+# -- Data Libraries
+
+# These library upload options are described in much more detail in the wiki:
+# https://wiki.galaxyproject.org/Admin/DataLibraries/UploadingLibraryFiles
+
+# Add an option to the library upload form which allows administrators to
+# upload a directory of files.
+#library_import_dir = None
+
+# Add an option to the library upload form which allows authorized
+# non-administrators to upload a directory of files. The configured directory
+# must contain sub-directories named the same as the non-admin user's Galaxy
+# login (email). The non-admin user is restricted to uploading files or
+# sub-directories of files contained in their directory.
+#user_library_import_dir = None
+
+# Add an option to the admin library upload tool allowing admins to paste
+# filesystem paths to files and directories in a box, and these paths will be
+# added to a library. Set to True to enable. Please note the security
+# implication that this will give Galaxy admins access to anything your
+# Galaxy user has access to.
+#allow_library_path_paste = False
+
+# Users may choose to download multiple files from a library in an archive.
+# By default, Galaxy allows users to select from a few different archive
+# formats if testing shows that Galaxy is able to create files using these
+# formats. Specific formats can be disabled with this option; separate
+# multiple formats with commas. Available formats are currently 'zip', 'gz',
+# and 'bz2'.
+#disable_library_comptypes =
+
+# Some sequencer integration features in beta allow you to automatically
+# transfer datasets. This is done using a lightweight transfer manager which
+# runs outside of Galaxy (but is spawned by it automatically). Galaxy will
+# communicate with this manager over the port specified here.
+#transfer_manager_port = 8163
+
+# Search data libraries with whoosh
+#enable_whoosh_library_search = True
+# Whoosh indexes are stored in this directory.
+#whoosh_index_dir = database/whoosh_indexes
+
+# Search data libraries with lucene
+#enable_lucene_library_search = False
+# Maximum file size to index for searching, in MB
+#fulltext_max_size = 500
+#fulltext_noindex_filetypes = bam,sam,wig,bigwig,fasta,fastq,fastqsolexa,fastqillumina,fastqsanger
+# Base URL of the server providing search functionality using lucene
+#fulltext_url = http://localhost:8081
+
+# -- Users and Security
+
+# Galaxy encodes various internal values when these values will be output in
+# some format (for example, in a URL or cookie). You should set a key to be
+# used by the algorithm that encodes and decodes these values. It can be any
+# string. If left unchanged, anyone could construct a cookie that would grant
+# them access to others' sessions.
+#id_secret = USING THE DEFAULT IS NOT SECURE!
+
+# User authentication can be delegated to an upstream proxy server (usually
+# Apache). The upstream proxy should set a REMOTE_USER header in the request.
+# Enabling remote user disables regular logins. For more information, see:
+# https://wiki.galaxyproject.org/Admin/Config/ApacheProxy
+#use_remote_user = False
+
+# If use_remote_user is enabled and your external authentication method just
+# returns bare usernames, set a default mail domain to be appended to
+# usernames, to become your Galaxy usernames (email addresses).
+#remote_user_maildomain = None
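For example, if the upstream proxy authenticates a user and passes only the bare username 'jdoe', the following (with a hypothetical domain) would have Galaxy treat that user as jdoe@example.org:

    use_remote_user = True
    remote_user_maildomain = example.org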
+# If use_remote_user is enabled, the header that the upstream proxy provides
+# the remote username in defaults to HTTP_REMOTE_USER (the 'HTTP_' is
+# prepended by WSGI). This option allows you to change the header. Note, you
+# still need to prepend 'HTTP_' to the header in this option, but your proxy
+# server should *not* include 'HTTP_' at the beginning of the header name.
+#remote_user_header = HTTP_REMOTE_USER
+
+# If use_remote_user is enabled, you can set this to a URL that will log your
+# users out.
+#remote_user_logout_href = None
+
+# If your proxy and/or authentication source does not normalize e-mail
+# addresses or user names being passed to Galaxy, set the following option
+# to True to force these to lower case.
+#normalize_remote_user_email = False
+
+# Administrative users - set this to a comma-separated list of valid Galaxy
+# users (email addresses). These users will have access to the Admin section
+# of the server, and will be able to create users, groups, roles, libraries,
+# and more. For more information, see:
+# https://wiki.galaxyproject.org/Admin/Interface
+#admin_users = None
+
+# Force everyone to log in (disable anonymous access).
+#require_login = False
+
+# Allow unregistered users to create new accounts (otherwise, they will have
+# to be created by an admin).
+#allow_user_creation = True
+
+# Allow administrators to delete accounts.
+#allow_user_deletion = False
+
+# Allow administrators to log in as other users (useful for debugging).
+#allow_user_impersonation = False
+
+# Allow users to remove their datasets from disk immediately (otherwise,
+# datasets will be removed after a time period specified by an administrator
+# in the cleanup scripts run via cron).
+#allow_user_dataset_purge = False
+
+# By default, users' data will be public, but setting this to True will cause
+# it to be private. Does not affect existing users and data, only ones created
+# after this option is set. Users may still change their default back to
+# public.
+#new_user_dataset_access_role_default_private = False
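Combining the options in this section, a locked-down private instance might look like the following (the admin address is a placeholder):

    admin_users = admin@example.org
    require_login = True
    allow_user_creation = False
    new_user_dataset_access_role_default_private = True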
+
+# -- Beta features
+
+# Enable Galaxy to communicate directly with a sequencer
+#enable_sequencer_communication = False
+
+# Enable authentication via OpenID. Allows users to log in to their Galaxy
+# account by authenticating with an OpenID provider.
+#enable_openid = False
+# .sample used if default does not exist
+#openid_config_file = config/openid_conf.xml
+#openid_consumer_cache_path = database/openid_consumer_cache
+
+# Optional list of email addresses of API users who can make calls on behalf
+# of other users
+#api_allow_run_as = None
+
+# Master key that allows many API admin actions to be used without actually
+# having a defined admin user in the database/config. Only set this if you
+# need to bootstrap Galaxy; you probably do not want to set this on public
+# servers.
+#master_api_key = changethis
+
+# Enable tool tags (associating tools with tags). This has its own option
+# since its implementation has a few performance implications on startup for
+# large servers.
+#enable_tool_tags = False
+
+# Enable a feature when running workflows. When enabled, default datasets
+# are selected for "Set at Runtime" inputs from the history such that the
+# same input will not be selected twice, unless there are more inputs than
+# compatible datasets in the history. When False, the most recently added
+# compatible item in the history will be used for each "Set at Runtime"
+# input, independent of others in the workflow.
+#enable_unique_workflow_defaults = False
+
+# The URL to the myExperiment instance being used (omit scheme but include
+# port)
+#myexperiment_url = www.myexperiment.org:80
+
+# Enable Galaxy's "Upload via FTP" interface. You'll need to install and
+# configure an FTP server (we've used ProFTPd since it can use Galaxy's
+# database for authentication) and set the following two options.
+
+# This should point to a directory containing subdirectories matching users'
+# email addresses, where Galaxy will look for files.
+#ftp_upload_dir = None
+
+# This should be the hostname of your FTP server, which will be provided to
+# users in the help text.
+#ftp_upload_site = None
+
+# Enable enforcement of quotas. Quotas can be set from the Admin interface.
+#enable_quotas = False
+
+# This option allows users to see the full path of datasets via the "View
+# Details" option in the history. Administrators can always see this.
+#expose_dataset_path = False
+
+# Data manager configuration options
+# Allow non-admin users to view available Data Manager options
+#enable_data_manager_user_view = False
+# File where Data Managers are configured (.sample used if default does not
+# exist)
+#data_manager_config_file = config/data_manager_conf.xml
+# File where Tool Shed based Data Managers are configured
+#shed_data_manager_config_file = config/shed_data_manager_conf.xml
+# Directory to store Data Manager based tool-data; defaults to tool_data_path
+#galaxy_data_manager_data_path = tool-data
+
+# -- Job Execution
+
+# To increase performance of job execution and the web interface, you can
+# separate Galaxy into multiple processes. There is more than one way to do
+# this, and the methods are explained in detail in the documentation:
+#
+#   https://wiki.galaxyproject.org/Admin/Config/Performance/Scaling
+
+# By default, Galaxy manages and executes jobs from within a single process
+# and notifies itself of new jobs via in-memory queues. Jobs are run locally
+# on the system on which Galaxy is started. Advanced job running capabilities
+# can be configured through the job configuration file.
+#job_config_file = config/job_conf.xml
+
+# In multiprocess configurations, notification between processes about new
+# jobs is done via the database. In single process configurations, this is
+# done in memory, which is a bit quicker. Galaxy tries to automatically
+# determine which method it should use based on your handler configuration in
+# the job config file, but you can explicitly override this behavior by
+# setting the following option to True or False.
+#track_jobs_in_database = None
+
+# This enables splitting of jobs into tasks, if specified by the particular
+# tool config. This is a new feature and not recommended for production
+# servers yet.
+#use_tasked_jobs = False
+#local_task_queue_workers = 2
+
+# Enable job recovery (if Galaxy is restarted while cluster jobs are running,
+# it can "recover" them when it starts). This is not safe to use if you are
+# running more than one Galaxy server using the same database.
+#enable_job_recovery = True
+
+# Although it is fairly reliable, setting metadata can occasionally fail. In
+# these instances, you can choose to retry setting it internally or leave it
+# in a failed state (since retrying internally may cause the Galaxy process
+# to be unresponsive). If this option is set to False, the user will be given
+# the option to retry externally, or set metadata manually (when possible).
+#retry_metadata_internally = True
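As a small example of the job-execution options above, turning on the experimental task splitting with four local task workers would be:

    use_tasked_jobs = True
    local_task_queue_workers = 4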
+# If (for example) you run on a cluster and your datasets (by default,
+# database/files/) are mounted read-only, this option will override tool
+# output paths to write outputs to the working directory instead, and the job
+# manager will move the outputs to their proper place in the dataset
+# directory on the Galaxy server after the job completes.
+#outputs_to_working_directory = False
+
+# If your network filesystem's caching prevents the Galaxy server from seeing
+# the job's stdout and stderr files when it completes, you can retry reading
+# these files. The job runner will retry the number of times specified below,
+# waiting 1 second between tries. For NFS, you may want to try the -noac
+# mount option (Linux) or -actimeo=0 (Solaris).
+#retry_job_output_collection = 0
+
+# Clean up various bits of jobs left on the filesystem after completion.
+# These bits include the job working directory, external metadata temporary
+# files, and DRM stdout and stderr files (if using a DRM). Possible values
+# are: always, onsuccess, never
+#cleanup_job = always
+
+# File to source to set up the environment when running jobs. By default, the
+# environment in which the Galaxy server starts is used when running jobs
+# locally, and the environment set up per the DRM's submission method and
+# policy is used when running jobs on a cluster (try testing with `qsub` on
+# the command line). environment_setup_file can be set to the path of a file
+# on the cluster that should be sourced by the user to set up the environment
+# prior to running tools. This can be especially useful for running jobs as
+# the actual user, to remove the need to configure each user's environment
+# individually.
+#environment_setup_file = None
+
+
+# Optional file containing job resource data entry fields definition.
+# These fields will be presented to users in the tool forms and allow them to
+# override default job resources such as number of processors, memory, and
+# walltime.
+#job_resource_params_file = config/job_resource_params_conf.xml
+
+# If using job concurrency limits (configured in job_config_file), several
+# extra database queries must be performed to determine the number of jobs a
+# user has dispatched to a given destination. By default, these queries will
+# happen for every job that is waiting to run, but if cache_user_job_count is
+# set to True, it will only happen once per iteration of the handler queue.
+# Although better for performance due to reduced queries, the tradeoff is a
+# greater possibility that jobs will be dispatched past the configured limits
+# if running many handlers.
+#cache_user_job_count = False
+
+# ToolBox filtering
+# Modules from lib/galaxy/tools/filters/ can be specified in the following
+# lines. tool_* filters will be applied for all users and cannot be changed
+# by them. user_tool_* filters will be shown under user preferences and can
+# be toggled on and off at runtime.
+#tool_filters
+#tool_label_filters
+#tool_section_filters
+#user_tool_filters = examples:restrict_upload_to_admins, examples:restrict_encode
+#user_tool_section_filters = examples:restrict_text
+#user_tool_label_filters = examples:restrict_upload_to_admins, examples:restrict_encode
+
+# Galaxy Application Internal Message Queue
+
+# Galaxy uses AMQP internally (TODO: more documentation on what for).
+# For examples, see http://ask.github.io/kombu/userguide/connections.html
+#
+# Without specifying anything here, Galaxy will first attempt to use your
+# specified database_connection above. If that's not specified either, Galaxy
+# will automatically create and use a separate sqlite database located in
+# your <galaxy>/database folder (indicated in the commented-out line below).
+
+#amqp_internal_connection = sqlalchemy+sqlite:///./database/control.sqlite?isolation_level=IMMEDIATE
+
+
+
+# ---- Galaxy External Message Queue -------------------------------------------------
+
+# Galaxy uses the AMQP protocol to receive messages from external sources
+# like barcode scanners. Galaxy has been tested against the RabbitMQ AMQP
+# implementation. For Galaxy to receive messages from a message queue, the
+# RabbitMQ server has to be set up with a user account and the other
+# parameters listed below. The 'host' and 'port' fields should point to
+# where the RabbitMQ server is running.
+
+[galaxy_amqp]
+
+#host = 127.0.0.1
+#port = 5672
+#userid = galaxy
+#password = galaxy
+#virtual_host = galaxy_messaging_engine
+#queue = galaxy_queue
+#exchange = galaxy_exchange
+#routing_key = bar_code_scanner
+#rabbitmqctl_path = /path/to/rabbitmqctl
+

diff -r 76a4156aefd3e7076571f18e9199dde1fb05b0f3 -r 5f90a490e9971685945081cbed681c961932a4fd config/job_conf.xml.sample_advanced
--- /dev/null
+++ b/config/job_conf.xml.sample_advanced
@@ -0,0 +1,482 @@
+<?xml version="1.0"?>
+<job_conf>
+    <plugins workers="4">
+        <!-- "workers" is the number of threads for the runner's work queue.
+             The default from <plugins> is used if not defined for a <plugin>.
+        -->
+        <plugin id="local" type="runner" load="galaxy.jobs.runners.local:LocalJobRunner"/>
+        <plugin id="pbs" type="runner" load="galaxy.jobs.runners.pbs:PBSJobRunner" workers="2"/>
+        <plugin id="drmaa" type="runner" load="galaxy.jobs.runners.drmaa:DRMAAJobRunner">
+            <!-- Different DRMs handle successfully completed jobs differently;
+                 these options can be changed to handle such differences and
+                 are explained in detail on the Galaxy wiki. Defaults are
+                 shown. -->
+            <param id="invalidjobexception_state">ok</param>
+            <param id="invalidjobexception_retries">0</param>
+            <param id="internalexception_state">ok</param>
+            <param id="internalexception_retries">0</param>
+        </plugin>
+        <plugin id="sge" type="runner" load="galaxy.jobs.runners.drmaa:DRMAAJobRunner">
+            <!-- Override the $DRMAA_LIBRARY_PATH environment variable -->
+            <param id="drmaa_library_path">/sge/lib/libdrmaa.so</param>
+        </plugin>
+        <plugin id="cli" type="runner" load="galaxy.jobs.runners.cli:ShellJobRunner" />
+        <plugin id="condor" type="runner" load="galaxy.jobs.runners.condor:CondorJobRunner" />
+        <plugin id="slurm" type="runner" load="galaxy.jobs.runners.slurm:SlurmJobRunner" />
+        <plugin id="dynamic" type="runner">
+            <!-- The dynamic runner is not a real job running plugin and is
+                 always loaded, so it does not need to be explicitly stated in
+                 <plugins>. However, if you wish to change the base module
+                 containing your dynamic rules, you can do so.

+                 The `load` attribute is not required (and ignored if
+                 included).
+            -->
+            <param id="rules_module">galaxy.jobs.rules</param>
+        </plugin>
+        <!-- Pulsar runners (see more at https://pulsar.readthedocs.org) -->
+        <plugin id="pulsar_rest" type="runner" load="galaxy.jobs.runners.pulsar:PulsarRESTJobRunner">
+            <!-- Allow optimized HTTP calls with libcurl (defaults to urllib) -->
+            <!-- <param id="transport">curl</param> -->
+
+            <!-- *Experimental Caching*: The next parameter enables caching.
+                 It will likely not work with newer features such as MQ
+                 support.
+
+                 If this is enabled, be sure to specify a `file_cache_dir` in
+                 the remote Pulsar server's main configuration file.
+            -->
+            <!-- <param id="cache">True</param> -->
+        </plugin>
+        <plugin id="pulsar_mq" type="runner" load="galaxy.jobs.runners.pulsar:PulsarMQJobRunner">
+            <!-- AMQP URL to connect to. -->
--> + <param id="amqp_url">amqp://guest:guest@localhost:5672//</param> + <!-- URL remote Pulsar apps should transfer files to this Galaxy + instance to/from. --> + <param id="galaxy_url">http://localhost:8080</param> + <!-- Pulsar job manager to communicate with (see Pulsar + docs for information on job managers). --> + <!-- <param id="manager">_default_</param> --> + <!-- The AMQP client can provide an SSL client certificate (e.g. for + validation), the following options configure that certificate + (see for reference: + http://kombu.readthedocs.org/en/latest/reference/kombu.connection.html + ). If you simply want to use SSL but not use/validate a client + cert, just use the ?ssl=1 query on the amqp URL instead. --> + <!-- <param id="amqp_connect_ssl_ca_certs">/path/to/cacert.pem</param> --> + <!-- <param id="amqp_connect_ssl_keyfile">/path/to/key.pem</param> --> + <!-- <param id="amqp_connect_ssl_certfile">/path/to/cert.pem</param> --> + <!-- <param id="amqp_connect_ssl_cert_reqs">cert_required</param> --> + <!-- By default, the AMQP consumer uses a nonblocking connection with + a 0.2 second timeout. In testing, this works fine for + unencrypted AMQP connections, but with SSL it will cause the + client to reconnect to the server after each timeout. Set to a + higher value (in seconds) (or `None` to use blocking connections). --> + <!-- <param id="amqp_consumer_timeout">None</param> --> + </plugin> + <plugin id="pulsar_legacy" type="runner" load="galaxy.jobs.runners.pulsar:PulsarLegacyJobRunner"> + <!-- Pulsar job runner with default parameters matching those + of old LWR job runner. If your Pulsar server is running on a + Windows machine for instance this runner should still be used. + + These destinations still needs to target a Pulsar server, + older LWR plugins and destinations still work in Galaxy can + target LWR servers, but this support should be considered + deprecated and will disappear with a future release of Galaxy. + --> + </plugin> + </plugins> + <handlers default="handlers"> + <!-- Additional job handlers - the id should match the name of a + [server:<id>] in galaxy.ini. + --> + <handler id="handler0" tags="handlers"/> + <handler id="handler1" tags="handlers"/> + <!-- Handlers will load all plugins defined in the <plugins> collection + above by default, but can be limited to a subset using <plugin> + tags. This is useful for heterogenous environments where the DRMAA + plugin would need to be loaded more than once with different + configs. + --> + <handler id="sge_handler"> + <plugin id="sge"/> + </handler> + <handler id="special_handler0" tags="special_handlers"/> + <handler id="special_handler1" tags="special_handlers"/> + <handler id="trackster_handler"/> + </handlers> + <destinations default="local"> + <!-- Destinations define details about remote resources and how jobs + should be executed on those remote resources. + --> + <destination id="local" runner="local"/> + <destination id="multicore_local" runner="local"> + <param id="local_slots">4</param><!-- Specify GALAXY_SLOTS for local jobs. --> + <!-- Warning: Local slot count doesn't tie up additional worker threads, to prevent over + allocating machine define a second local runner with different name and fewer workers + to run this destination. 
--> + <param id="embed_metadata_in_job">True</param> + <!-- Above parameter will be default (with no option to set + to False) in an upcoming release of Galaxy, but you can + try it early - it will slightly speed up local jobs by + embedding metadata calculation in job script itself. + --> + <job_metrics /> + <!-- Above element demonstrates embedded job metrics definition - see + job_metrics_conf.xml.sample for full documentation on possible nested + elements. This empty block will simply disable job metrics for the + corresponding destination. --> + </destination> + <destination id="docker_local" runner="local"> + <param id="docker_enabled">true</param> + <!-- docker_volumes can be used to configure volumes to expose to docker, + For added isolation append :ro to the path to mount it read only. + Galaxy will attempt to infer a reasonable set of defaults which + volumes should be exposed how based on Galaxy's settings and the + destination - but be sure to add any library paths or data incides + that may be needed read-only. + --> + <!-- + <param id="docker_volumes">$defaults,/mnt/galaxyData/libraries:ro,/mnt/galaxyData/indices:ro</param> + --> + <!-- For a stock Galaxy instance and traditional job runner $defaults will + expand out as: + + $galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$default_file_path:rw + + This assumes most of what is needed is available under Galaxy's root directory, + the tool directory, and the Galaxy's file_path (if using object store creatively + you will definitely need to expand defaults). + + This configuration allows any docker instance to write to any Galaxy + file - for greater isolation set outputs_to_working_directory in + galaxy.ini. This will cause $defaults to allow writing to much + less. It will then expand as follows: + + $galaxy_root:ro,$tool_directory:ro,$working_directory:rw,$default_file_path:ro + + If using the Pulsar, defaults will be even further restricted because the + Pulsar will (by default) stage all needed inputs into the job's job_directory + (so there is not need to allow the docker container to read all the + files - let alone write over them). Defaults in this case becomes: + + $job_directory:ro,$tool_directory:ro,$job_directory/outputs:rw,$working_directory:rw + + Python string.Template is used to expand volumes and values $defaults, + $galaxy_root, $default_file_path, $tool_directory, $working_directory, + are available to all jobs and $job_directory is also available for + Pulsar jobs. + --> + <!-- One can run docker using volumes-from tag by setting the following + parameter. For more information on volumes-from check out the following + docker tutorial. https://docs.docker.com/userguide/dockervolumes/ + --> + <!-- <param id="docker_volumes_from">parent_container_name</param> --> + <!-- Control memory allocatable by docker container with following option: + --> + <!-- <param id="docker_memory">24G</param> --> + <!-- By default Docker will need to runnable by Galaxy using + password-less sudo - this can be configured by adding the + following line to the sudoers file of all compute nodes + with docker enabled: + + galaxy ALL = (root) NOPASSWD: SETENV: /usr/bin/docker + + The follow option is set to false to disable sudo (docker + must likewise be configured to allow this). + --> + <!-- <param id="docker_sudo">false</param> --> + <!-- Following option can be used to tweak sudo command used by + default. 
+            <!-- <param id="docker_sudo_cmd">/usr/bin/sudo -extra_param</param> -->
+            <!-- By default, the docker container will not have any networking
+                 enabled. Host networking can be bridged by uncommenting the
+                 next option; see
+                 http://docs.docker.io/reference/run/#network-settings
+            -->
+            <!-- <param id="docker_net">bridge</param> -->
+            <!-- The following option can be used to tweak the docker command. -->
+            <!-- <param id="docker_cmd">/usr/local/custom_docker/docker</param> -->
+            <!-- The following can be used to connect to the docker server in
+                 different ways (translated as the -H argument to the docker
+                 client). -->
+            <!-- <param id="docker_host">unix:///var/run/docker.sock</param> -->
+            <!-- <param id="docker_host">:5555</param> -->
+            <!-- <param id="docker_host">tcp://127.0.0.1:4243</param> -->
+
+            <!-- If the deployer wants to use docker for isolation, but does
+                 not trust the tool's specified container - a destination-wide
+                 override can be set. This will cause all jobs on this
+                 destination to use that docker image. -->
+            <!-- <param id="docker_container_id_override">busybox:ubuntu-14.04</param> -->
+
+            <!-- Likewise, if the deployer wants to use docker for isolation
+                 and does trust the tool's specified container - but also
+                 wants tools that are not configured to run in a container to
+                 get one - the following option can provide a fallback. -->
+            <!-- <param id="docker_default_container_id">busybox:ubuntu-14.04</param> -->
+
+        </destination>
+        <destination id="pbs" runner="pbs" tags="mycluster"/>
+        <destination id="pbs_longjobs" runner="pbs" tags="mycluster,longjobs">
+            <!-- Define parameters that are native to the job runner plugin. -->
+            <param id="Resource_List">walltime=72:00:00</param>
+        </destination>
+        <destination id="remote_cluster" runner="drmaa" tags="longjobs"/>
+        <destination id="java_cluster" runner="drmaa">
+            <!-- Set arbitrary environment variables at runtime. General
+                 dependencies for tools should be configured via
+                 tool_dependency_dir and package options; these options
+                 should be reserved for defining cluster-specific options.
+            -->
+            <env id="_JAVA_OPTIONS">-Xmx6G</env>
+            <env id="ANOTHER_OPTION" raw="true">'5'</env><!-- raw disables auto quoting -->
+            <env file="/mnt/java_cluster/environment_setup.sh" /><!-- will be sourced -->
+            <env exec="module load javastuff/2.10" /><!-- will be sourced -->
+            <!-- Files to source and exec statements will be handled on remote
+                 clusters. These don't need to be available on the Galaxy
+                 server itself.
+            -->
+        </destination>
+        <destination id="real_user_cluster" runner="drmaa">
+            <!-- TODO: The real user options should maybe not be considered runner params. -->
+            <param id="galaxy_external_runjob_script">scripts/drmaa_external_runner.py</param>
+            <param id="galaxy_external_killjob_script">scripts/drmaa_external_killer.py</param>
+            <param id="galaxy_external_chown_script">scripts/external_chown_script.py</param>
+        </destination>
+        <destination id="dynamic" runner="dynamic">
+            <!-- A destination that represents a method in the dynamic runner. -->
+            <param id="function">foo</param>
+        </destination>
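The 'function' parameter above names a Python function in the rules module (galaxy.jobs.rules by default, per the dynamic plugin definition). As a rough, hypothetical sketch of such a rule - the module path, function logic, and tool id here are made up, and Galaxy chooses which arguments to pass by inspecting the function's signature:

    # lib/galaxy/jobs/rules/my_rules.py (hypothetical module)
    def foo(user_email, tool_id):
        """Route one heavy tool to the cluster; run everything else locally."""
        if tool_id == "some_heavy_tool":
            return "remote_cluster"  # must be a destination id defined in this file
        return "local"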
--> + <param id="function">foo</param> + </destination> + <destination id="load_balance" runner="dynamic"> + <param id="type">choose_one</param> + <!-- Randomly assign jobs to various static destination ids --> + <param id="destination_ids">cluster1,cluster2,cluster3</param> + </destination> + <destination id="load_balance_with_data_locality" runner="dynamic"> + <!-- Randomly assign jobs to various static destination ids, + but keep jobs in the same workflow invocation together and + for those jobs ran outside of workflows keep jobs in same + history together. + --> + <param id="type">choose_one</param> + <param id="destination_ids">cluster1,cluster2,cluster3</param> + <param id="hash_by">workflow_invocation,history</param> + </destination> + <destination id="burst_out" runner="dynamic"> + <!-- Burst out from static destination local_cluster_8_core to + static destination shared_cluster_8_core when there are about + 50 Galaxy jobs assigned to any of the local_cluster_XXX + destinations (either running or queued). If there are fewer + than 50 jobs, just use local_cluster_8_core destination. + + Uncomment job_state parameter to make this bursting happen when + roughly 50 jobs are queued instead. + --> + <param id="type">burst</param> + <param id="from_destination_ids">local_cluster_8_core,local_cluster_1_core,local_cluster_16_core</param> + <param id="to_destination_id">shared_cluster_8_core</param> + <param id="num_jobs">50</param> + <!-- <param id="job_states">queued</param> --> + </destination> + <destination id="docker_dispatch" runner="dynamic"> + <!-- Follow dynamic destination type will send all tool's that + support docker to static destination defined by + docker_destination_id (docker_cluster in this example) and all + other tools to default_destination_id (normal_cluster in this + example). + --> + <param id="type">docker_dispatch</param> + <param id="docker_destination_id">docker_cluster</param> + <param id="default_destination_id">normal_cluster</param> + </destination> + <destination id="secure_pulsar_rest_dest" runner="pulsar_rest"> + <param id="url">https://examle.com:8913/</param> + <!-- If set, private_token must match token in remote Pulsar's + configuration. --> + <param id="private_token">123456789changeme</param> + <!-- Uncomment the following statement to disable file staging (e.g. + if there is a shared file system between Galaxy and the Pulsar + server). Alternatively action can be set to 'copy' - to replace + http transfers with file system copies, 'remote_transfer' to cause + the Pulsar to initiate HTTP transfers instead of Galaxy, or + 'remote_copy' to cause Pulsar to initiate file system copies. + If setting this to 'remote_transfer' be sure to specify a + 'galaxy_url' attribute on the runner plugin above. --> + <!-- <param id="default_file_action">none</param> --> + <!-- The above option is just the default, the transfer behavior + none|copy|http can be configured on a per path basis via the + following file. See Pulsar documentation for more details and + examples. + --> + <!-- <param id="file_action_config">file_actions.yaml</param> --> + <!-- The non-legacy Pulsar runners will attempt to resolve Galaxy + dependencies remotely - to enable this set a tool_dependency_dir + in Pulsar's configuration (can work with all the same dependency + resolutions mechanisms as Galaxy - tool Shed installs, Galaxy + packages, etc...). To disable this behavior, set the follow parameter + to none. 
+                 To generate the dependency resolution command locally, set
+                 the following parameter to local.
+            -->
+            <!-- <param id="dependency_resolution">none</param> -->
+            <!-- Uncomment the following option to enable setting metadata on
+                 the remote Pulsar server. The 'use_remote_datatypes' option
+                 is available for determining whether to use remotely
+                 configured datatypes or local ones (both alternatives are a
+                 little brittle). -->
+            <!-- <param id="remote_metadata">true</param> -->
+            <!-- <param id="use_remote_datatypes">false</param> -->
+            <!-- <param id="remote_property_galaxy_home">/path/to/remote/galaxy-central</param> -->
+            <!-- If the remote Pulsar server is configured to run jobs as the
+                 real user, uncomment the following line to pass the current
+                 Galaxy user along. -->
+            <!-- <param id="submit_user">$__user_name__</param> -->
+            <!-- Various other submission parameters can be passed along to
+                 Pulsar; their use will depend on the remote Pulsar's
+                 configured job manager. For instance:
+            -->
+            <!-- <param id="submit_native_specification">-P bignodes -R y -pe threads 8</param> -->
+            <!-- Disable parameter rewriting and rewrite generated commands
+                 instead. This may be required if the remote host is a Windows
+                 machine, but probably not otherwise.
+            -->
+            <!-- <param id="rewrite_parameters">false</param> -->
+        </destination>
+        <destination id="pulsar_mq_dest" runner="pulsar_mq" >
+            <!-- The RESTful Pulsar client sends a request to Pulsar to
+                 populate various system properties. This extra step can be
+                 disabled, and these properties calculated here on the client,
+                 by uncommenting jobs_directory and specifying any additional
+                 remote_property_ of interest; this is not optional when using
+                 message queues.
+            -->
+            <param id="jobs_directory">/path/to/remote/pulsar/files/staging/</param>
+            <!-- Otherwise, MQ and legacy Pulsar destinations can be supplied
+                 all the same destination parameters as the RESTful client
+                 documented above (though url and private_token are ignored
+                 when using an MQ).
+            -->
+        </destination>
+        <destination id="ssh_torque" runner="cli">
+            <param id="shell_plugin">SecureShell</param>
+            <param id="job_plugin">Torque</param>
+            <param id="shell_username">foo</param>
+            <param id="shell_hostname">foo.example.org</param>
+            <param id="job_Resource_List">walltime=24:00:00,ncpus=4</param>
+        </destination>
+
+        <!-- Example CLI Slurm runner. -->
+        <destination id="ssh_slurm" runner="cli">
+            <param id="shell_plugin">SecureShell</param>
+            <param id="job_plugin">Slurm</param>
+            <param id="shell_username">foo</param>
+            <param id="shell_hostname">my_host</param>
+            <param id="job_time">2:00:00</param>
+            <param id="job_ncpus">4</param>
+            <param id="job_partition">my_partition</param>
+        </destination>
+
+        <destination id="condor" runner="condor">
+            <!-- With no params, jobs are submitted to the 'vanilla' universe
+                 with:
+                   notification = NEVER
+                   getenv = true
+                 Additional/override query ClassAd params can be specified
+                 with <param> tags.
+            -->
+            <param id="request_cpus">8</param>
+        </destination>
+
+        <!-- Jobs that hit the walltime on one destination can be
+             automatically resubmitted to another destination. Walltime
+             detection is currently only implemented in the slurm runner.
+
+             Multiple resubmit tags can be defined; the first resubmit
+             matching the terminal condition of a job will be used.
+
+             The 'condition' attribute is optional; if not present, the
+             resubmit destination will be used for all conditions. Currently,
+             only the "walltime_reached" condition is implemented.
+
+             The 'handler' attribute is optional; if not present, the job's
+             original handler will be reused for the resubmitted job.
+        -->
+        <destination id="short_fast" runner="slurm">
+            <param id="nativeSpecification">--time=00:05:00 --nodes=1</param>
+            <resubmit condition="walltime_reached" destination="long_slow" handler="sge_handler" />
+        </destination>
+        <destination id="long_slow" runner="sge">
+            <!-- The destination that you resubmit jobs to can be any runner type -->
+            <param id="nativeSpecification">-l h_rt=96:00:00</param>
+        </destination>
+
+    </destinations>
+    <resources default="default">
+        <!-- Group different parameters defined in job_resource_params_conf.xml
+             together and assign these groups ids. The tools section below can
+             map tools to different groups. This is experimental functionality!
+        -->
+        <group id="default"></group>
+        <group id="memoryonly">memory</group>
+        <group id="all">processors,memory,time,project</group>
+    </resources>
+    <tools>
+        <!-- Tools can be configured to use specific destinations or handlers,
+             identified by either the "id" or "tags" attribute. If assigned to
+             a tag, a handler or destination that matches that tag will be
+             chosen at random.
+        -->
+        <tool id="foo" handler="trackster_handler">
+            <param id="source">trackster</param>
+        </tool>
+        <tool id="bar" destination="dynamic"/>
+        <!-- The next example defines a resource group to insert into the tool
+             interface and pass to the dynamic destination (as the
+             resource_params argument). -->
+        <tool id="longbar" destination="dynamic" resources="all" />
+        <tool id="baz" handler="special_handlers" destination="bigmem"/>
+    </tools>
+    <limits>
+        <!-- Certain limits can be defined. The 'concurrent_jobs' limits all
+             control the number of jobs that can be "active" at a time, that
+             is, dispatched to a runner and in the 'queued' or 'running'
+             states.
+
+             A race condition exists that will allow destination_* concurrency
+             limits to be surpassed when multiple handlers are allowed to
+             handle jobs for the same destination. To prevent this, assign all
+             jobs for a specific destination to a single handler.
+        -->
+        <!-- registered_user_concurrent_jobs:
+             Limit on the number of jobs a user with a registered Galaxy
+             account can have active across all destinations.
+        -->
+        <limit type="registered_user_concurrent_jobs">2</limit>
+        <!-- anonymous_user_concurrent_jobs:
+             Likewise, but for unregistered/anonymous users.
+        -->
+        <limit type="anonymous_user_concurrent_jobs">1</limit>
+        <!-- destination_user_concurrent_jobs:
+             The number of jobs a user can have active in the specified
+             destination, or across all destinations identified by the
+             specified tag. (formerly: concurrent_jobs)
+        -->
+        <limit type="destination_user_concurrent_jobs" id="local">1</limit>
+        <limit type="destination_user_concurrent_jobs" tag="mycluster">2</limit>
+        <limit type="destination_user_concurrent_jobs" tag="longjobs">1</limit>
+        <!-- destination_total_concurrent_jobs:
+             The number of jobs that can be active in the specified
+             destination (or across all destinations identified by the
+             specified tag) by any/all users.
+        -->
+        <limit type="destination_total_concurrent_jobs" id="local">16</limit>
+        <limit type="destination_total_concurrent_jobs" tag="longjobs">100</limit>
+        <!-- walltime:
+             Amount of time a job can run (in any destination) before it
+             will be terminated by Galaxy.
+        -->
+        <limit type="walltime">24:00:00</limit>
+        <!-- output_size:
+             Size that any defined tool output can grow to before the job
+             will be terminated.
+             This does not include temporary files created by the job. The
+             format is flexible, e.g.:
+             '10GB' = '10g' = '10240 Mb' = '10737418240'
+        -->
+        <limit type="output_size">10GB</limit>
+    </limits>
+</job_conf>

diff -r 76a4156aefd3e7076571f18e9199dde1fb05b0f3 -r 5f90a490e9971685945081cbed681c961932a4fd config/job_conf.xml.sample_basic
--- /dev/null
+++ b/config/job_conf.xml.sample_basic
@@ -0,0 +1,13 @@
+<?xml version="1.0"?>
+<!-- A sample job config that explicitly configures job running the way it is configured by default (if there is no explicit config). -->
+<job_conf>
+    <plugins>
+        <plugin id="local" type="runner" load="galaxy.jobs.runners.local:LocalJobRunner" workers="4"/>
+    </plugins>
+    <handlers>
+        <handler id="main"/>
+    </handlers>
+    <destinations>
+        <destination id="local" runner="local"/>
+    </destinations>
+</job_conf>

diff -r 76a4156aefd3e7076571f18e9199dde1fb05b0f3 -r 5f90a490e9971685945081cbed681c961932a4fd config/job_metrics_conf.xml.sample
--- /dev/null
+++ b/config/job_metrics_conf.xml.sample
@@ -0,0 +1,124 @@
+<?xml version="1.0"?>
+<!-- If job_metrics_conf.xml exists, this file will define the default job
+     metric plugins used for all jobs. Individual job_conf.xml destinations
+     can disable metric collection by setting metrics="off" on that
+     destination. The metrics attribute on destination definition elements
+     can also be a path - in which case that XML metrics file will be loaded
+     and used for that destination. Finally, the destination element may
+     contain a job_metrics child element (with all options defined below) to
+     define job metrics in an embedded manner directly in the job_conf.xml
+     file.
+-->
+<job_metrics>
+    <!-- Each element in this file corresponds to a job instrumentation
+         plugin used to generate metrics in
+         lib/galaxy/jobs/metrics/instrumenters. -->
+
+    <!-- The core plugin captures Galaxy slots, the start and end of the job
+         (in seconds since the epoch), and computes the runtime in seconds. -->
+    <core />
+
+    <!-- Uncomment to dump the processor count for each job - Linux only. -->
+    <!-- <cpuinfo /> -->
+    <!-- Uncomment to dump information about all processors for each job -
+         this is likely too much data. Linux only. -->
+    <!-- <cpuinfo verbose="true" /> -->
+
+    <!-- Uncomment to dump system memory information for each job - Linux
+         only. -->
+    <!-- <meminfo /> -->
+
+    <!-- Uncomment to record the operating system each job is executed on -
+         Linux only. -->
+    <!-- <uname /> -->
+
+    <!-- Uncomment the following to enable a plugin dumping the complete
+         environment for each job, potentially useful for debugging. -->
+    <!-- <env /> -->
+    <!-- The env plugin can also record more targeted, obviously useful
+         variables as well. -->
+    <!-- <env variables="HOSTNAME,SLURM_CPUS_ON_NODE,SLURM_JOBID" /> -->
+
+    <!-- <collectl /> -->
+    <!-- Collectl (http://collectl.sourceforge.net/) is a powerful monitoring
+         utility capable of gathering numerous system- and process-level
+         statistics of running applications. By default, the Galaxy collectl
+         job metrics plugin will grab a variety of process-level metrics
+         aggregated across all processes corresponding to a job. This
+         behavior is highly customizable - both using the attributes
+         documented below and by simply hacking up the code in
+         lib/galaxy/jobs/metrics.
+
+         Warning: In order to use this plugin, collectl must be available on
+         the compute server the job runs on and on the local Galaxy server as
+         well (unless in this latter case summarize_process_data is set to
+         False).
+
+         Attributes (the following describes attributes that can be used with
+         the collectl job metrics element above to modify its behavior).
+
+         'summarize_process_data': Boolean indicating whether to run collectl
+             in playback mode after jobs complete and gather process-level
+             statistics for the job run. These statistics can be customized
+             with the 'process_statistics' attribute. (defaults to True)
+
+         'saved_logs_path': If set (it is off by default), all collectl logs
+             will be saved to the specified path after jobs complete. These
+             logs can later be replayed using collectl offline to generate
+             full time-series data corresponding to a job run.
+
+         'subsystems': Comma-separated list of collectl subsystems to collect
+             data for. The plugin doesn't currently expose all of them or
+             offer summary data for any of them except 'process', but
+             extensions would be welcome. It may seem pointless to include
+             subsystems besides 'process' since they won't be processed
+             online by Galaxy - but if 'saved_logs_path' is set, these files
+             can be played back at any time.
+
+             Available subsystems - 'process', 'cpu', 'memory', 'network',
+             'disk'. (Default 'process').
+
+             Warning: If you override this, be sure to include 'process'
+             unless 'summarize_process_data' is set to false.
+
+         'process_statistics': If 'summarize_process_data' is set, this
+             attribute can be specified as a comma-separated list to override
+             the statistics that are gathered. Each statistic is of the form
+             X_Y, where X is one of 'min', 'max', 'count', 'avg', or 'sum'
+             and Y is a value from 'S', 'VmSize', 'VmLck', 'VmRSS', 'VmData',
+             'VmStk', 'VmExe', 'VmLib', 'CPU', 'SysT', 'UsrT', 'PCT',
+             'AccumT', 'WKB', 'RKBC', 'WKBC', 'RSYS', 'WSYS', 'CNCL', 'MajF',
+             'MinF'. Consult lib/galaxy/jobs/metrics/collectl/processes.py
+             for more details on what each of these resource types means.
+
+             Defaults to
+             'max_VmSize,avg_VmSize,max_VmRSS,avg_VmRSS,sum_SysT,sum_UsrT,max_PCT,avg_PCT,max_AccumT,sum_RSYS,sum_WSYS'
+             as a variety of statistics roughly describing the CPU and memory
+             usage of the program and VERY ROUGHLY describing its I/O
+             consumption.
+
+         'procfilt_on': By default, Galaxy will tell collectl to collect
+             'process'-level data only for the current user, as identified by
+             'username' (the default). This can be disabled by setting this
+             to 'none' - the plugin will still aggregate process-level
+             statistics only for the job's process tree - but the additional
+             information can still be used offline with 'saved_logs_path' if
+             set. Obscurely, this can also be set to 'uid' to identify the
+             current user to filter on by UID instead of username - this may
+             be needed on some clusters(?).
+
+         'interval': The time (in seconds) between data collection points.
+             Collectl uses a variety of different defaults for different
+             subsystems if this is not set, but process information (likely
+             the most pertinent for Galaxy jobs) will be collected every 60
+             seconds.
+
+         'flush': Interval (in seconds, I think) between when collectl will
+             flush its buffer to disk. If not set, Galaxy overrides this to
+             disable flushing by default.
+
+         'local_collectl_path', 'remote_collectl_path', 'collectl_path':
+             By default, jobs will just assume collectl is on the PATH, but
+             it can be overridden with 'local_collectl_path' and
+             'remote_collectl_path' (or simply 'collectl_path' if it is not
+             on the path but installed in the same location both locally and
+             remotely).
+
+         There are more, increasingly obscure, options, including
+         log_collectl_program_output, interval2, and interval3.
+         Consult the source code for more details.
+    -->
+</job_metrics>
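Tying the commented options above together, a hypothetical job_metrics_conf.xml that records core timings, processor counts, and a few scheduler environment variables (all taken from the commented examples in the sample; enable only what you need) would be:

    <?xml version="1.0"?>
    <job_metrics>
        <core />
        <cpuinfo />
        <env variables="HOSTNAME,SLURM_CPUS_ON_NODE,SLURM_JOBID" />
    </job_metrics>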
This diff is so big that we needed to truncate the remainder.

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--

This is a commit notification from bitbucket.org. You are receiving this because you have the service enabled, addressing the recipient of this email.