galaxy-dev
October 2008: 4 participants, 57 discussions
details: http://www.bx.psu.edu/hg/galaxy/rev/6259ebdd0e99
changeset: 1585:6259ebdd0e99
user: Dan Blankenberg <dan@bx.psu.edu>
date: Thu Oct 30 13:35:19 2008 -0400
description:
Fix typo in call to set_meta.
1 file affected in this change:
lib/galaxy/jobs/__init__.py
diffs (12 lines):
diff -r 95bdb18ee62b -r 6259ebdd0e99 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py Wed Oct 29 17:32:09 2008 -0400
+++ b/lib/galaxy/jobs/__init__.py Thu Oct 30 13:35:19 2008 -0400
@@ -423,7 +423,7 @@
dataset.blurb = "error"
elif dataset.has_data():
# Only set metadata values if they are missing...
- dataset.set_meta( overwrite_exisiting = False )
+ dataset.set_meta( overwrite = False )
dataset.set_peek()
else:
dataset.blurb = "empty"
details: http://www.bx.psu.edu/hg/galaxy/rev/4e44d29377e3
changeset: 1586:4e44d29377e3
user: Dan Blankenberg <dan@bx.psu.edu>
date: Thu Oct 30 14:46:57 2008 -0400
description:
Annotation profiler: Better handling of the case where regions extend beyond known chromosome boundaries.
1 file affected in this change:
tools/annotation_profiler/annotation_profiler_for_interval.py
diffs (55 lines):
diff -r 6259ebdd0e99 -r 4e44d29377e3 tools/annotation_profiler/annotation_profiler_for_interval.py
--- a/tools/annotation_profiler/annotation_profiler_for_interval.py Thu Oct 30 13:35:19 2008 -0400
+++ b/tools/annotation_profiler/annotation_profiler_for_interval.py Thu Oct 30 14:46:57 2008 -0400
@@ -121,14 +121,27 @@
self.table_chromosome_size = {} #dict of dict of table:chrom containing total coverage of table for a chrom
self.table_chromosome_count = {} #dict of dict of table:chrom containing total number of coverage ranges of table for a chrom
self.table_regions_overlaped_count = {} #total number of table regions overlaping user's input intervals (non unique)
- self.interval_table_overlap_count = {} #total number of user input intervals which overlap table
+ self.interval_table_overlap_count = {} #total number of user input intervals which overlap table
+ self.region_size_errors = {} #dictionary of lists of invalid ranges by chromosome
def add_region( self, chrom, start, end ):
- self.total_interval_size += ( end - start )
+ chrom_length = self.chrom_lengths.get( chrom )
+ region_start = min( start, chrom_length )
+ region_end = min( end, chrom_length )
+ region_length = region_end - region_start
+
+ if region_length < 1 or region_start != start or region_end != end:
+ if chrom not in self.region_size_errors:
+ self.region_size_errors[chrom] = []
+ self.region_size_errors[chrom].append( ( start, end ) )
+ if region_length < 1: return
+
+ self.total_interval_size += region_length
self.total_interval_count += 1
if chrom not in self.chromosome_coverage:
- self.chromosome_coverage[chrom] = bx.bitset.BitSet( self.chrom_lengths.get( chrom ) )
- self.chromosome_coverage[chrom].set_range( start, end - start )
- for table_name, coverage, regions in self.coverage_reader.iter_table_coverage_regions_by_region( chrom, start, end ):
+ self.chromosome_coverage[chrom] = bx.bitset.BitSet( chrom_length )
+
+ self.chromosome_coverage[chrom].set_range( region_start, region_length )
+ for table_name, coverage, regions in self.coverage_reader.iter_table_coverage_regions_by_region( chrom, region_start, region_end ):
if table_name not in self.table_coverage:
self.table_coverage[table_name] = 0
self.table_chromosome_size[table_name] = {}
@@ -213,7 +226,17 @@
if keep_empty or total_coverage:
#only output tables that have atleast 1 base covered unless empty are requested
out.write( "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % ( table_name, table_chromosome_size, table_chromosome_count, table_region_coverage, table_region_count, total_interval_count, total_interval_size, total_coverage, table_regions_overlaped_count, interval_region_overlap_count, nr_interval_count, nr_interval_size, nr_coverage, nr_table_regions_overlaped_count, nr_interval_table_overlap_count ) )
- out.close()
+ out.close()
+
+ #report chrom size errors as needed:
+ if table_coverage_summary.region_size_errors:
+ print "Regions provided extended beyond known chromosome lengths, and have been truncated as necessary, for the following intervals:"
+ for chrom, regions in table_coverage_summary.region_size_errors.items():
+ if len( regions ) > 3:
+ extra_region_info = ", ... "
+ else:
+ extra_region_info = ""
+ print "%s has max length of %s, exceeded by %s%s." % ( chrom, chrom_lengths.get( chrom ), ", ".join( map( str, regions[:3] ) ), extra_region_info )
class ChromosomeLengths:
def __init__( self, filename ):
details: http://www.bx.psu.edu/hg/galaxy/rev/0da612f8a78e
changeset: 1587:0da612f8a78e
user: guru
date: Thu Oct 30 16:03:11 2008 -0400
description:
Added new functional tests for all gops tools using BitsetSafeReaderWrapper. These tests cover cases where datasets have the chr, start, and end values in non-standard columns.
12 files affected in this change:
test-data/2_mod.bed
test-data/gops_complement_out_diffCols.dat
test-data/gops_coverage_out_diffCols.interval
test-data/gops_intersect_diffCols.bed
test-data/gops_intersect_p_diffCols.bed
test-data/gops_merge_diffCols.dat
test-data/gops_subtract_diffCols.dat
tools/new_operations/complement.xml
tools/new_operations/coverage.xml
tools/new_operations/intersect.xml
tools/new_operations/merge.xml
tools/new_operations/subtract.xml
diffs (530 lines):
diff -r 4e44d29377e3 -r 0da612f8a78e test-data/2_mod.bed
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/2_mod.bed Thu Oct 30 16:03:11 2008 -0400
@@ -0,0 +1,69 @@
+#chr name score strand start end
+chr1 NM_005997_cds_0_0_chr1_147962193_r 0 - 147962192 147962580
+chr1 BC007833_cds_0_0_chr1_147984546_f 0 + 147984545 147984630
+chr1 AJ011123_cds_0_0_chr1_148078401_r 0 - 148078400 148078582
+chr1 NM_002796_cds_0_0_chr1_148185137_f 0 + 148185136 148185276
+chr10 AY029205_cds_0_0_chr10_55251624_r 0 - 55251623 55253124
+chr11 AK057832_cds_0_0_chr11_116124408_r 0 - 116124407 116124501
+chr11 NM_000040_cds_1_0_chr11_116206509_f 0 + 116206508 116206563
+chr11 BC005380_cds_0_0_chr11_116211734_r 0 - 116211733 116212337
+chr11 AY358331_cds_0_0_chr11_130745912_f 0 + 130745911 130745993
+chr12 NM_052885_cds_0_0_chr12_38440095_r 0 - 38440094 38440321
+chr12 AY792511_cds_0_0_chr12_38905201_f 0 + 38905200 38905351
+chr13 NM_207440_cds_1_0_chr13_112381695_f 0 + 112381694 112381953
+chr13 NM_032116_cds_0_0_chr13_29680677_r 0 - 29680676 29680875
+chr14 U88895_cds_0_0_chr14_98521865_f 0 + 98521864 98521922
+chr14 NM_022898_cds_0_0_chr14_98710241_r 0 - 98710240 98712285
+chr15 BX537418_cds_0_0_chr15_41486873_r 0 - 41486872 41487060
+chr15 AK223365_cds_0_0_chr15_41673709_f 0 + 41673708 41673857
+chr15 NM_153700_cds_0_0_chr15_41679162_r 0 - 41679161 41679250
+chr15 AK223365_cds_0_0_chr15_41773541_f 0 + 41773540 41773689
+chr16 NM_005332_cds_0_0_chr16_142909_f 0 + 142908 143003
+chr16 BC065198_cds_0_0_chr16_179198_r 0 - 179197 179339
+chr16 AK057165_cds_2_0_chr16_244414_f 0 + 244413 244681
+chr16 AB016929_cds_0_0_chr16_259269_r 0 - 259268 259383
+chr18 NM_001792_cds_0_0_chr18_23786115_r 0 - 23786114 23786321
+chr18 NM_012397_cds_1_0_chr18_59406882_f 0 + 59406881 59407046
+chr18 AB046400_cds_0_0_chr18_59455933_r 0 - 59455932 59456337
+chr18 AY792326_cds_0_0_chr18_59528408_f 0 + 59528407 59528575
+chr19 BC013995_cds_1_0_chr19_59068596_f 0 + 59068595 59069564
+chr19 NM_198481_cds_0_0_chr19_59236027_r 0 - 59236026 59236146
+chr19 NM_004542_cds_0_0_chr19_59297999_f 0 + 59297998 59298008
+chr19 AK128544_cds_3_0_chr19_59318206_r 0 - 59318205 59318718
+chr2 NM_006773_cds_0_0_chr2_118288584_f 0 + 118288583 118288668
+chr2 BC005078_cds_0_0_chr2_118390396_r 0 - 118390395 118390500
+chr2 AY125465_cds_0_0_chr2_220108690_f 0 + 220108689 220109267
+chr2 NM_024536_cds_0_0_chr2_220229610_r 0 - 220229609 220230869
+chr20 NM_181466_cds_0_0_chr20_33330414_r 0 - 33330413 33330423
+chr20 BC085019_cds_1_0_chr20_33485371_f 0 + 33485370 33486123
+chr20 NM_000557_cds_1_0_chr20_33488492_r 0 - 33488491 33489122
+chr20 AF022655_cds_1_0_chr20_33513607_f 0 + 33513606 33513792
+chr21 NM_032910_cds_0_0_chr21_32687403_f 0 + 32687402 32687588
+chr21 NM_018277_cds_3_0_chr21_32869642_r 0 - 32869641 32870022
+chr21 NM_005806_cds_1_0_chr21_33321041_f 0 + 33321040 33322012
+chr21 AK129657_cds_0_0_chr21_33728359_r 0 - 33728358 33728724
+chr22 NM_004147_cds_0_0_chr22_30120224_f 0 + 30120223 30120265
+chr22 BC032941_cds_0_0_chr22_30160420_r 0 - 30160419 30160661
+chr22 NM_001007467_cds_1_0_chr22_30228825_f 0 + 30228824 30228916
+chr22 CR456540_cds_0_0_chr22_30340152_r 0 - 30340151 30340376
+chr5 AF099740_cds_11_0_chr5_131311207_r 0 - 131311206 131311254
+chr5 NM_000588_cds_0_0_chr5_131424299_f 0 + 131424298 131424460
+chr5 BC035813_cds_0_0_chr5_131556602_r 0 - 131556601 131556672
+chr5 BC003096_cds_0_0_chr5_131621327_f 0 + 131621326 131621419
+chr6 NM_007214_cds_0_0_chr6_108299601_r 0 - 108299600 108299744
+chr6 NM_003269_cds_0_0_chr6_108594663_f 0 + 108594662 108594687
+chr6 NM_003795_cds_0_0_chr6_108640046_r 0 - 108640045 108640151
+chr6 NM_145315_cds_0_0_chr6_108722977_f 0 + 108722976 108723115
+chr7 AF467257_cds_1_0_chr7_113660518_f 0 + 113660517 113660685
+chr7 NM_003391_cds_0_0_chr7_116512160_r 0 - 116512159 116512389
+chr7 NM_000492_cds_0_0_chr7_116714100_f 0 + 116714099 116714152
+chr7 AF377960_cds_0_0_chr7_116945542_r 0 - 116945541 116945787
+chr8 NM_000127_cds_0_0_chr8_118881132_r 0 - 118881131 118881317
+chr9 BC051300_cds_0_0_chr9_128764157_f 0 + 128764156 128764189
+chr9 NM_014908_cds_0_0_chr9_128787520_r 0 - 128787519 128789136
+chr9 NM_015354_cds_0_0_chr9_128789553_f 0 + 128789552 128789584
+chr9 AB058751_cds_0_0_chr9_128850517_r 0 - 128850516 128850624
+chrX NM_001167_cds_1_0_chrX_122745048_f 0 + 122745047 122745924
+chrX NM_000425_cds_0_0_chrX_152648965_r 0 - 152648964 152649196
+chrX AF101728_cds_0_0_chrX_152691447_f 0 + 152691446 152691471
+chrX BC052303_cds_0_0_chrX_152694030_r 0 - 152694029 152694263
diff -r 4e44d29377e3 -r 0da612f8a78e test-data/gops_complement_out_diffCols.dat
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/gops_complement_out_diffCols.dat Thu Oct 30 16:03:11 2008 -0400
@@ -0,0 +1,88 @@
+chr7 . . + 0 113660517
+chr7 . . + 113660685 116512159
+chr7 . . + 116512389 116714099
+chr7 . . + 116714152 116945541
+chr7 . . + 116945787 536870912
+chr6 . . + 0 108299600
+chr6 . . + 108299744 108594662
+chr6 . . + 108594687 108640045
+chr6 . . + 108640151 108722976
+chr6 . . + 108723115 536870912
+chr5 . . + 0 131311206
+chr5 . . + 131311254 131424298
+chr5 . . + 131424460 131556601
+chr5 . . + 131556672 131621326
+chr5 . . + 131621419 536870912
+chrX . . + 0 122745047
+chrX . . + 122745924 152648964
+chrX . . + 152649196 152691446
+chrX . . + 152691471 152694029
+chrX . . + 152694263 536870912
+chr2 . . + 0 118288583
+chr2 . . + 118288668 118390395
+chr2 . . + 118390500 220108689
+chr2 . . + 220109267 220229609
+chr2 . . + 220230869 536870912
+chr1 . . + 0 147962192
+chr1 . . + 147962580 147984545
+chr1 . . + 147984630 148078400
+chr1 . . + 148078582 148185136
+chr1 . . + 148185276 536870912
+chr21 . . + 0 32687402
+chr21 . . + 32687588 32869641
+chr21 . . + 32870022 33321040
+chr21 . . + 33322012 33728358
+chr21 . . + 33728724 536870912
+chr9 . . + 0 128764156
+chr9 . . + 128764189 128787519
+chr9 . . + 128789136 128789552
+chr9 . . + 128789584 128850516
+chr9 . . + 128850624 536870912
+chr8 . . + 0 118881131
+chr8 . . + 118881317 536870912
+chr13 . . + 0 29680676
+chr13 . . + 29680875 112381694
+chr13 . . + 112381953 536870912
+chr12 . . + 0 38440094
+chr12 . . + 38440321 38905200
+chr12 . . + 38905351 536870912
+chr11 . . + 0 116124407
+chr11 . . + 116124501 116206508
+chr11 . . + 116206563 116211733
+chr11 . . + 116212337 130745911
+chr11 . . + 130745993 536870912
+chr10 . . + 0 55251623
+chr10 . . + 55253124 536870912
+chr22 . . + 0 30120223
+chr22 . . + 30120265 30160419
+chr22 . . + 30160661 30228824
+chr22 . . + 30228916 30340151
+chr22 . . + 30340376 536870912
+chr16 . . + 0 142908
+chr16 . . + 143003 179197
+chr16 . . + 179339 244413
+chr16 . . + 244681 259268
+chr16 . . + 259383 536870912
+chr15 . . + 0 41486872
+chr15 . . + 41487060 41673708
+chr15 . . + 41673857 41679161
+chr15 . . + 41679250 41773540
+chr15 . . + 41773689 536870912
+chr14 . . + 0 98521864
+chr14 . . + 98521922 98710240
+chr14 . . + 98712285 536870912
+chr20 . . + 0 33330413
+chr20 . . + 33330423 33485370
+chr20 . . + 33486123 33488491
+chr20 . . + 33489122 33513606
+chr20 . . + 33513792 536870912
+chr19 . . + 0 59068595
+chr19 . . + 59069564 59236026
+chr19 . . + 59236146 59297998
+chr19 . . + 59298008 59318205
+chr19 . . + 59318718 536870912
+chr18 . . + 0 23786114
+chr18 . . + 23786321 59406881
+chr18 . . + 59407046 59455932
+chr18 . . + 59456337 59528407
+chr18 . . + 59528575 536870912
diff -r 4e44d29377e3 -r 0da612f8a78e test-data/gops_coverage_out_diffCols.interval
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/gops_coverage_out_diffCols.interval Thu Oct 30 16:03:11 2008 -0400
@@ -0,0 +1,65 @@
+chr1 147962192 147962580 CCDS989.1_cds_0_0_chr1_147962193_r 0 - 388 1.0
+chr1 147984545 147984630 CCDS990.1_cds_0_0_chr1_147984546_f 0 + 85 1.0
+chr1 148078400 148078582 CCDS993.1_cds_0_0_chr1_148078401_r 0 - 182 1.0
+chr1 148185136 148185276 CCDS996.1_cds_0_0_chr1_148185137_f 0 + 140 1.0
+chr10 55251623 55253124 CCDS7248.1_cds_0_0_chr10_55251624_r 0 - 1501 1.0
+chr11 116124407 116124501 CCDS8374.1_cds_0_0_chr11_116124408_r 0 - 94 1.0
+chr11 116206508 116206563 CCDS8377.1_cds_0_0_chr11_116206509_f 0 + 55 1.0
+chr11 116211733 116212337 CCDS8378.1_cds_0_0_chr11_116211734_r 0 - 604 1.0
+chr11 1812377 1812407 CCDS7726.1_cds_0_0_chr11_1812378_f 0 + 0 0.0
+chr12 38440094 38440321 CCDS8736.1_cds_0_0_chr12_38440095_r 0 - 227 1.0
+chr13 112381694 112381953 CCDS9526.1_cds_0_0_chr13_112381695_f 0 + 259 1.0
+chr14 98710240 98712285 CCDS9949.1_cds_0_0_chr14_98710241_r 0 - 2045 1.0
+chr15 41486872 41487060 CCDS10096.1_cds_0_0_chr15_41486873_r 0 - 188 1.0
+chr15 41673708 41673857 CCDS10097.1_cds_0_0_chr15_41673709_f 0 + 149 1.0
+chr15 41679161 41679250 CCDS10098.1_cds_0_0_chr15_41679162_r 0 - 89 1.0
+chr15 41826029 41826196 CCDS10101.1_cds_0_0_chr15_41826030_f 0 + 0 0.0
+chr16 142908 143003 CCDS10397.1_cds_0_0_chr16_142909_f 0 + 95 1.0
+chr16 179963 180135 CCDS10401.1_cds_0_0_chr16_179964_r 0 - 0 0.0
+chr16 244413 244681 CCDS10402.1_cds_0_0_chr16_244414_f 0 + 268 1.0
+chr16 259268 259383 CCDS10403.1_cds_0_0_chr16_259269_r 0 - 115 1.0
+chr18 23786114 23786321 CCDS11891.1_cds_0_0_chr18_23786115_r 0 - 207 1.0
+chr18 59406881 59407046 CCDS11985.1_cds_0_0_chr18_59406882_f 0 + 165 1.0
+chr18 59455932 59456337 CCDS11986.1_cds_0_0_chr18_59455933_r 0 - 405 1.0
+chr18 59600586 59600754 CCDS11988.1_cds_0_0_chr18_59600587_f 0 + 0 0.0
+chr19 59068595 59069564 CCDS12866.1_cds_0_0_chr19_59068596_f 0 + 969 1.0
+chr19 59236026 59236146 CCDS12872.1_cds_0_0_chr19_59236027_r 0 - 120 1.0
+chr19 59297998 59298008 CCDS12877.1_cds_0_0_chr19_59297999_f 0 + 10 1.0
+chr19 59302168 59302288 CCDS12878.1_cds_0_0_chr19_59302169_r 0 - 0 0.0
+chr2 118288583 118288668 CCDS2120.1_cds_0_0_chr2_118288584_f 0 + 85 1.0
+chr2 118394148 118394202 CCDS2121.1_cds_0_0_chr2_118394149_r 0 - 0 0.0
+chr2 220190202 220190242 CCDS2441.1_cds_0_0_chr2_220190203_f 0 + 0 0.0
+chr2 220229609 220230869 CCDS2443.1_cds_0_0_chr2_220229610_r 0 - 1260 1.0
+chr20 33330413 33330423 CCDS13249.1_cds_0_0_chr20_33330414_r 0 - 10 1.0
+chr20 33513606 33513792 CCDS13255.1_cds_0_0_chr20_33513607_f 0 + 186 1.0
+chr20 33579500 33579527 CCDS13256.1_cds_0_0_chr20_33579501_r 0 - 0 0.0
+chr20 33593260 33593348 CCDS13257.1_cds_0_0_chr20_33593261_f 0 + 0 0.0
+chr21 32707032 32707192 CCDS13614.1_cds_0_0_chr21_32707033_f 0 + 0 0.0
+chr21 32869641 32870022 CCDS13615.1_cds_0_0_chr21_32869642_r 0 - 381 1.0
+chr21 33321040 33322012 CCDS13620.1_cds_0_0_chr21_33321041_f 0 + 972 1.0
+chr21 33744994 33745040 CCDS13625.1_cds_0_0_chr21_33744995_r 0 - 0 0.0
+chr22 30120223 30120265 CCDS13897.1_cds_0_0_chr22_30120224_f 0 + 42 1.0
+chr22 30160419 30160661 CCDS13898.1_cds_0_0_chr22_30160420_r 0 - 242 1.0
+chr22 30665273 30665360 CCDS13901.1_cds_0_0_chr22_30665274_f 0 + 0 0.0
+chr22 30939054 30939266 CCDS13903.1_cds_0_0_chr22_30939055_r 0 - 0 0.0
+chr5 131424298 131424460 CCDS4149.1_cds_0_0_chr5_131424299_f 0 + 162 1.0
+chr5 131556601 131556672 CCDS4151.1_cds_0_0_chr5_131556602_r 0 - 71 1.0
+chr5 131621326 131621419 CCDS4152.1_cds_0_0_chr5_131621327_f 0 + 93 1.0
+chr5 131847541 131847666 CCDS4155.1_cds_0_0_chr5_131847542_r 0 - 0 0.0
+chr6 108299600 108299744 CCDS5061.1_cds_0_0_chr6_108299601_r 0 - 144 1.0
+chr6 108594662 108594687 CCDS5063.1_cds_0_0_chr6_108594663_f 0 + 25 1.0
+chr6 108640045 108640151 CCDS5064.1_cds_0_0_chr6_108640046_r 0 - 106 1.0
+chr6 108722976 108723115 CCDS5067.1_cds_0_0_chr6_108722977_f 0 + 139 1.0
+chr7 113660517 113660685 CCDS5760.1_cds_0_0_chr7_113660518_f 0 + 168 1.0
+chr7 116512159 116512389 CCDS5771.1_cds_0_0_chr7_116512160_r 0 - 230 1.0
+chr7 116714099 116714152 CCDS5773.1_cds_0_0_chr7_116714100_f 0 + 53 1.0
+chr7 116945541 116945787 CCDS5774.1_cds_0_0_chr7_116945542_r 0 - 246 1.0
+chr8 118881131 118881317 CCDS6324.1_cds_0_0_chr8_118881132_r 0 - 186 1.0
+chr9 128764156 128764189 CCDS6914.1_cds_0_0_chr9_128764157_f 0 + 33 1.0
+chr9 128787519 128789136 CCDS6915.1_cds_0_0_chr9_128787520_r 0 - 1617 1.0
+chr9 128882427 128882523 CCDS6917.1_cds_0_0_chr9_128882428_f 0 + 0 0.0
+chr9 128937229 128937445 CCDS6919.1_cds_0_0_chr9_128937230_r 0 - 0 0.0
+chrX 122745047 122745924 CCDS14606.1_cds_0_0_chrX_122745048_f 0 + 877 1.0
+chrX 152648964 152649196 CCDS14733.1_cds_0_0_chrX_152648965_r 0 - 232 1.0
+chrX 152691446 152691471 CCDS14735.1_cds_0_0_chrX_152691447_f 0 + 25 1.0
+chrX 152694029 152694263 CCDS14736.1_cds_0_0_chrX_152694030_r 0 - 234 1.0
diff -r 4e44d29377e3 -r 0da612f8a78e test-data/gops_intersect_diffCols.bed
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/gops_intersect_diffCols.bed Thu Oct 30 16:03:11 2008 -0400
@@ -0,0 +1,49 @@
+chr1 147962192 147962580 CCDS989.1_cds_0_0_chr1_147962193_r 0 -
+chr1 147984545 147984630 CCDS990.1_cds_0_0_chr1_147984546_f 0 +
+chr1 148078400 148078582 CCDS993.1_cds_0_0_chr1_148078401_r 0 -
+chr1 148185136 148185276 CCDS996.1_cds_0_0_chr1_148185137_f 0 +
+chr10 55251623 55253124 CCDS7248.1_cds_0_0_chr10_55251624_r 0 -
+chr11 116124407 116124501 CCDS8374.1_cds_0_0_chr11_116124408_r 0 -
+chr11 116206508 116206563 CCDS8377.1_cds_0_0_chr11_116206509_f 0 +
+chr11 116211733 116212337 CCDS8378.1_cds_0_0_chr11_116211734_r 0 -
+chr12 38440094 38440321 CCDS8736.1_cds_0_0_chr12_38440095_r 0 -
+chr13 112381694 112381953 CCDS9526.1_cds_0_0_chr13_112381695_f 0 +
+chr14 98710240 98712285 CCDS9949.1_cds_0_0_chr14_98710241_r 0 -
+chr15 41486872 41487060 CCDS10096.1_cds_0_0_chr15_41486873_r 0 -
+chr15 41673708 41673857 CCDS10097.1_cds_0_0_chr15_41673709_f 0 +
+chr15 41679161 41679250 CCDS10098.1_cds_0_0_chr15_41679162_r 0 -
+chr16 142908 143003 CCDS10397.1_cds_0_0_chr16_142909_f 0 +
+chr16 244413 244681 CCDS10402.1_cds_0_0_chr16_244414_f 0 +
+chr16 259268 259383 CCDS10403.1_cds_0_0_chr16_259269_r 0 -
+chr18 23786114 23786321 CCDS11891.1_cds_0_0_chr18_23786115_r 0 -
+chr18 59406881 59407046 CCDS11985.1_cds_0_0_chr18_59406882_f 0 +
+chr18 59455932 59456337 CCDS11986.1_cds_0_0_chr18_59455933_r 0 -
+chr19 59068595 59069564 CCDS12866.1_cds_0_0_chr19_59068596_f 0 +
+chr19 59236026 59236146 CCDS12872.1_cds_0_0_chr19_59236027_r 0 -
+chr19 59297998 59298008 CCDS12877.1_cds_0_0_chr19_59297999_f 0 +
+chr2 118288583 118288668 CCDS2120.1_cds_0_0_chr2_118288584_f 0 +
+chr2 220229609 220230869 CCDS2443.1_cds_0_0_chr2_220229610_r 0 -
+chr20 33330413 33330423 CCDS13249.1_cds_0_0_chr20_33330414_r 0 -
+chr20 33513606 33513792 CCDS13255.1_cds_0_0_chr20_33513607_f 0 +
+chr21 32869641 32870022 CCDS13615.1_cds_0_0_chr21_32869642_r 0 -
+chr21 33321040 33322012 CCDS13620.1_cds_0_0_chr21_33321041_f 0 +
+chr22 30120223 30120265 CCDS13897.1_cds_0_0_chr22_30120224_f 0 +
+chr22 30160419 30160661 CCDS13898.1_cds_0_0_chr22_30160420_r 0 -
+chr5 131424298 131424460 CCDS4149.1_cds_0_0_chr5_131424299_f 0 +
+chr5 131556601 131556672 CCDS4151.1_cds_0_0_chr5_131556602_r 0 -
+chr5 131621326 131621419 CCDS4152.1_cds_0_0_chr5_131621327_f 0 +
+chr6 108299600 108299744 CCDS5061.1_cds_0_0_chr6_108299601_r 0 -
+chr6 108594662 108594687 CCDS5063.1_cds_0_0_chr6_108594663_f 0 +
+chr6 108640045 108640151 CCDS5064.1_cds_0_0_chr6_108640046_r 0 -
+chr6 108722976 108723115 CCDS5067.1_cds_0_0_chr6_108722977_f 0 +
+chr7 113660517 113660685 CCDS5760.1_cds_0_0_chr7_113660518_f 0 +
+chr7 116512159 116512389 CCDS5771.1_cds_0_0_chr7_116512160_r 0 -
+chr7 116714099 116714152 CCDS5773.1_cds_0_0_chr7_116714100_f 0 +
+chr7 116945541 116945787 CCDS5774.1_cds_0_0_chr7_116945542_r 0 -
+chr8 118881131 118881317 CCDS6324.1_cds_0_0_chr8_118881132_r 0 -
+chr9 128764156 128764189 CCDS6914.1_cds_0_0_chr9_128764157_f 0 +
+chr9 128787519 128789136 CCDS6915.1_cds_0_0_chr9_128787520_r 0 -
+chrX 122745047 122745924 CCDS14606.1_cds_0_0_chrX_122745048_f 0 +
+chrX 152648964 152649196 CCDS14733.1_cds_0_0_chrX_152648965_r 0 -
+chrX 152691446 152691471 CCDS14735.1_cds_0_0_chrX_152691447_f 0 +
+chrX 152694029 152694263 CCDS14736.1_cds_0_0_chrX_152694030_r 0 -
diff -r 4e44d29377e3 -r 0da612f8a78e test-data/gops_intersect_p_diffCols.bed
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/gops_intersect_p_diffCols.bed Thu Oct 30 16:03:11 2008 -0400
@@ -0,0 +1,49 @@
+chr1 147962192 147962580 CCDS989.1_cds_0_0_chr1_147962193_r 0 -
+chr1 147984545 147984630 CCDS990.1_cds_0_0_chr1_147984546_f 0 +
+chr1 148078400 148078582 CCDS993.1_cds_0_0_chr1_148078401_r 0 -
+chr1 148185136 148185276 CCDS996.1_cds_0_0_chr1_148185137_f 0 +
+chr10 55251623 55253124 CCDS7248.1_cds_0_0_chr10_55251624_r 0 -
+chr11 116124407 116124501 CCDS8374.1_cds_0_0_chr11_116124408_r 0 -
+chr11 116206508 116206563 CCDS8377.1_cds_0_0_chr11_116206509_f 0 +
+chr11 116211733 116212337 CCDS8378.1_cds_0_0_chr11_116211734_r 0 -
+chr12 38440094 38440321 CCDS8736.1_cds_0_0_chr12_38440095_r 0 -
+chr13 112381694 112381953 CCDS9526.1_cds_0_0_chr13_112381695_f 0 +
+chr14 98710240 98712285 CCDS9949.1_cds_0_0_chr14_98710241_r 0 -
+chr15 41486872 41487060 CCDS10096.1_cds_0_0_chr15_41486873_r 0 -
+chr15 41673708 41673857 CCDS10097.1_cds_0_0_chr15_41673709_f 0 +
+chr15 41679161 41679250 CCDS10098.1_cds_0_0_chr15_41679162_r 0 -
+chr16 142908 143003 CCDS10397.1_cds_0_0_chr16_142909_f 0 +
+chr16 244413 244681 CCDS10402.1_cds_0_0_chr16_244414_f 0 +
+chr16 259268 259383 CCDS10403.1_cds_0_0_chr16_259269_r 0 -
+chr18 23786114 23786321 CCDS11891.1_cds_0_0_chr18_23786115_r 0 -
+chr18 59406881 59407046 CCDS11985.1_cds_0_0_chr18_59406882_f 0 +
+chr18 59455932 59456337 CCDS11986.1_cds_0_0_chr18_59455933_r 0 -
+chr19 59068595 59069564 CCDS12866.1_cds_0_0_chr19_59068596_f 0 +
+chr19 59236026 59236146 CCDS12872.1_cds_0_0_chr19_59236027_r 0 -
+chr19 59297998 59298008 CCDS12877.1_cds_0_0_chr19_59297999_f 0 +
+chr2 118288583 118288668 CCDS2120.1_cds_0_0_chr2_118288584_f 0 +
+chr2 220229609 220230869 CCDS2443.1_cds_0_0_chr2_220229610_r 0 -
+chr20 33330413 33330423 CCDS13249.1_cds_0_0_chr20_33330414_r 0 -
+chr20 33513606 33513792 CCDS13255.1_cds_0_0_chr20_33513607_f 0 +
+chr21 32869641 32870022 CCDS13615.1_cds_0_0_chr21_32869642_r 0 -
+chr21 33321040 33322012 CCDS13620.1_cds_0_0_chr21_33321041_f 0 +
+chr22 30120223 30120265 CCDS13897.1_cds_0_0_chr22_30120224_f 0 +
+chr22 30160419 30160661 CCDS13898.1_cds_0_0_chr22_30160420_r 0 -
+chr5 131424298 131424460 CCDS4149.1_cds_0_0_chr5_131424299_f 0 +
+chr5 131556601 131556672 CCDS4151.1_cds_0_0_chr5_131556602_r 0 -
+chr5 131621326 131621419 CCDS4152.1_cds_0_0_chr5_131621327_f 0 +
+chr6 108299600 108299744 CCDS5061.1_cds_0_0_chr6_108299601_r 0 -
+chr6 108594662 108594687 CCDS5063.1_cds_0_0_chr6_108594663_f 0 +
+chr6 108640045 108640151 CCDS5064.1_cds_0_0_chr6_108640046_r 0 -
+chr6 108722976 108723115 CCDS5067.1_cds_0_0_chr6_108722977_f 0 +
+chr7 113660517 113660685 CCDS5760.1_cds_0_0_chr7_113660518_f 0 +
+chr7 116512159 116512389 CCDS5771.1_cds_0_0_chr7_116512160_r 0 -
+chr7 116714099 116714152 CCDS5773.1_cds_0_0_chr7_116714100_f 0 +
+chr7 116945541 116945787 CCDS5774.1_cds_0_0_chr7_116945542_r 0 -
+chr8 118881131 118881317 CCDS6324.1_cds_0_0_chr8_118881132_r 0 -
+chr9 128764156 128764189 CCDS6914.1_cds_0_0_chr9_128764157_f 0 +
+chr9 128787519 128789136 CCDS6915.1_cds_0_0_chr9_128787520_r 0 -
+chrX 122745047 122745924 CCDS14606.1_cds_0_0_chrX_122745048_f 0 +
+chrX 152648964 152649196 CCDS14733.1_cds_0_0_chrX_152648965_r 0 -
+chrX 152691446 152691471 CCDS14735.1_cds_0_0_chrX_152691447_f 0 +
+chrX 152694029 152694263 CCDS14736.1_cds_0_0_chrX_152694030_r 0 -
diff -r 4e44d29377e3 -r 0da612f8a78e test-data/gops_merge_diffCols.dat
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/gops_merge_diffCols.dat Thu Oct 30 16:03:11 2008 -0400
@@ -0,0 +1,69 @@
+#chr name score strand start end
+chr7 113660517 113660685
+chr7 116512159 116512389
+chr7 116714099 116714152
+chr7 116945541 116945787
+chr6 108299600 108299744
+chr6 108594662 108594687
+chr6 108640045 108640151
+chr6 108722976 108723115
+chr5 131311206 131311254
+chr5 131424298 131424460
+chr5 131556601 131556672
+chr5 131621326 131621419
+chrX 122745047 122745924
+chrX 152648964 152649196
+chrX 152691446 152691471
+chrX 152694029 152694263
+chr2 118288583 118288668
+chr2 118390395 118390500
+chr2 220108689 220109267
+chr2 220229609 220230869
+chr1 147962192 147962580
+chr1 147984545 147984630
+chr1 148078400 148078582
+chr1 148185136 148185276
+chr21 32687402 32687588
+chr21 32869641 32870022
+chr21 33321040 33322012
+chr21 33728358 33728724
+chr9 128764156 128764189
+chr9 128787519 128789136
+chr9 128789552 128789584
+chr9 128850516 128850624
+chr8 118881131 118881317
+chr13 29680676 29680875
+chr13 112381694 112381953
+chr12 38440094 38440321
+chr12 38905200 38905351
+chr11 116124407 116124501
+chr11 116206508 116206563
+chr11 116211733 116212337
+chr11 130745911 130745993
+chr10 55251623 55253124
+chr22 30120223 30120265
+chr22 30160419 30160661
+chr22 30228824 30228916
+chr22 30340151 30340376
+chr16 142908 143003
+chr16 179197 179339
+chr16 244413 244681
+chr16 259268 259383
+chr15 41486872 41487060
+chr15 41673708 41673857
+chr15 41679161 41679250
+chr15 41773540 41773689
+chr14 98521864 98521922
+chr14 98710240 98712285
+chr20 33330413 33330423
+chr20 33485370 33486123
+chr20 33488491 33489122
+chr20 33513606 33513792
+chr19 59068595 59069564
+chr19 59236026 59236146
+chr19 59297998 59298008
+chr19 59318205 59318718
+chr18 23786114 23786321
+chr18 59406881 59407046
+chr18 59455932 59456337
+chr18 59528407 59528575
diff -r 4e44d29377e3 -r 0da612f8a78e test-data/gops_subtract_diffCols.dat
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/test-data/gops_subtract_diffCols.dat Thu Oct 30 16:03:11 2008 -0400
@@ -0,0 +1,16 @@
+chr11 1812377 1812407 CCDS7726.1_cds_0_0_chr11_1812378_f 0 +
+chr15 41826029 41826196 CCDS10101.1_cds_0_0_chr15_41826030_f 0 +
+chr16 179963 180135 CCDS10401.1_cds_0_0_chr16_179964_r 0 -
+chr18 59600586 59600754 CCDS11988.1_cds_0_0_chr18_59600587_f 0 +
+chr19 59302168 59302288 CCDS12878.1_cds_0_0_chr19_59302169_r 0 -
+chr2 118394148 118394202 CCDS2121.1_cds_0_0_chr2_118394149_r 0 -
+chr2 220190202 220190242 CCDS2441.1_cds_0_0_chr2_220190203_f 0 +
+chr20 33579500 33579527 CCDS13256.1_cds_0_0_chr20_33579501_r 0 -
+chr20 33593260 33593348 CCDS13257.1_cds_0_0_chr20_33593261_f 0 +
+chr21 32707032 32707192 CCDS13614.1_cds_0_0_chr21_32707033_f 0 +
+chr21 33744994 33745040 CCDS13625.1_cds_0_0_chr21_33744995_r 0 -
+chr22 30665273 30665360 CCDS13901.1_cds_0_0_chr22_30665274_f 0 +
+chr22 30939054 30939266 CCDS13903.1_cds_0_0_chr22_30939055_r 0 -
+chr5 131847541 131847666 CCDS4155.1_cds_0_0_chr5_131847542_r 0 -
+chr9 128882427 128882523 CCDS6917.1_cds_0_0_chr9_128882428_f 0 +
+chr9 128937229 128937445 CCDS6919.1_cds_0_0_chr9_128937230_r 0 -
diff -r 4e44d29377e3 -r 0da612f8a78e tools/new_operations/complement.xml
--- a/tools/new_operations/complement.xml Thu Oct 30 14:46:57 2008 -0400
+++ b/tools/new_operations/complement.xml Thu Oct 30 16:03:11 2008 -0400
@@ -17,6 +17,11 @@
<param name="input1" value="1.bed" />
<param name="allchroms" value="true" />
<output name="output" file="gops_complement_out.bed" />
+ </test>
+ <test>
+ <param name="input1" value="2_mod.bed" ftype="interval"/>
+ <param name="allchroms" value="true" />
+ <output name="output" file="gops_complement_out_diffCols.dat" />
</test>
<test>
<param name="input1" value="gops_bigint.interval" />
diff -r 4e44d29377e3 -r 0da612f8a78e tools/new_operations/coverage.xml
--- a/tools/new_operations/coverage.xml Thu Oct 30 14:46:57 2008 -0400
+++ b/tools/new_operations/coverage.xml Thu Oct 30 16:03:11 2008 -0400
@@ -18,6 +18,11 @@
<param name="input1" value="1.bed" />
<param name="input2" value="2.bed" />
<output name="output" file="gops_coverage_out.interval" />
+ </test>
+ <test>
+ <param name="input1" value="1.bed" />
+ <param name="input2" value="2_mod.bed" ftype="interval"/>
+ <output name="output" file="gops_coverage_out_diffCols.interval" />
</test>
<test>
<param name="input1" value="gops_bigint.interval" />
diff -r 4e44d29377e3 -r 0da612f8a78e tools/new_operations/intersect.xml
--- a/tools/new_operations/intersect.xml Thu Oct 30 14:46:57 2008 -0400
+++ b/tools/new_operations/intersect.xml Thu Oct 30 16:03:11 2008 -0400
@@ -21,13 +21,27 @@
<data format="input" name="output" metadata_source="input1" />
</outputs>
<code file="operation_filter.py"/>
- <tests>
+ <tests>
+ <test>
+ <param name="input1" value="1.bed" />
+ <param name="input2" value="2.bed" />
+ <param name="min" value="1" />
+ <param name="returntype" value="" />
+ <output name="output" file="gops_intersect_out.bed" />
+ </test>
<test>
<param name="input1" value="1.bed" />
- <param name="input2" value="2.bed" />
+ <param name="input2" value="2_mod.bed" ftype="interval"/>
<param name="min" value="1" />
- <param name="returntype" value="" />
- <output name="output" file="gops_intersect_out.bed" />
+ <param name="returntype" value="" />
+ <output name="output" file="gops_intersect_diffCols.bed" />
+ </test>
+ <test>
+ <param name="input1" value="1.bed" />
+ <param name="input2" value="2_mod.bed" ftype="interval"/>
+ <param name="min" value="1" />
+ <param name="returntype" value="Overlapping pieces of Intervals" />
+ <output name="output" file="gops_intersect_p_diffCols.bed" />
</test>
<test>
<param name="input1" value="1.bed" />
diff -r 4e44d29377e3 -r 0da612f8a78e tools/new_operations/merge.xml
--- a/tools/new_operations/merge.xml Thu Oct 30 14:46:57 2008 -0400
+++ b/tools/new_operations/merge.xml Thu Oct 30 16:03:11 2008 -0400
@@ -19,6 +19,11 @@
<test>
<param name="input1" value="1.bed" />
<output name="output" file="gops-merge.dat" />
+ <param name="returntype" value="true" />
+ </test>
+ <test>
+ <param name="input1" value="2_mod.bed" ftype="interval"/>
+ <output name="output" file="gops_merge_diffCols.dat" />
<param name="returntype" value="true" />
</test>
<test>
diff -r 4e44d29377e3 -r 0da612f8a78e tools/new_operations/subtract.xml
--- a/tools/new_operations/subtract.xml Thu Oct 30 14:46:57 2008 -0400
+++ b/tools/new_operations/subtract.xml Thu Oct 30 16:03:11 2008 -0400
@@ -31,6 +31,13 @@
<param name="min" value="1" />
<param name="returntype" value="" />
<output name="output" file="gops-subtract.dat" />
+ </test>
+ <test>
+ <param name="input1" value="1.bed" />
+ <param name="input2" value="2_mod.bed" ftype="interval"/>
+ <param name="min" value="1" />
+ <param name="returntype" value="" />
+ <output name="output" file="gops_subtract_diffCols.dat" />
</test>
<test>
<param name="input1" value="gops_subtract_bigint.bed" />
details: http://www.bx.psu.edu/hg/galaxy/rev/ea92290c4e10
changeset: 1588:ea92290c4e10
user: Nate Coraor <nate@bx.psu.edu>
date: Thu Oct 30 16:11:44 2008 -0400
description:
Use LD_RUN_PATH when building pbs_python
1 file affected in this change:
scripts/scramble/scripts/pbs_python.py
diffs (13 lines):
diff -r 0da612f8a78e -r ea92290c4e10 scripts/scramble/scripts/pbs_python.py
--- a/scripts/scramble/scripts/pbs_python.py Thu Oct 30 16:03:11 2008 -0400
+++ b/scripts/scramble/scripts/pbs_python.py Thu Oct 30 16:11:44 2008 -0400
@@ -27,6 +27,9 @@
print "scramble(): removing dir:", dir
shutil.rmtree( dir )
+# the build process doesn't set an rpath for libtorque
+os.environ['LD_RUN_PATH'] = os.environ['LIBTORQUE_DIR']
+
print "scramble(): Running pbs_python configure script"
p = subprocess.Popen( args = "sh configure --with-pbsdir=%s" % os.environ['LIBTORQUE_DIR'], shell = True )
r = p.wait()
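For context: LD_RUN_PATH is read by the linker at build time and embedded in the resulting binary as its rpath, so the compiled pbs_python module can locate libtorque at run time without LD_LIBRARY_PATH. A minimal sketch of the same pattern with an explicit environment (variable names follow the diff above):

    import os
    import subprocess

    # Embed LIBTORQUE_DIR as the rpath of whatever the build produces.
    env = dict( os.environ )
    env['LD_RUN_PATH'] = env['LIBTORQUE_DIR']
    p = subprocess.Popen( args = "sh configure --with-pbsdir=%s" % env['LIBTORQUE_DIR'],
                          shell = True, env = env )
    assert p.wait() == 0, "configure failed"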
details: http://www.bx.psu.edu/hg/galaxy/rev/1d113c5386da
changeset: 1589:1d113c5386da
user: Greg Von Kuster <greg@bx.psu.edu>
date: Thu Oct 30 16:17:46 2008 -0400
description:
Migrate central repo to SQLAlchemy 0.4.
18 files affected in this change:
eggs.ini
lib/galaxy/app.py
lib/galaxy/jobs/__init__.py
lib/galaxy/model/custom_types.py
lib/galaxy/model/mapping.py
lib/galaxy/model/mapping_tests.py
lib/galaxy/model/orm/__init__.py
lib/galaxy/model/orm/ext/__init__.py
lib/galaxy/model/orm/ext/assignmapper.py
lib/galaxy/web/controllers/root.py
lib/galaxy/web/controllers/user.py
lib/galaxy/web/controllers/workflow.py
lib/galaxy/web/framework/__init__.py
lib/galaxy/webapps/reports/controllers/jobs.py
lib/galaxy/webapps/reports/controllers/system.py
lib/galaxy/webapps/reports/controllers/users.py
scripts/cleanup_datasets/cleanup_datasets.py
tools/stats/grouping.py
diffs (623 lines):
diff -r ea92290c4e10 -r 1d113c5386da eggs.ini
--- a/eggs.ini Thu Oct 30 16:11:44 2008 -0400
+++ b/eggs.ini Thu Oct 30 16:17:46 2008 -0400
@@ -40,7 +40,7 @@
PasteScript = 1.3.6
Routes = 1.6.3
simplejson = 1.5
-SQLAlchemy = 0.3.11
+SQLAlchemy = 0.4.7p1
Tempita = 0.1
twill = 0.9
WebError = 0.8a
@@ -85,7 +85,7 @@
PasteScript = http://cheeseshop.python.org/packages/source/P/PasteScript/PasteScript-1.3.…
Routes = http://pypi.python.org/packages/source/R/Routes/Routes-1.6.3.tar.gz
simplejson = http://cheeseshop.python.org/packages/source/s/simplejson/simplejson-1.5.ta…
-SQLAlchemy = http://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-0.3.11.tar.gz
+SQLAlchemy = http://pypi.python.org/packages/source/S/SQLAlchemy/SQLAlchemy-0.4.7p1.tar.…
Tempita = http://pypi.python.org/packages/source/T/Tempita/Tempita-0.1.tar.gz
twill = http://darcs.idyll.org/~t/projects/twill-0.9.tar.gz
WebError = http://pypi.python.org/packages/source/W/WebError/WebError-0.8a.tar.gz
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/app.py
--- a/lib/galaxy/app.py Thu Oct 30 16:11:44 2008 -0400
+++ b/lib/galaxy/app.py Thu Oct 30 16:17:46 2008 -0400
@@ -20,7 +20,7 @@
if self.config.database_connection:
db_url = self.config.database_connection
else:
- db_url = "sqlite://%s?isolation_level=IMMEDIATE" % self.config.database
+ db_url = "sqlite:///%s?isolation_level=IMMEDIATE" % self.config.database
# Setup the database engine and ORM
self.model = galaxy.model.mapping.init( self.config.file_path,
db_url,
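The one-character app.py fix above is easy to miss: SQLAlchemy URLs take the form dialect://host/database, and SQLite uses an empty host, so a relative database path needs three slashes and an absolute path four. A quick illustration (paths are hypothetical):

    from sqlalchemy import create_engine

    engine = create_engine( "sqlite:///database/universe.db" )       # relative path
    # engine = create_engine( "sqlite:////var/galaxy/universe.db" )  # absolute path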
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py Thu Oct 30 16:11:44 2008 -0400
+++ b/lib/galaxy/jobs/__init__.py Thu Oct 30 16:17:46 2008 -0400
@@ -97,11 +97,10 @@
model = self.app.model
# Jobs in the NEW state won't be requeued unless we're tracking in the database
if not self.track_jobs_in_database:
- for job in model.Job.select( model.Job.c.state == model.Job.states.NEW ):
+ for job in model.Job.filter( model.Job.c.state==model.Job.states.NEW ).all():
log.debug( "no runner: %s is still in new state, adding to the jobs queue" %job.id )
self.queue.put( ( job.id, job.tool_id ) )
- for job in model.Job.select( (model.Job.c.state == model.Job.states.RUNNING)
- | (model.Job.c.state == model.Job.states.QUEUED) ):
+ for job in model.Job.filter( (model.Job.c.state == model.Job.states.RUNNING) | (model.Job.c.state == model.Job.states.QUEUED) ).all():
if job.job_runner_name is not None:
# why are we passing the queue to the wrapper?
job_wrapper = JobWrapper( job.id, self.app.toolbox.tools_by_id[ job.tool_id ], self )
@@ -136,7 +135,7 @@
new_jobs = []
if self.track_jobs_in_database:
model = self.app.model
- for j in model.Job.select( model.Job.c.state == model.Job.states.NEW ):
+ for j in model.Job.filter( model.Job.c.state==model.Job.states.NEW ).all():
job = JobWrapper( j.id, self.app.toolbox.tools_by_id[ j.tool_id ], self )
new_jobs.append( job )
else:
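The hunk above shows the query-API translation applied throughout this changeset: SQLAlchemy 0.3's Model.select( condition ) becomes Model.filter( condition ).all(), and get_by/select_by become filter_by( ... ).first() (via the assign_mapper shim later in this diff). Restated as a small function over Galaxy's mapped model module (a sketch, not committed code):

    def requeue_new_jobs( model, queue ):
        """SQLAlchemy 0.4 form of the old model.Job.select( ... ) scan."""
        for job in model.Job.filter( model.Job.c.state == model.Job.states.NEW ).all():
            queue.put( ( job.id, job.tool_id ) )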
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/model/custom_types.py
--- a/lib/galaxy/model/custom_types.py Thu Oct 30 16:11:44 2008 -0400
+++ b/lib/galaxy/model/custom_types.py Thu Oct 30 16:17:46 2008 -0400
@@ -18,16 +18,15 @@
self.mutable = mutable
super( JSONType, self).__init__()
- def convert_result_value( self, value, dialect ):
+ def process_bind_param( self, value, dialect ):
if value is None:
return None
- buf = self.impl.convert_result_value( value, dialect )
- return self.jsonifyer.loads( str(buf) )
-
- def convert_bind_param( self, value, dialect ):
+ return self.jsonifyer.dumps( value )
+
+ def process_result_value( self, value, dialect ):
if value is None:
return None
- return self.impl.convert_bind_param( self.jsonifyer.dumps(value), dialect )
+ return self.jsonifyer.loads( str( value ) )
def copy_value( self, value ):
if self.mutable:
@@ -60,10 +59,10 @@
self.mutable = mutable
super( MetadataType, self).__init__()
- def convert_result_value( self, value, dialect ):
+ def process_result_value( self, value, dialect ):
if value is None:
return None
- buf = self.impl.convert_result_value( value, dialect )
+ buf = value
ret = None
try:
ret = self.pickler.loads( str(buf) )
@@ -77,7 +76,7 @@
class TrimmedString( TypeDecorator ):
impl = String
- def convert_bind_param( self, value, dialect ):
+ def process_bind_param( self, value, dialect ):
"""Automatically truncate string values"""
if self.impl.length and value is not None:
value = value[0:self.impl.length]
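As the custom_types.py hunks show, SQLAlchemy 0.4 renamed the TypeDecorator hooks: convert_bind_param/convert_result_value become process_bind_param/process_result_value, and they now receive and return plain values rather than delegating through self.impl. A minimal sketch of a 0.4-style decorator in the spirit of JSONType (simplified, without the mutability handling; simplejson as pinned in eggs.ini):

    import simplejson
    from sqlalchemy import TEXT
    from sqlalchemy.types import TypeDecorator

    class JSONEncodedText( TypeDecorator ):
        """Store a Python structure as JSON text (a simplified JSONType)."""
        impl = TEXT

        def process_bind_param( self, value, dialect ):
            # Python value -> database value
            if value is None:
                return None
            return simplejson.dumps( value )

        def process_result_value( self, value, dialect ):
            # database value -> Python value
            if value is None:
                return None
            return simplejson.loads( str( value ) )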
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py Thu Oct 30 16:11:44 2008 -0400
+++ b/lib/galaxy/model/mapping.py Thu Oct 30 16:17:46 2008 -0400
@@ -5,23 +5,20 @@
import logging
log = logging.getLogger( __name__ )
-import pkg_resources
-pkg_resources.require( "sqlalchemy>=0.3" )
-
import sys
import datetime
-from sqlalchemy.ext.sessioncontext import SessionContext
-from sqlalchemy.ext.assignmapper import assign_mapper
-from sqlalchemy.ext.orderinglist import ordering_list
-
-from sqlalchemy import *
from galaxy.model import *
+from galaxy.model.orm import *
+from galaxy.model.orm.ext.assignmapper import *
from galaxy.model.custom_types import *
from galaxy.util.bunch import Bunch
-metadata = DynamicMetaData( threadlocal=False )
-context = SessionContext( create_session )
+metadata = MetaData()
+context = Session = scoped_session( sessionmaker( autoflush=False, transactional=False ) )
+
+# For backward compatibility with "context.current"
+context.current = Session
dialect_to_egg = {
"sqlite" : "pysqlite>=2",
@@ -120,15 +117,15 @@
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "history_id", Integer, ForeignKey( "history.id" ), index=True ),
Column( "tool_id", String( 255 ) ),
- Column( "tool_version", String, default="1.0.0" ),
+ Column( "tool_version", TEXT, default="1.0.0" ),
Column( "state", String( 64 ) ),
Column( "info", TrimmedString( 255 ) ),
- Column( "command_line", String() ),
+ Column( "command_line", TEXT ),
Column( "param_filename", String( 1024 ) ),
Column( "runner_name", String( 255 ) ),
- Column( "stdout", String() ),
- Column( "stderr", String() ),
- Column( "traceback", String() ),
+ Column( "stdout", TEXT ),
+ Column( "stderr", TEXT ),
+ Column( "traceback", TEXT ),
Column( "session_id", Integer, ForeignKey( "galaxy_session.id" ), index=True, nullable=True ),
Column( "job_runner_name", String( 255 ) ),
Column( "job_runner_external_id", String( 255 ) ) )
@@ -188,7 +185,7 @@
Column( "user_id", Integer, ForeignKey( "galaxy_user.id" ), index=True, nullable=False ),
Column( "latest_workflow_id", Integer,
ForeignKey( "workflow.id", use_alter=True, name='stored_workflow_latest_workflow_id_fk' ), index=True ),
- Column( "name", String ),
+ Column( "name", TEXT ),
Column( "deleted", Boolean, default=False ),
)
@@ -197,7 +194,7 @@
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "stored_workflow_id", Integer, ForeignKey( "stored_workflow.id" ), index=True, nullable=False ),
- Column( "name", String ),
+ Column( "name", TEXT ),
Column( "has_cycles", Boolean ),
Column( "has_errors", Boolean )
)
@@ -208,8 +205,8 @@
Column( "update_time", DateTime, default=now, onupdate=now ),
Column( "workflow_id", Integer, ForeignKey( "workflow.id" ), index=True, nullable=False ),
Column( "type", String(64) ),
- Column( "tool_id", String ),
- Column( "tool_version", String ), # Reserved for future
+ Column( "tool_id", TEXT ),
+ Column( "tool_version", TEXT ), # Reserved for future
Column( "tool_inputs", JSONType ),
Column( "tool_errors", JSONType ),
Column( "position", JSONType ),
@@ -222,8 +219,8 @@
Column( "id", Integer, primary_key=True ),
Column( "output_step_id", Integer, ForeignKey( "workflow_step.id" ), index=True ),
Column( "input_step_id", Integer, ForeignKey( "workflow_step.id" ), index=True ),
- Column( "output_name", String ),
- Column( "input_name", String)
+ Column( "output_name", TEXT ),
+ Column( "input_name", TEXT)
)
StoredWorkflowUserShareAssociation.table = Table( "stored_workflow_user_share_connection", metadata,
@@ -240,7 +237,7 @@
MetadataFile.table = Table( "metadata_file", metadata,
Column( "id", Integer, primary_key=True ),
- Column( "name", String ),
+ Column( "name", TEXT ),
Column( "hda_id", Integer, ForeignKey( "history_dataset_association.id" ), index=True, nullable=True ),
Column( "create_time", DateTime, default=now ),
Column( "update_time", DateTime, index=True, default=now, onupdate=now ),
@@ -257,9 +254,7 @@
dataset=relation(
Dataset,
primaryjoin=( Dataset.table.c.id == HistoryDatasetAssociation.table.c.dataset_id ), lazy=False ),
- history=relation(
- History,
- primaryjoin=( History.table.c.id == HistoryDatasetAssociation.table.c.history_id ) ),
+ # .history defined in History mapper
copied_to_history_dataset_associations=relation(
HistoryDatasetAssociation,
primaryjoin=( HistoryDatasetAssociation.table.c.copied_from_history_dataset_association_id == HistoryDatasetAssociation.table.c.id ),
@@ -380,11 +375,12 @@
Override __next_hid to generate from the database in a concurrency
safe way.
"""
- conn = self.table.engine.contextual_connect()
+ conn = object_session( self ).connection()
+ table = self.table
trans = conn.begin()
try:
- next_hid = select( [self.c.hid_counter], self.c.id == self.id, for_update=True ).scalar()
- self.table.update( self.c.id == self.id ).execute( hid_counter = ( next_hid + 1 ) )
+ next_hid = select( [table.c.hid_counter], table.c.id == self.id, for_update=True ).scalar()
+ table.update( table.c.id == self.id ).execute( hid_counter = ( next_hid + 1 ) )
trans.commit()
return next_hid
except:
@@ -413,17 +409,21 @@
# Create the database engine
engine = create_engine( url, **engine_options )
# Connect the metadata to the database.
- metadata.connect( engine )
- ## metadata.engine.echo = True
+ metadata.bind = engine
+ # Clear any existing contextual sessions and reconfigure
+ Session.remove()
+ Session.configure( bind=engine )
# Create tables if needed
if create_tables:
metadata.create_all()
# metadata.engine.commit()
# Pack everything into a bunch
result = Bunch( **globals() )
- result.engine = metadata.engine
- result.flush = lambda *args, **kwargs: context.current.flush( *args, **kwargs )
- result.context = context
+ result.engine = engine
+ result.flush = lambda *args, **kwargs: Session.flush( *args, **kwargs )
+ result.session = Session
+ # For backward compatibility with "model.context.current"
+ result.context = Session
result.create_tables = create_tables
return result
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/model/mapping_tests.py
--- a/lib/galaxy/model/mapping_tests.py Thu Oct 30 16:11:44 2008 -0400
+++ b/lib/galaxy/model/mapping_tests.py Thu Oct 30 16:17:46 2008 -0400
@@ -22,13 +22,13 @@
model.context.current.flush()
model.context.current.clear()
# Check
- users = model.User.select()
+ users = model.User.query().all()
assert len( users ) == 1
assert users[0].email == "james(a)foo.bar.baz"
assert users[0].password == "password"
assert len( users[0].histories ) == 1
assert users[0].histories[0].name == "History 1"
- hists = model.History.select()
+ hists = model.History.query().all()
assert hists[0].name == "History 1"
assert hists[1].name == ( "H" * 255 )
assert hists[0].user == users[0]
@@ -40,7 +40,7 @@
hists[1].name = "History 2b"
model.context.current.flush()
model.context.current.clear()
- hists = model.History.select()
+ hists = model.History.query().all()
assert hists[0].name == "History 1"
assert hists[1].name == "History 2b"
# gvk TODO need to ad test for GalaxySessions, but not yet sure what they should look like.
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/model/orm/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/orm/__init__.py Thu Oct 30 16:17:46 2008 -0400
@@ -0,0 +1,7 @@
+import pkg_resources
+pkg_resources.require( "SQLAlchemy >= 0.4" )
+
+from sqlalchemy import *
+from sqlalchemy.orm import *
+
+from sqlalchemy.ext.orderinglist import ordering_list
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/model/orm/ext/__init__.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/orm/ext/__init__.py Thu Oct 30 16:17:46 2008 -0400
@@ -0,0 +1,3 @@
+"""
+Galaxy specific SQLAlchemy extensions.
+"""
\ No newline at end of file
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/model/orm/ext/assignmapper.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/lib/galaxy/model/orm/ext/assignmapper.py Thu Oct 30 16:17:46 2008 -0400
@@ -0,0 +1,62 @@
+"""
+This is similar to the assignmapper extensions in SQLAclhemy 0.3 and 0.4 but
+with some compatibility fixes. It assumes that the session is a ScopedSession,
+and thus has the "mapper" method to attach contextual mappers to a class. It
+adds additional query and session methods to the class to support the
+SQLAlchemy 0.3 style of access. The following methods which would normally be
+accessed through "Object.query().method()" are available directly through the
+object:
+
+ 'get', 'filter', 'filter_by', 'select', 'select_by',
+ 'selectfirst', 'selectfirst_by', 'selectone', 'selectone_by',
+ 'get_by', 'join_to', 'join_via', 'count', 'count_by',
+ 'options', 'instances'
+
+Additionally, the following Session methods, which normally accept an instance
+or list of instances, are available directly through the objects, e.g.
+"Session.flush( [instance] )" can be performed as "instance.flush()":
+
+ 'refresh', 'expire', 'delete', 'expunge', 'update'
+"""
+
+__all__ = [ 'assign_mapper' ]
+
+from sqlalchemy import util, exceptions
+import types
+from sqlalchemy.orm import mapper, Query
+
+def _monkeypatch_query_method( name, session, class_ ):
+ def do(self, *args, **kwargs):
+ ## util.warn_deprecated('Query methods on the class are deprecated; use %s.query.%s instead' % (class_.__name__, name))
+ return getattr( class_.query, name)(*args, **kwargs)
+ try:
+ do.__name__ = name
+ except:
+ pass
+ if not hasattr(class_, name):
+ setattr(class_, name, classmethod(do))
+
+def _monkeypatch_session_method(name, session, class_, make_list=False):
+ def do(self, *args, **kwargs):
+ if make_list:
+ self = [ self ]
+ return getattr(session, name)( self, *args, **kwargs )
+ try:
+ do.__name__ = name
+ except:
+ pass
+ if not hasattr(class_, name):
+ setattr(class_, name, do)
+
+def assign_mapper( session, class_, *args, **kwargs ):
+ m = class_.mapper = session.mapper( class_, *args, **kwargs )
+ for name in ('get', 'filter', 'filter_by', 'select', 'select_by',
+ 'selectfirst', 'selectfirst_by', 'selectone', 'selectone_by',
+ 'get_by', 'join_to', 'join_via', 'count', 'count_by',
+ 'options', 'instances'):
+ _monkeypatch_query_method(name, session, class_)
+ for name in ('refresh', 'expire', 'delete', 'expunge', 'update'):
+ _monkeypatch_session_method(name, session, class_)
+ for name in ( 'flush', ):
+ _monkeypatch_session_method( name, session, class_, make_list=True )
+ return m
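The docstring above explains the shim in the abstract; here is a hedged end-to-end sketch of wiring a class through it (table and class invented for illustration, session options copied from the mapping.py hunk earlier in this diff):

    from sqlalchemy import MetaData, Table, Column, Integer, String, create_engine
    from sqlalchemy.orm import scoped_session, sessionmaker
    from galaxy.model.orm.ext.assignmapper import assign_mapper

    metadata = MetaData()
    user_table = Table( "example_user", metadata,
        Column( "id", Integer, primary_key=True ),
        Column( "email", String( 255 ) ) )

    class ExampleUser( object ):
        pass

    Session = scoped_session( sessionmaker( autoflush=False, transactional=False ) )
    assign_mapper( Session, ExampleUser, user_table )

    engine = create_engine( "sqlite://" )    # in-memory SQLite
    metadata.bind = engine
    Session.configure( bind=engine )
    metadata.create_all()

    u = ExampleUser()
    u.email = "someone@example.org"
    u.flush()                                # Session.flush( [u] ) via the shim
    assert ExampleUser.filter_by( email="someone@example.org" ).first() is u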
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/web/controllers/root.py
--- a/lib/galaxy/web/controllers/root.py Thu Oct 30 16:11:44 2008 -0400
+++ b/lib/galaxy/web/controllers/root.py Thu Oct 30 16:17:46 2008 -0400
@@ -429,7 +429,7 @@
if not email:
return trans.fill_template("/history/share.mako", histories=histories, email=email, send_to_err=send_to_err)
user = trans.get_user()
- send_to_user = trans.app.model.User.get_by( email = email )
+ send_to_user = trans.app.model.User.filter_by( email=email ).first()
if not send_to_user:
send_to_err = "No such user"
elif user.email == email:
@@ -488,7 +488,7 @@
new_history.user_id = user.id
galaxy_session = trans.get_galaxy_session()
try:
- association = trans.app.model.GalaxySessionToHistoryAssociation.selectone_by( session_id=galaxy_session.id, history_id=new_history.id )
+ association = trans.app.model.GalaxySessionToHistoryAssociation.filter_by( session_id=galaxy_session.id, history_id=new_history.id ).first()
except:
association = None
new_history.add_galaxy_session( galaxy_session, association=association )
@@ -505,7 +505,7 @@
new_history.user_id = None
galaxy_session = trans.get_galaxy_session()
try:
- association = trans.app.model.GalaxySessionToHistoryAssociation.selectone_by( session_id=galaxy_session.id, history_id=new_history.id )
+ association = trans.app.model.GalaxySessionToHistoryAssociation.filter_by( session_id=galaxy_session.id, history_id=new_history.id ).first()
except:
association = None
new_history.add_galaxy_session( galaxy_session, association=association )
@@ -530,7 +530,7 @@
if new_history:
galaxy_session = trans.get_galaxy_session()
try:
- association = trans.app.model.GalaxySessionToHistoryAssociation.selectone_by( session_id=galaxy_session.id, history_id=new_history.id )
+ association = trans.app.model.GalaxySessionToHistoryAssociation.filter_by( session_id=galaxy_session.id, history_id=new_history.id ).first()
except:
association = None
new_history.add_galaxy_session( galaxy_session, association=association )
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/web/controllers/user.py
--- a/lib/galaxy/web/controllers/user.py Thu Oct 30 16:11:44 2008 -0400
+++ b/lib/galaxy/web/controllers/user.py Thu Oct 30 16:17:46 2008 -0400
@@ -53,7 +53,7 @@
email_err = "Please enter a real email address"
elif len( email) > 255:
email_err = "Email address exceeds maximum allowable length"
- elif len( trans.app.model.User.select_by( email=email ) ) > 0:
+ elif trans.app.model.User.filter_by( email=email ).first():
email_err = "User with that email already exists"
elif email != conf_email:
conf_email_err = "Email addresses do not match."
@@ -73,7 +73,7 @@
email_error = password_error = None
# Attempt login
if email or password:
- user = trans.app.model.User.get_by( email = email )
+ user = trans.app.model.User.filter_by( email=email ).first()
if not user:
email_error = "No such user"
elif user.external:
@@ -108,7 +108,7 @@
email_error = "Please enter a real email address"
elif len( email) > 255:
email_error = "Email address exceeds maximum allowable length"
- elif len( trans.app.model.User.select_by( email=email ) ) > 0:
+ elif trans.app.model.User.filter_by( email=email ).first():
email_error = "User with that email already exists"
elif len( password ) < 6:
password_error = "Please use a password of at least 6 characters"
@@ -143,7 +143,7 @@
@web.expose
def reset_password(self, trans, email=None, **kwd):
error = ''
- reset_user = trans.app.model.User.get_by( email = email )
+ reset_user = trans.app.model.User.filter_by( email=email ).first()
user = trans.get_user()
if reset_user:
if user and user.id != reset_user.id:
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/web/controllers/workflow.py
--- a/lib/galaxy/web/controllers/workflow.py Thu Oct 30 16:11:44 2008 -0400
+++ b/lib/galaxy/web/controllers/workflow.py Thu Oct 30 16:17:46 2008 -0400
@@ -44,7 +44,7 @@
# Load workflow from database
stored = get_stored_workflow( trans, id )
if email:
- other = model.User.get_by( email=email )
+ other = model.User.filter_by( email=email ).first()
if not other:
mtype = "error"
msg = ( "User '%s' does not exist" % email )
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/web/framework/__init__.py
--- a/lib/galaxy/web/framework/__init__.py Thu Oct 30 16:11:44 2008 -0400
+++ b/lib/galaxy/web/framework/__init__.py Thu Oct 30 16:17:46 2008 -0400
@@ -24,7 +24,7 @@
import mako.template
import mako.lookup
-pkg_resources.require( "sqlalchemy>=0.3" )
+pkg_resources.require( "SQLAlchemy >= 0.4" )
from sqlalchemy import desc
import logging
@@ -172,7 +172,7 @@
if secure_id:
session_key = self.security.decode_session_key( secure_id )
try:
- galaxy_session = self.app.model.GalaxySession.selectone_by( session_key=session_key )
+ galaxy_session = self.app.model.GalaxySession.filter_by( session_key=session_key ).first()
if galaxy_session and galaxy_session.is_valid and galaxy_session.current_history_id:
history = self.app.model.History.get( galaxy_session.current_history_id )
if history and not history.deleted:
@@ -216,7 +216,7 @@
galaxy_session.user_id = self.user.id
try:
# See if we have already associated the history with the session
- association = self.app.model.GalaxySessionToHistoryAssociation.select_by( session_id=galaxy_session.id, history_id=history.id )[0]
+ association = self.app.model.GalaxySessionToHistoryAssociation.filter_by( session_id=galaxy_session.id, history_id=history.id ).first()
except:
association = None
history.add_galaxy_session( galaxy_session, association=association )
@@ -265,7 +265,7 @@
"""Return the user in $HTTP_REMOTE_USER and create if necessary"""
# remote_user middleware ensures HTTP_REMOTE_USER exists
try:
- user = self.app.model.User.selectone_by( email=self.environ[ 'HTTP_REMOTE_USER' ] )
+ user = self.app.model.User.filter_by( email=self.environ[ 'HTTP_REMOTE_USER' ] ).first()
except:
user = self.app.model.User( email=self.environ[ 'HTTP_REMOTE_USER' ] )
user.set_password_cleartext( 'external' )
@@ -281,7 +281,7 @@
if secure_id:
session_key = self.security.decode_session_key( secure_id )
try:
- galaxy_session = self.app.model.GalaxySession.selectone_by( session_key=session_key )
+ galaxy_session = self.app.model.GalaxySession.filter_by( session_key=session_key ).first()
if galaxy_session and galaxy_session.is_valid and galaxy_session.user_id:
user = self.app.model.User.get( galaxy_session.user_id )
if user:
@@ -321,7 +321,7 @@
session_key = self.security.decode_session_key( secure_id )
try:
# Retrieve the galaxy_session id via the unique session_key
- galaxy_session = self.app.model.GalaxySession.selectone_by( session_key=session_key )
+ galaxy_session = self.app.model.GalaxySession.filter_by( session_key=session_key ).first()
if galaxy_session and galaxy_session.is_valid:
self.__galaxy_session = galaxy_session
except:
@@ -382,7 +382,7 @@
if self.history is not None:
# See if we have already associated the session with the history
try:
- association = self.app.model.GalaxySessionToHistoryAssociation.select_by( session_id=galaxy_session.id, history_id=self.history.id )[0]
+ association = self.app.model.GalaxySessionToHistoryAssociation.filter_by( session_id=galaxy_session.id, history_id=self.history.id ).first()
except:
association = None
galaxy_session.add_history( self.history, association=association )
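All of the query conversions above follow one pattern: the assign_mapper convenience methods of SQLAlchemy 0.3 (get_by, select_by, selectone_by) are gone in 0.4, replaced by the Query API. Below is a minimal, self-contained sketch of the correspondence, assuming SQLAlchemy 0.4 and an illustrative users table (not Galaxy's model; Galaxy's mapped classes expose filter_by() at class level through its mapping extension, while plain SQLAlchemy goes through a Session):

import sqlalchemy as sa
from sqlalchemy.orm import mapper, sessionmaker

engine = sa.create_engine( "sqlite://" )
metadata = sa.MetaData()
users_table = sa.Table( "users", metadata,
                        sa.Column( "id", sa.Integer, primary_key=True ),
                        sa.Column( "email", sa.String( 255 ) ) )
metadata.create_all( engine )

class User( object ):
    pass
mapper( User, users_table )

session = sessionmaker( bind=engine )()
query = session.query( User )

# 0.3 style                         0.4 Query API equivalent
# User.get_by( email=e )        ->  query.filter_by( email=e ).first()  (first match or None)
# User.selectone_by( email=e )  ->  query.filter_by( email=e ).first()  (as converted above)
# User.select_by( email=e )     ->  query.filter_by( email=e ).all()    (list of matches)
assert query.filter_by( email="nobody@example.org" ).first() is None
assert query.filter_by( email="nobody@example.org" ).all() == []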
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/webapps/reports/controllers/jobs.py
--- a/lib/galaxy/webapps/reports/controllers/jobs.py Thu Oct 30 16:11:44 2008 -0400
+++ b/lib/galaxy/webapps/reports/controllers/jobs.py Thu Oct 30 16:17:46 2008 -0400
@@ -5,7 +5,7 @@
from galaxy.webapps.reports.base.controller import *
import galaxy.model
import pkg_resources
-pkg_resources.require( "sqlalchemy>=0.3" )
+pkg_resources.require( "SQLAlchemy >= 0.4" )
import sqlalchemy as sa
import logging
log = logging.getLogger( __name__ )
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/webapps/reports/controllers/system.py
--- a/lib/galaxy/webapps/reports/controllers/system.py Thu Oct 30 16:11:44 2008 -0400
+++ b/lib/galaxy/webapps/reports/controllers/system.py Thu Oct 30 16:17:46 2008 -0400
@@ -2,8 +2,9 @@
from datetime import datetime, timedelta
from galaxy.webapps.reports.base.controller import *
import pkg_resources
-pkg_resources.require( "sqlalchemy>=0.3" )
-from sqlalchemy import eagerload, desc
+pkg_resources.require( "SQLAlchemy >= 0.4" )
+from sqlalchemy.orm import eagerload
+from sqlalchemy import desc
import logging
log = logging.getLogger( __name__ )
diff -r ea92290c4e10 -r 1d113c5386da lib/galaxy/webapps/reports/controllers/users.py
--- a/lib/galaxy/webapps/reports/controllers/users.py Thu Oct 30 16:11:44 2008 -0400
+++ b/lib/galaxy/webapps/reports/controllers/users.py Thu Oct 30 16:17:46 2008 -0400
@@ -3,7 +3,7 @@
from galaxy.webapps.reports.base.controller import *
import galaxy.model
import pkg_resources
-pkg_resources.require( "sqlalchemy>=0.3" )
+pkg_resources.require( "SQLAlchemy >= 0.4" )
import sqlalchemy as sa
import logging
log = logging.getLogger( __name__ )
diff -r ea92290c4e10 -r 1d113c5386da scripts/cleanup_datasets/cleanup_datasets.py
--- a/scripts/cleanup_datasets/cleanup_datasets.py Thu Oct 30 16:11:44 2008 -0400
+++ b/scripts/cleanup_datasets/cleanup_datasets.py Thu Oct 30 16:17:46 2008 -0400
@@ -13,8 +13,8 @@
import galaxy.model.mapping
import pkg_resources
-pkg_resources.require( "sqlalchemy>=0.3" )
-from sqlalchemy import eagerload
+pkg_resources.require( "SQLAlchemy >= 0.4" )
+from sqlalchemy.orm import eagerload
assert sys.version_info[:2] >= ( 2, 4 )
@@ -191,8 +191,6 @@
if errmsg:
errors = True
print errmsg
- else:
- print "%s" % dataset.file_name
else:
dataset.purged = True
dataset.flush()
@@ -258,7 +256,6 @@
print errmsg
else:
dataset_count += 1
- print "%s" % dataset.file_name
else:
dataset.purged = True
dataset.file_size = 0
@@ -302,6 +299,7 @@
else:
# Remove dataset file from disk
os.unlink( dataset.file_name )
+ print "%s" % dataset.file_name
# Mark all associated MetadataFiles as deleted and purged and remove them from disk
print "The following metadata files associated with dataset '%s' have been purged" % dataset.file_name
for hda in dataset.history_associations:
diff -r ea92290c4e10 -r 1d113c5386da tools/stats/grouping.py
--- a/tools/stats/grouping.py Thu Oct 30 16:11:44 2008 -0400
+++ b/tools/stats/grouping.py Thu Oct 30 16:17:46 2008 -0400
@@ -90,7 +90,7 @@
for ii, line in enumerate( file( tmpfile.name )):
if line and not line.startswith( '#' ):
- line = line.strip()
+ line = line.rstrip( '\r\n' )
try:
fields = line.split("\t")
item = fields[group_col]
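The grouping.py hunk is easy to read past: strip() removes leading whitespace as well as the newline, so a row whose first column is empty loses that column entirely when split on tabs, shifting group_col. rstrip( '\r\n' ) removes only the line terminator. A two-assert sketch with illustrative values:

line = "\tA\tB\n"                                               # first column intentionally empty
assert line.strip().split( "\t" ) == [ "A", "B" ]               # leading column silently dropped
assert line.rstrip( "\r\n" ).split( "\t" ) == [ "", "A", "B" ]  # leading column preserved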
details: http://www.bx.psu.edu/hg/galaxy/rev/95bdb18ee62b
changeset: 1584:95bdb18ee62b
user: Nate Coraor <nate(a)bx.psu.edu>
date: Wed Oct 29 17:32:09 2008 -0400
description:
Update bx-python to r456.
1 file(s) affected in this change:
eggs.ini
diffs (18 lines):
diff -r bfb4189733db -r 95bdb18ee62b eggs.ini
--- a/eggs.ini Wed Oct 29 17:20:54 2008 -0400
+++ b/eggs.ini Wed Oct 29 17:32:09 2008 -0400
@@ -55,12 +55,12 @@
MySQL_python = _5.0.51a_static
python_lzo = _static
flup = .dev_r2311
-bx_python = _dev_r449
+bx_python = _dev_r456
nose = .dev_r101
; source location, necessary for scrambling
[source]
-bx_python = http://dist.g2.bx.psu.edu/bx-python_dist-r449.tar.bz2
+bx_python = http://dist.g2.bx.psu.edu/bx-python_dist-r456.tar.bz2
Cheetah = http://umn.dl.sourceforge.net/sourceforge/cheetahtemplate/Cheetah-1.0.tar.gz
DRMAA_python = http://gridengine.sunsource.net/files/documents/7/36/DRMAA-python-0.2.tar.gz
MySQL_python = http://superb-west.dl.sourceforge.net/sourceforge/mysql-python/MySQL-python… http://mysql.mirrors.pair.com/Downloads/MySQL-5.0/mysql-5.0.51a.tar.gz
details: http://www.bx.psu.edu/hg/galaxy/rev/bfb4189733db
changeset: 1583:bfb4189733db
user: Nate Coraor <nate(a)bx.psu.edu>
date: Wed Oct 29 17:20:54 2008 -0400
description:
Fix pbs_python to just use an existing torque installation.
3 file(s) affected in this change:
eggs.ini
scripts/scramble/scripts/pbs_python-macosx.py
scripts/scramble/scripts/pbs_python.py
diffs (290 lines):
diff -r c9f19b8b21ba -r bfb4189733db eggs.ini
--- a/eggs.ini Wed Oct 29 12:35:50 2008 -0400
+++ b/eggs.ini Wed Oct 29 17:20:54 2008 -0400
@@ -52,7 +52,6 @@
[tags]
psycopg2 = _8.2.6_static
pysqlite = _3.5.4_static
-pbs_python = _2.1.8
MySQL_python = _5.0.51a_static
python_lzo = _static
flup = .dev_r2311
@@ -65,7 +64,7 @@
Cheetah = http://umn.dl.sourceforge.net/sourceforge/cheetahtemplate/Cheetah-1.0.tar.gz
DRMAA_python = http://gridengine.sunsource.net/files/documents/7/36/DRMAA-python-0.2.tar.gz
MySQL_python = http://superb-west.dl.sourceforge.net/sourceforge/mysql-python/MySQL-python… http://mysql.mirrors.pair.com/Downloads/MySQL-5.0/mysql-5.0.51a.tar.gz
-pbs_python = http://ftp.sara.nl/pub/outgoing/pbs_python-2.9.4.tar.gz http://www.clusterresources.com/downloads/torque/torque-%s.tar.gz
+pbs_python = http://ftp.sara.nl/pub/outgoing/pbs_python-2.9.4.tar.gz
psycopg2 = http://initd.org/pub/software/psycopg/PSYCOPG-2-0/psycopg2-2.0.6.tar.gz http://ftp8.us.postgresql.org/postgresql/source/v8.2.6/postgresql-8.2.6.tar…
pycrypto = http://www.amk.ca/files/python/crypto/pycrypto-2.0.1.tar.gz
pysqlite = http://initd.org/pub/software/pysqlite/releases/2.3/2.3.5/pysqlite-2.3.5.ta… http://www.sqlite.org/sqlite-source-3_5_4.zip
diff -r c9f19b8b21ba -r bfb4189733db scripts/scramble/scripts/pbs_python-macosx.py
--- a/scripts/scramble/scripts/pbs_python-macosx.py Wed Oct 29 12:35:50 2008 -0400
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,138 +0,0 @@
-import os, sys, subprocess, tarfile, shutil
-
-def unpack_prebuilt_torque():
- if not os.access( TORQUE_BINARY_ARCHIVE, os.F_OK ):
- print "unpack_prebuilt_torque(): No binary archive of Torque available for this platform - will build it now"
- build_torque()
- else:
- print "unpack_prebuilt_torque(): Found a previously built Torque binary archive for this platform."
- print "unpack_prebuilt_torque(): To force Torque to be rebuilt, remove the archive:"
- print " ", TORQUE_BINARY_ARCHIVE
- t = tarfile.open( TORQUE_BINARY_ARCHIVE, "r" )
- for fn in t.getnames():
- t.extract( fn )
- t.close()
-
-def build_torque():
- # untar
- print "build_torque(): Unpacking Torque source archive from:"
- print " ", TORQUE_ARCHIVE
- t = tarfile.open( TORQUE_ARCHIVE, "r" )
- for fn in t.getnames():
- t.extract( fn )
- t.close()
- # patch
- file = os.path.join( "torque-%s" %TORQUE_VERSION, "src", "include", "libpbs.h" )
- print "build_torque(): Patching", file
- if not os.access( "%s.orig" %file, os.F_OK ):
- shutil.copyfile( file, "%s.orig" %file )
- i = open( "%s.orig" %file, "r" )
- o = open( file, "w" )
- for line in i.readlines():
- if line == "#define NCONNECTS 5\n":
- line = "#define NCONNECTS 50\n"
- print >>o, line,
- i.close()
- o.close()
- # configure
- print "build_torque(): Running Torque configure script"
- p = subprocess.Popen( args = CONFIGURE, shell = True, cwd = os.path.join( os.getcwd(), "torque-%s" %TORQUE_VERSION) )
- r = p.wait()
- if r != 0:
- print "build_torque(): Torque configure script failed"
- sys.exit( 1 )
- # compile
- print "build_torque(): Building Torque (make)"
- p = subprocess.Popen( args = "make", shell = True, cwd = os.path.join( os.getcwd(), "torque-%s" %TORQUE_VERSION) )
- r = p.wait()
- # libtool won't pass -arch to the linker, maybe it's an old libtool? whatever, this works
- p = subprocess.Popen( args = "gcc -dynamiclib -undefined dynamic_lookup -o .libs/libtorque.0.0.0.dylib .libs/dis.o .libs/discui_.o .libs/discul_.o .libs/disi10d_.o .libs/disi10l_.o .libs/disiui_.o .libs/disp10d_.o .libs/disp10l_.o .libs/disrcs.o .libs/disrd.o .libs/disrf.o .libs/disrfcs.o .libs/disrfst.o .libs/disrl_.o .libs/disrl.o .libs/disrsc.o .libs/disrsi_.o .libs/disrsi.o .libs/disrsl_.o .libs/disrsl.o .libs/disrss.o .libs/disrst.o .libs/disruc.o .libs/disrui.o .libs/disrul.o .libs/disrus.o .libs/diswcs.o .libs/diswf.o .libs/diswl_.o .libs/diswsi.o .libs/diswsl.o .libs/diswui_.o .libs/diswui.o .libs/diswul.o .libs/advise.o .libs/dec_attrl.o .libs/dec_attropl.o .libs/dec_Authen.o .libs/dec_CpyFil.o .libs/dec_JobCred.o .libs/dec_JobFile.o .libs/dec_JobId.o .libs/dec_JobObit.o .libs/dec_Manage.o .libs/dec_MoveJob.o .libs/dec_MsgJob.o .libs/dec_QueueJob.o .libs/dec_Reg.o .libs/dec_ReqExt.o .libs/dec_ReqHdr.o .libs/dec_Resc.o .libs/dec_rpyc.o .libs/dec_rpys.o .libs/dec
_RunJob.o .libs/dec_Shut.o .libs/dec_Sig.o .libs/dec_Status.o .libs/dec_svrattrl.o .libs/dec_Track.o .libs/enc_attrl.o .libs/enc_attropl.o .libs/enc_CpyFil.o .libs/enc_JobCred.o .libs/enc_JobFile.o .libs/enc_JobId.o .libs/enc_JobObit.o .libs/enc_Manage.o .libs/enc_MoveJob.o .libs/enc_MsgJob.o .libs/enc_QueueJob.o .libs/enc_Reg.o .libs/enc_reply.o .libs/enc_ReqExt.o .libs/enc_ReqHdr.o .libs/enc_RunJob.o .libs/enc_Shut.o .libs/enc_Sig.o .libs/enc_Status.o .libs/enc_svrattrl.o .libs/enc_Track.o .libs/get_svrport.o .libs/nonblock.o .libs/PBS_attr.o .libs/pbsD_alterjo.o .libs/pbsD_asyrun.o .libs/PBS_data.o .libs/pbsD_connect.o .libs/pbsD_deljob.o .libs/pbsD_holdjob.o .libs/pbsD_locjob.o .libs/PBSD_manage2.o .libs/pbsD_manager.o .libs/pbsD_movejob.o .libs/PBSD_manager_caps.o .libs/PBSD_msg2.o .libs/pbsD_msgjob.o .libs/pbsD_orderjo.o .libs/PBSD_rdrpy.o .libs/pbsD_rerunjo.o .libs/pbsD_resc.o .libs/pbsD_rlsjob.o .libs/pbsD_runjob.o .libs/pbsD_selectj.o .libs/PBSD_sig2.o .libs/pbsD_si
gjob.o .libs/pbsD_stagein.o .libs/pbsD_statjob.o .libs/pbsD_statnode.o .libs/pbsD_statque.o .libs/pbsD_statsrv.o .libs/PBSD_status2.o .libs/PBSD_status.o .libs/pbsD_submit.o .libs/PBSD_submit_caps.o .libs/pbsD_termin.o .libs/pbs_geterrmg.o .libs/pbs_statfree.o .libs/rpp.o .libs/tcp_dis.o .libs/tm.o .libs/list_link.o .libs/ck_job_name.o .libs/cnt2server.o .libs/cvtdate.o .libs/get_server.o .libs/locate_job.o .libs/parse_at.o .libs/parse_depend.o .libs/parse_destid.o .libs/parse_equal.o .libs/parse_jobid.o .libs/parse_stage.o .libs/prepare_path.o .libs/prt_job_err.o .libs/set_attr.o .libs/set_resource.o .libs/chk_file_sec.o .libs/log_event.o .libs/pbs_log.o .libs/pbs_messages.o .libs/setup_env.o .libs/get_hostaddr.o .libs/get_hostname.o .libs/md5.o .libs/net_client.o .libs/net_server.o .libs/net_set_clse.o .libs/rm.o .libs/port_forwarding.o -lkvm -Wl,-syslibroot -Wl,/Developer/SDKs/MacOSX10.4u.sdk -install_name /usr/local/lib/libtorque.0.dylib -compatibility_version 1 -curre
nt_version 1.0 -arch i386 -arch ppc", shell = True, cwd = os.getcwd()+"/torque-%s/src/lib/Libpbs" %TORQUE_VERSION )
- r = p.wait()
- p = subprocess.Popen( args = "make", shell = True, cwd = os.path.join( os.getcwd(), "torque-%s" %TORQUE_VERSION) )
- r = p.wait()
- if r != 0:
- print "build_torque(): Building Torque (make) failed"
- sys.exit( 1 )
- # install
- print "build_torque(): Installing Torque (make install_lib)"
- p = subprocess.Popen( args = "make DESTDIR=%s/torque install_lib" %os.getcwd(), shell = True, cwd = os.path.join( os.getcwd(), "torque-%s" %TORQUE_VERSION) )
- r = p.wait()
- if r != 0:
- print "build_torque(): Installing Torque (make install_lib) failed"
- sys.exit( 1 )
- # pack
- print "build_torque(): Creating binary Torque archive for future builds of pbs_python"
- t = tarfile.open( TORQUE_BINARY_ARCHIVE, "w:bz2" )
- t.add( "torque" )
- t.close()
-
-# change back to the build dir
-if os.path.dirname( sys.argv[0] ) != "":
- os.chdir( os.path.dirname( sys.argv[0] ) )
-
-# find setuptools
-scramble_lib = os.path.join( "..", "..", "..", "lib" )
-sys.path.append( scramble_lib )
-try:
- from setuptools import *
- import pkg_resources
-except:
- from ez_setup import use_setuptools
- use_setuptools( download_delay=8, to_dir=scramble_lib )
- from setuptools import *
- import pkg_resources
-
-# get the tag
-if os.access( ".galaxy_tag", os.F_OK ):
- tagfile = open( ".galaxy_tag", "r" )
- tag = tagfile.readline().strip()
-else:
- tag = None
-
-TORQUE_VERSION = ( tag.split( "_" ) )[1]
-TORQUE_ARCHIVE = os.path.abspath( os.path.join( "..", "..", "..", "archives", "torque-%s.tar.gz" %TORQUE_VERSION ) )
-TORQUE_BINARY_ARCHIVE = os.path.abspath( os.path.join( "..", "..", "..", "archives", "torque-%s-%s.tar.bz2" %( TORQUE_VERSION, pkg_resources.get_platform() ) ) )
-CONFIGURE = "CFLAGS='-O -g -isysroot /Developer/SDKs/MacOSX10.4u.sdk -arch i386 -arch ppc' "
-CONFIGURE += "LDFLAGS='-Wl,-syslibroot,/Developer/SDKs/MacOSX10.4u.sdk -arch i386 -arch ppc' "
-CONFIGURE += "./configure --prefix=/usr/local --disable-dependency-tracking --without-tcl --without-tk"
-
-# clean, in case you're running this by hand from a dirty module source dir
-for dir in [ "build", "dist", "torque-%s" %TORQUE_VERSION ]:
- if os.access( dir, os.F_OK ):
- print "scramble_it.py: removing dir:", dir
- shutil.rmtree( dir )
-
-# build/unpack Torque
-unpack_prebuilt_torque()
-
-print "scramble_it(): Running pbs_python configure script"
-p = subprocess.Popen( args = "sh configure --with-pbsdir=torque/usr/local/lib", shell = True )
-r = p.wait()
-if r != 0:
- print "scramble_it(): pbs_python configure script failed"
- sys.exit( 1 )
-
-# version string in 2.9.4 setup.py is wrong
-file = "setup.py"
-print "scramble_it(): Patching", file
-if not os.access( "%s.orig" %file, os.F_OK ):
- shutil.copyfile( file, "%s.orig" %file )
-i = open( "%s.orig" %file, "r" )
-o = open( file, "w" )
-for line in i.readlines():
- if line == " version = '2.9.0',\n":
- line = " version = '2.9.4',\n"
- print >>o, line,
-i.close()
-o.close()
-
-# tag
-me = sys.argv[0]
-sys.argv = [ me ]
-if tag is not None:
- sys.argv.append( "egg_info" )
- sys.argv.append( "--tag-build=%s" %tag )
-sys.argv.append( "bdist_egg" )
-
-# go
-execfile( "setup.py", globals(), locals() )
diff -r c9f19b8b21ba -r bfb4189733db scripts/scramble/scripts/pbs_python.py
--- a/scripts/scramble/scripts/pbs_python.py Wed Oct 29 12:35:50 2008 -0400
+++ b/scripts/scramble/scripts/pbs_python.py Wed Oct 29 17:20:54 2008 -0400
@@ -1,65 +1,9 @@
-import os, sys, subprocess, tarfile, shutil
+import os, sys, shutil, subprocess
-def unpack_prebuilt_torque():
- if not os.access( TORQUE_BINARY_ARCHIVE, os.F_OK ):
- print "unpack_prebuilt_torque(): No binary archive of Torque available for this platform - will build it now"
- build_torque()
- else:
- print "unpack_prebuilt_torque(): Found a previously built Torque binary archive for this platform."
- print "unpack_prebuilt_torque(): To force Torque to be rebuilt, remove the archive:"
- print " ", TORQUE_BINARY_ARCHIVE
- t = tarfile.open( TORQUE_BINARY_ARCHIVE, "r" )
- for fn in t.getnames():
- t.extract( fn )
- t.close()
-
-def build_torque():
- # untar
- print "build_torque(): Unpacking Torque source archive from:"
- print " ", TORQUE_ARCHIVE
- t = tarfile.open( TORQUE_ARCHIVE, "r" )
- for fn in t.getnames():
- t.extract( fn )
- t.close()
- # patch
- file = os.path.join( "torque-%s" %TORQUE_VERSION, "src", "include", "libpbs.h" )
- print "build_torque(): Patching", file
- if not os.access( "%s.orig" %file, os.F_OK ):
- shutil.copyfile( file, "%s.orig" %file )
- i = open( "%s.orig" %file, "r" )
- o = open( file, "w" )
- for line in i.readlines():
- if line == "#define NCONNECTS 5\n":
- line = "#define NCONNECTS 50\n"
- print >>o, line,
- i.close()
- o.close()
- # configure
- print "build_torque(): Running Torque configure script"
- p = subprocess.Popen( args = CONFIGURE, shell = True, cwd = os.path.join( os.getcwd(), "torque-%s" %TORQUE_VERSION) )
- r = p.wait()
- if r != 0:
- print "build_torque(): Torque configure script failed"
- sys.exit( 1 )
- # compile
- print "build_torque(): Building Torque (make)"
- p = subprocess.Popen( args = "make", shell = True, cwd = os.path.join( os.getcwd(), "torque-%s" %TORQUE_VERSION) )
- r = p.wait()
- if r != 0:
- print "build_torque(): Building Torque (make) failed"
- sys.exit( 1 )
- # install
- print "build_torque(): Installing Torque (make install_lib)"
- p = subprocess.Popen( args = "make DESTDIR=%s/torque install_lib" %os.getcwd(), shell = True, cwd = os.path.join( os.getcwd(), "torque-%s" %TORQUE_VERSION) )
- r = p.wait()
- if r != 0:
- print "build_torque(): Installing Torque (make install_lib) failed"
- sys.exit( 1 )
- # pack
- print "build_torque(): Creating binary Torque archive for future builds of pbs_python"
- t = tarfile.open( TORQUE_BINARY_ARCHIVE, "w:bz2" )
- t.add( "torque" )
- t.close()
+if "LIBTORQUE_DIR" not in os.environ:
+ print "scramble(): Please set LIBTORQUE_DIR to the path of the"
+ print "scramble(): directory containing libtorque.so"
+ sys.exit(1)
# change back to the build dir
if os.path.dirname( sys.argv[0] ) != "":
@@ -77,37 +21,22 @@
from setuptools import *
import pkg_resources
-# get the tag
-if os.access( ".galaxy_tag", os.F_OK ):
- tagfile = open( ".galaxy_tag", "r" )
- tag = tagfile.readline().strip()
-else:
- tag = None
-
-TORQUE_VERSION = ( tag.split( "_" ) )[1]
-TORQUE_ARCHIVE = os.path.abspath( os.path.join( "..", "..", "..", "archives", "torque-%s.tar.gz" %TORQUE_VERSION ) )
-TORQUE_BINARY_ARCHIVE = os.path.abspath( os.path.join( "..", "..", "..", "archives", "torque-%s-%s.tar.bz2" %( TORQUE_VERSION, pkg_resources.get_platform() ) ) )
-CONFIGURE = "CFLAGS='-fPIC' ./configure --prefix=/usr/local --without-tcl --without-tk"
-
# clean, in case you're running this by hand from a dirty module source dir
-for dir in [ "build", "dist", "torque-%s" %TORQUE_VERSION ]:
+for dir in [ "build", "dist" ]:
if os.access( dir, os.F_OK ):
- print "scramble_it.py: removing dir:", dir
+ print "scramble(): removing dir:", dir
shutil.rmtree( dir )
-# build/unpack Torque
-unpack_prebuilt_torque()
-
-print "scramble_it(): Running pbs_python configure script"
-p = subprocess.Popen( args = "sh configure --with-pbsdir=torque/usr/local/lib", shell = True )
+print "scramble(): Running pbs_python configure script"
+p = subprocess.Popen( args = "sh configure --with-pbsdir=%s" % os.environ['LIBTORQUE_DIR'], shell = True )
r = p.wait()
if r != 0:
- print "scramble_it(): pbs_python configure script failed"
+ print "scramble(): pbs_python configure script failed"
sys.exit( 1 )
# version string in 2.9.4 setup.py is wrong
file = "setup.py"
-print "scramble_it(): Patching", file
+print "scramble(): Patching", file
if not os.access( "%s.orig" %file, os.F_OK ):
shutil.copyfile( file, "%s.orig" %file )
i = open( "%s.orig" %file, "r" )
@@ -122,9 +51,6 @@
# tag
me = sys.argv[0]
sys.argv = [ me ]
-if tag is not None:
- sys.argv.append( "egg_info" )
- sys.argv.append( "--tag-build=%s" %tag )
sys.argv.append( "bdist_egg" )
# go
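With the bundled torque build removed, scrambling pbs_python now fails fast unless LIBTORQUE_DIR is set. A minimal sketch of driving the script with the variable exported (the path and the direct invocation are assumptions for illustration, not taken from this changeset), in the same Popen/wait style the scramble scripts themselves use:

import os, subprocess

env = dict( os.environ )
env[ "LIBTORQUE_DIR" ] = "/usr/local/lib"   # hypothetical directory containing libtorque.so
p = subprocess.Popen( args = "python scripts/scramble/scripts/pbs_python.py",
                      shell = True, env = env )
if p.wait() != 0:
    raise SystemExit( "scramble(): pbs_python build failed" )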
details: http://www.bx.psu.edu/hg/galaxy/rev/c9f19b8b21ba
changeset: 1582:c9f19b8b21ba
user: Nate Coraor <nate(a)bx.psu.edu>
date: Wed Oct 29 12:35:50 2008 -0400
description:
Re-add JOB_ERROR since some finish hooks use it (albeit improperly).
2 file(s) affected in this change:
lib/galaxy/jobs/__init__.py
lib/galaxy/jobs/runners/pbs.py
diffs (33 lines):
diff -r 4841a9e393c7 -r c9f19b8b21ba lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py Tue Oct 28 14:31:02 2008 -0400
+++ b/lib/galaxy/jobs/__init__.py Wed Oct 29 12:35:50 2008 -0400
@@ -15,7 +15,7 @@
log = logging.getLogger( __name__ )
# States for running a job. These are NOT the same as data states
-JOB_WAIT, JOB_INPUT_ERROR, JOB_INPUT_DELETED, JOB_OK, JOB_READY, JOB_DELETED = 'wait', 'input_error', 'input_deleted', 'ok', 'ready', 'deleted'
+JOB_WAIT, JOB_ERROR, JOB_INPUT_ERROR, JOB_INPUT_DELETED, JOB_OK, JOB_READY, JOB_DELETED = 'wait', 'error', 'input_error', 'input_deleted', 'ok', 'ready', 'deleted'
class Sleeper( object ):
"""
@@ -163,6 +163,8 @@
if job_state == JOB_WAIT:
if not self.track_jobs_in_database:
new_waiting.append( job )
+ elif job_state == JOB_ERROR:
+ log.info( "job %d ended with an error" % job.job_id )
elif job_state == JOB_INPUT_ERROR:
log.info( "job %d unable to run: one or more inputs in error state" % job.job_id )
elif job_state == JOB_INPUT_DELETED:
diff -r 4841a9e393c7 -r c9f19b8b21ba lib/galaxy/jobs/runners/pbs.py
--- a/lib/galaxy/jobs/runners/pbs.py Tue Oct 28 14:31:02 2008 -0400
+++ b/lib/galaxy/jobs/runners/pbs.py Wed Oct 29 12:35:50 2008 -0400
@@ -358,7 +358,7 @@
pbs_job_state.job_wrapper.finish( stdout, stderr )
except:
log.exception("Job wrapper finish method failed")
- job_wrapper.fail("Unable to finish job", exception=True)
+ pbs_job_state.job_wrapper.fail("Unable to finish job", exception=True)
# clean up the pbs files
self.cleanup( ( ofile, efile, job_file ) )
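A compact sketch of the dispatch this changeset restores (names are illustrative, not the full monitor loop): job states are plain strings, and before this change a finish hook that set the 'error' state had no matching branch:

JOB_WAIT, JOB_ERROR, JOB_OK = 'wait', 'error', 'ok'

def describe_state( job_id, job_state ):
    # mirrors the elif chain in monitor_step; JOB_ERROR gets its own branch again
    if job_state == JOB_WAIT:
        return "job %d still waiting" % job_id
    elif job_state == JOB_ERROR:
        return "job %d ended with an error" % job_id
    elif job_state == JOB_OK:
        return "job %d finished" % job_id
    return "job %d in unexpected state %r" % ( job_id, job_state )

assert describe_state( 7, JOB_ERROR ) == "job 7 ended with an error"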
details: http://www.bx.psu.edu/hg/galaxy/rev/91f6455e19e4
changeset: 1580:91f6455e19e4
user: Dan Blankenberg <dan(a)bx.psu.edu>
date: Tue Oct 28 12:57:39 2008 -0400
description:
Use a weakref in metadata to prevent a circular reference from possibly interfering with garbage collection: hda/lda (parent) <--> MetadataCollection (self)
1 file(s) affected in this change:
lib/galaxy/datatypes/metadata.py
diffs (32 lines):
diff -r 778fcc385ab7 -r 91f6455e19e4 lib/galaxy/datatypes/metadata.py
--- a/lib/galaxy/datatypes/metadata.py Tue Oct 28 11:27:42 2008 -0400
+++ b/lib/galaxy/datatypes/metadata.py Tue Oct 28 12:57:39 2008 -0400
@@ -1,4 +1,4 @@
-import sys, logging, copy, shutil
+import sys, logging, copy, shutil, weakref
from galaxy.util import string_as_bool
from galaxy.util.odict import odict
@@ -40,6 +40,13 @@
#initialize dict if needed
if self.parent._metadata is None:
self.parent._metadata = {}
+ def get_parent( self ):
+ if "_parent" in self.__dict__:
+ return self.__dict__["_parent"]()
+ return None
+ def set_parent( self, parent ):
+ self.__dict__["_parent"] = weakref.ref( parent ) # use weakref to prevent a circular reference interfering with garbage collection: hda/lda (parent) <--> MetadataCollection (self) ; needs to be hashable, so cannot use proxy.
+ parent = property( get_parent, set_parent )
@property
def spec( self ):
return self.parent.datatype.metadata_spec
@@ -65,7 +72,7 @@
return self.parent._metadata[name]
def __setattr__( self, name, value ):
if name == "parent":
- self.__dict__[name] = value
+ return self.set_parent( value )
else:
if name in self.spec:
self.parent._metadata[name] = self.spec[name].unwrap( value )
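The weakref pattern above in miniature (class names are illustrative, not Galaxy's hda/lda types): the child keeps weakref.ref( parent ) -- a callable, hashable reference, which weakref.proxy would not be -- and dereferences it on each access, so the parent <--> child cycle cannot keep either object alive:

import weakref

class Collection( object ):
    def __init__( self, parent ):
        # store a weak reference rather than the parent itself
        self.__dict__[ "_parent" ] = weakref.ref( parent )
    def get_parent( self ):
        return self.__dict__[ "_parent" ]()   # returns None once the parent is gone
    parent = property( get_parent )

class Parent( object ):
    def __init__( self ):
        self.metadata = Collection( self )

p = Parent()
assert p.metadata.parent is p   # normal strong access while the parent lives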
details: http://www.bx.psu.edu/hg/galaxy/rev/4841a9e393c7
changeset: 1581:4841a9e393c7
user: Greg Von Kuster <greg(a)bx.psu.edu>
date: Tue Oct 28 14:31:02 2008 -0400
description:
Purge metadata files associated with a dataset when the dataset is purged. Also remembered that log.exception logs the exception itself, so corrected a few things in jobs.__init__.
2 file(s) affected in this change:
lib/galaxy/jobs/__init__.py
scripts/cleanup_datasets/cleanup_datasets.py
diffs (279 lines):
diff -r 91f6455e19e4 -r 4841a9e393c7 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py Tue Oct 28 12:57:39 2008 -0400
+++ b/lib/galaxy/jobs/__init__.py Tue Oct 28 14:31:02 2008 -0400
@@ -62,7 +62,7 @@
else :
self.use_policy = False
log.info("Scheduler policy not defined as expected, defaulting to FIFO")
- except AttributeError, detail : # try may throw AttributeError
+ except AttributeError, detail: # try may throw AttributeError
self.use_policy = False
log.exception("Error while loading scheduler policy class, defaulting to FIFO")
else :
@@ -117,8 +117,8 @@
while self.running:
try:
self.monitor_step()
- except Exception, e:
- log.exception( "Exception in monitor_step: %s" % str( e ) )
+ except:
+ log.exception( "Exception in monitor_step" )
# Sleep
self.sleeper.sleep( 1 )
@@ -184,9 +184,8 @@
job.info = msg
log.error( msg )
except Exception, e:
- msg = "failure running job %d: %s" % ( job.job_id, str( e ) )
- job.info = msg
- log.exception( msg )
+ job.info = "failure running job %d: %s" % ( job.job_id, str( e ) )
+ log.exception( "failure running job %d" % job.job_id )
# Update the waiting list
self.waiting = new_waiting
# If special (e.g. fair) scheduling is enabled, dispatch all jobs
@@ -201,9 +200,8 @@
# squeue is empty, so stop dispatching
break
except Exception, e: # if something else breaks while dispatching
- msg = "failure running job %d: %s" % ( sjob.job_id, str( e ) )
- job.fail( msg )
- log.exception( msg )
+ job.fail( "failure running job %d: %s" % ( sjob.job_id, str( e ) ) )
+ log.exception( "failure running job %d" % sjob.job_id )
def put( self, job_id, tool ):
"""Add a job to the queue (by job identifier)"""
@@ -473,8 +471,8 @@
os.remove( fname )
if self.working_directory is not None:
os.rmdir( self.working_directory )
- except Exception, e:
- log.exception( "Unable to cleanup job %s, exception: %s" % ( str( self.job_id ), str( e ) ) )
+ except:
+ log.exception( "Unable to cleanup job %d" % self.job_id )
def get_command_line( self ):
return self.command_line
@@ -573,8 +571,8 @@
while self.running:
try:
self.monitor_step()
- except Exception, e:
- log.exception( "Exception in monitor_step: %s" % str( e ) )
+ except:
+ log.exception( "Exception in monitor_step" )
# Sleep
self.sleeper.sleep( 1 )
diff -r 91f6455e19e4 -r 4841a9e393c7 scripts/cleanup_datasets/cleanup_datasets.py
--- a/scripts/cleanup_datasets/cleanup_datasets.py Tue Oct 28 12:57:39 2008 -0400
+++ b/scripts/cleanup_datasets/cleanup_datasets.py Tue Oct 28 14:31:02 2008 -0400
@@ -47,6 +47,7 @@
app = CleanupDatasetsApplication( database_connection=database_connection, file_path=file_path )
h = app.model.History
d = app.model.Dataset
+ m = app.model.MetadataFile
cutoff_time = datetime.utcnow() - timedelta( days=options.days )
now = strftime( "%Y-%m-%d %H:%M:%S" )
@@ -63,7 +64,7 @@
print "# Datasets will be removed from disk...\n"
else:
print "# Datasets will NOT be removed from disk...\n"
- purge_histories( h, d, cutoff_time, options.remove_from_disk )
+ purge_histories( h, d, m, cutoff_time, options.remove_from_disk )
elif options.info_purge_datasets:
info_purge_datasets( d, cutoff_time )
elif options.purge_datasets:
@@ -71,7 +72,7 @@
print "# Datasets will be removed from disk...\n"
else:
print "# Datasets will NOT be removed from disk...\n"
- purge_datasets( d, cutoff_time, options.remove_from_disk )
+ purge_datasets( d, m, cutoff_time, options.remove_from_disk )
sys.exit(0)
def info_delete_userless_histories( h, cutoff_time ):
@@ -79,7 +80,7 @@
history_count = 0
dataset_count = 0
where = ( h.table.c.user_id==None ) & ( h.table.c.deleted=='f' ) & ( h.table.c.update_time < cutoff_time )
- histories = h.query().filter( where ).options( eagerload( 'active_datasets' ) )
+ histories = h.query().filter( where ).options( eagerload( 'active_datasets' ) ).all()
print '# The following datasets and associated userless histories will be deleted'
start = time.clock()
@@ -105,13 +106,13 @@
print '# The following datasets and associated userless histories have been deleted'
start = time.clock()
- histories = h.query().filter( h_where ).options( eagerload( 'active_datasets' ) )
+ histories = h.query().filter( h_where ).options( eagerload( 'active_datasets' ) ).all()
for history in histories:
for dataset_assoc in history.active_datasets:
if not dataset_assoc.deleted:
# Mark all datasets as deleted
d_where = ( d.table.c.id==dataset_assoc.dataset_id )
- datasets = d.query().filter( d_where )
+ datasets = d.query().filter( d_where ).all()
for dataset in datasets:
if not dataset.deleted:
dataset.deleted = True
@@ -139,13 +140,13 @@
print '# The following datasets and associated deleted histories will be purged'
start = time.clock()
- histories = h.query().filter( h_where ).options( eagerload( 'datasets' ) )
+ histories = h.query().filter( h_where ).options( eagerload( 'datasets' ) ).all()
for history in histories:
for dataset_assoc in history.datasets:
# Datasets can only be purged if their HistoryDatasetAssociation has been deleted.
if dataset_assoc.deleted:
d_where = ( d.table.c.id==dataset_assoc.dataset_id )
- datasets = d.query().filter( d_where )
+ datasets = d.query().filter( d_where ).all()
for dataset in datasets:
if dataset.purgable and not dataset.purged:
print "%s" % dataset.file_name
@@ -160,7 +161,7 @@
print '# %d histories ( including a total of %d datasets ) will be purged. Freed disk space: ' %( history_count, dataset_count ), disk_space, '\n'
print "Elapsed time: ", stop - start, "\n"
-def purge_histories( h, d, cutoff_time, remove_from_disk ):
+def purge_histories( h, d, m, cutoff_time, remove_from_disk ):
# Purges deleted histories whose update_time is older than the cutoff_time.
# The datasets associated with each history are also purged.
history_count = 0
@@ -172,13 +173,13 @@
print '# The following datasets and associated deleted histories have been purged'
start = time.clock()
- histories = h.query().filter( h_where ).options( eagerload( 'datasets' ) )
+ histories = h.query().filter( h_where ).options( eagerload( 'datasets' ) ).all()
for history in histories:
errors = False
for dataset_assoc in history.datasets:
if dataset_assoc.deleted:
d_where = ( d.table.c.id==dataset_assoc.dataset_id )
- datasets = d.query().filter( d_where )
+ datasets = d.query().filter( d_where ).all()
for dataset in datasets:
if dataset.purgable and not dataset.purged:
file_size = dataset.file_size
@@ -186,7 +187,7 @@
dataset.file_size = 0
if remove_from_disk:
dataset.flush()
- errmsg = purge_dataset( dataset )
+ errmsg = purge_dataset( dataset, m )
if errmsg:
errors = True
print errmsg
@@ -196,6 +197,14 @@
dataset.purged = True
dataset.flush()
print "%s" % dataset.file_name
+ # Mark all associated MetadataFiles as deleted and purged
+ print "The following metadata files associated with dataset '%s' have been marked purged" % dataset.file_name
+ for hda in dataset.history_associations:
+ for metadata_file in m.filter( m.table.c.hda_id==hda.id ).all():
+ metadata_file.deleted = True
+ metadata_file.purged = True
+ metadata_file.flush()
+ print "%s" % metadata_file.file_name()
dataset_count += 1
try:
disk_space += file_size
@@ -218,7 +227,7 @@
print '# The following deleted datasets will be purged'
start = time.clock()
- datasets = d.query().filter( where )
+ datasets = d.query().filter( where ).all()
for dataset in datasets:
print "%s" % dataset.file_name
dataset_count += 1
@@ -230,7 +239,7 @@
print '# %d datasets will be purged. Freed disk space: ' %dataset_count, disk_space, '\n'
print "Elapsed time: ", stop - start, "\n"
-def purge_datasets( d, cutoff_time, remove_from_disk ):
+def purge_datasets( d, m, cutoff_time, remove_from_disk ):
# Purges deleted datasets whose update_time is older than cutoff_time. Files may or may
# not be removed from disk.
dataset_count = 0
@@ -240,11 +249,11 @@
print '# The following deleted datasets have been purged'
start = time.clock()
- datasets = d.query().filter( where )
+ datasets = d.query().filter( where ).all()
for dataset in datasets:
file_size = dataset.file_size
if remove_from_disk:
- errmsg = purge_dataset( dataset )
+ errmsg = purge_dataset( dataset, m )
if errmsg:
print errmsg
else:
@@ -255,6 +264,14 @@
dataset.file_size = 0
dataset.flush()
print "%s" % dataset.file_name
+ # Mark all associated MetadataFiles as deleted and purged
+ print "The following metadata files associated with dataset '%s' have been marked purged" % dataset.file_name
+ for hda in dataset.history_associations:
+ for metadata_file in m.filter( m.table.c.hda_id==hda.id ).all():
+ metadata_file.deleted = True
+ metadata_file.purged = True
+ metadata_file.flush()
+ print "%s" % metadata_file.file_name()
dataset_count += 1
try:
disk_space += file_size
@@ -266,11 +283,10 @@
print '# Freed disk space: ', disk_space, '\n'
print "Elapsed time: ", stop - start, "\n"
-def purge_dataset( dataset ):
+def purge_dataset( dataset, m ):
# Removes the file from disk and updates the database accordingly.
if dataset.deleted:
# Remove files from disk and update the database
- purgable = False
try:
dataset.purged = True
dataset.file_size = 0
@@ -284,15 +300,24 @@
if not shared_data.deleted:
break #only purge when not shared
else:
+ # Remove dataset file from disk
os.unlink( dataset.file_name )
- purgable = True
+ # Mark all associated MetadataFiles as deleted and purged and remove them from disk
+ print "The following metadata files associated with dataset '%s' have been purged" % dataset.file_name
+ for hda in dataset.history_associations:
+ for metadata_file in m.filter( m.table.c.hda_id==hda.id ).all():
+ os.unlink( metadata_file.file_name() )
+ metadata_file.deleted = True
+ metadata_file.purged = True
+ metadata_file.flush()
+ print "%s" % metadata_file.file_name()
+ try:
+ # Remove associated extra files from disk if they exist
+ os.unlink( dataset.extra_files_path )
+ except:
+ pass
except Exception, exc:
return "# Error, exception: %s caught attempting to purge %s\n" %( str( exc ), dataset.file_name )
- try:
- if purgable:
- os.unlink( dataset.extra_files_path )
- except:
- pass
else:
return "# Error: '%s' has not previously been deleted, so it cannot be purged\n" %dataset.file_name
return ""