galaxy-commits
November 2013
- 1 participant
- 208 discussions
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/3d8841746ce9/
Changeset: 3d8841746ce9
Branch: stable
User: Dave Bouvier
Date: 2013-11-12 21:36:13
Summary: Fix display of status images for installed tool shed repositories when using a proxy prefix.
Affected #: 1 file
diff -r 093626ae621f73b9cf904c935eddae98a8823839 -r 3d8841746ce9b65e19a44028dea2eac73a4eddf8 lib/tool_shed/galaxy_install/grids/admin_toolshed_grids.py
--- a/lib/tool_shed/galaxy_install/grids/admin_toolshed_grids.py
+++ b/lib/tool_shed/galaxy_install/grids/admin_toolshed_grids.py
@@ -2,6 +2,7 @@
from galaxy import model, util
from galaxy.web.framework.helpers import iff, grids
+from galaxy.web import url_for
from galaxy.model.orm import or_
import tool_shed.util.shed_util_common as suc
from tool_shed.util import tool_dependency_util
@@ -13,42 +14,42 @@
deprecated_tip_str = 'class="icon-button" title="This repository is deprecated in the Tool Shed"'
else:
deprecated_tip_str = ''
- return '<img src="/static/images/icon_error_sml.gif" %s/>' % deprecated_tip_str
+ return '<img src="%s/images/icon_error_sml.gif" %s/>' % ( url_for( '/static' ), deprecated_tip_str )
def generate_includes_workflows_img_str( include_mouse_over=False ):
if include_mouse_over:
deprecated_tip_str = 'class="icon-button" title="This repository contains exported workflows"'
else:
deprecated_tip_str = ''
- return '<img src="/static/images/fugue/gear.png" %s/>' % deprecated_tip_str
+ return '<img src="%s/images/fugue/gear.png" %s/>' % ( url_for( '/static' ), deprecated_tip_str )
def generate_latest_revision_img_str( include_mouse_over=False ):
if include_mouse_over:
latest_revision_tip_str = 'class="icon-button" title="This is the latest installable revision of this repository"'
else:
latest_revision_tip_str = ''
- return '<img src="/static/june_2007_style/blue/ok_small.png" %s/>' % latest_revision_tip_str
+ return '<img src="%s/june_2007_style/blue/ok_small.png" %s/>' % ( url_for( '/static' ), latest_revision_tip_str )
def generate_revision_updates_img_str( include_mouse_over=False ):
if include_mouse_over:
revision_updates_tip_str = 'class="icon-button" title="Updates are available in the Tool Shed for this revision"'
else:
revision_updates_tip_str = ''
- return '<img src="/static/images/icon_warning_sml.gif" %s/>' % revision_updates_tip_str
+ return '<img src="%s/images/icon_warning_sml.gif" %s/>' % ( url_for( '/static' ), revision_updates_tip_str )
def generate_revision_upgrades_img_str( include_mouse_over=False ):
if include_mouse_over:
revision_upgrades_tip_str = 'class="icon-button" title="A newer installable revision is available for this repository"'
else:
revision_upgrades_tip_str = ''
- return '<img src="/static/images/up.gif" %s/>' % revision_upgrades_tip_str
+ return '<img src="%s/images/up.gif" %s/>' % ( url_for( '/static' ), revision_upgrades_tip_str )
def generate_unknown_img_str( include_mouse_over=False ):
if include_mouse_over:
unknown_tip_str = 'class="icon-button" title="Unable to get information from the Tool Shed"'
else:
unknown_tip_str = ''
- return '<img src="/static/june_2007_style/blue/question-octagon-frame.png" %s/>' % unknown_tip_str
+ return '<img src="%s/june_2007_style/blue/question-octagon-frame.png" %s/>' % ( url_for( '/static' ), unknown_tip_str )
class InstalledRepositoryGrid( grids.Grid ):
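The change above replaces hardcoded "/static" paths with url_for( '/static' ) so the image URLs inherit whatever proxy prefix Galaxy is mounted under. A minimal standalone sketch of the idea (the '/galaxy' prefix and this url_for stub are illustrative stand-ins, not Galaxy's actual implementation):

def url_for(path, prefix='/galaxy'):
    # Stand-in for galaxy.web.url_for: prepend the configured proxy prefix.
    return prefix.rstrip('/') + path

# Hardcoded URL: breaks when Galaxy is served under /galaxy behind a proxy.
broken = '<img src="/static/images/up.gif" />'
# Prefix-aware URL: resolves correctly wherever the app is mounted.
fixed = '<img src="%s/images/up.gif" />' % url_for('/static')
print(fixed)   # <img src="/galaxy/static/images/up.gif" />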
https://bitbucket.org/galaxy/galaxy-central/commits/84f9600dc305/
Changeset: 84f9600dc305
Branch: stable
User: jmchilton
Date: 2013-11-15 15:20:06
Summary: Fix small typo in 9ce5afd (tool shed autoconf action).
Affected #: 1 file
diff -r 3d8841746ce9b65e19a44028dea2eac73a4eddf8 -r 84f9600dc305cd5d47aef72f260826f89e6872eb lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
@@ -505,7 +505,7 @@
if 'prefix=' in configure_opts:
pre_cmd = './configure %s && make && make install' % configure_opts
else:
- pre_cmd = './configure prefix=$INSTALL_DIR %s && make && make install' % configure_opts
+ pre_cmd = './configure --prefix=$INSTALL_DIR %s && make && make install' % configure_opts
cmd = install_environment.build_command( td_common_util.evaluate_template( pre_cmd, install_dir ) )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
if return_code:
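For context on the one-character fix: --prefix=DIR is the documented autoconf option for setting the install root, while a bare prefix=DIR argument relies on configure's optional variable-assignment handling and is not honored by every script, so dependencies could silently install outside $INSTALL_DIR. A sketch of the corrected command assembly, with an illustrative option string:

configure_opts = '--enable-shared'   # illustrative recipe option
pre_cmd = './configure --prefix=$INSTALL_DIR %s && make && make install' % configure_opts
print(pre_cmd)
# ./configure --prefix=$INSTALL_DIR --enable-shared && make && make install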
https://bitbucket.org/galaxy/galaxy-central/commits/69f348f4b3c7/
Changeset: 69f348f4b3c7
User: jmchilton
Date: 2013-11-15 15:20:36
Summary: Merge latest stable.
Affected #: 2 files
diff -r 1560813167c0072773738cdf57103ecc0191b327 -r 69f348f4b3c7fac9c406b1666309f82810566802 lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/fabric_util.py
@@ -655,7 +655,7 @@
if 'prefix=' in configure_opts:
pre_cmd = './configure %s && make && make install' % configure_opts
else:
- pre_cmd = './configure prefix=$INSTALL_DIR %s && make && make install' % configure_opts
+ pre_cmd = './configure --prefix=$INSTALL_DIR %s && make && make install' % configure_opts
cmd = install_environment.build_command( td_common_util.evaluate_template( pre_cmd, install_dir ) )
return_code = handle_command( app, tool_dependency, install_dir, cmd )
if return_code:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jmchilton: Fix bug in 4f7e715 that actually allows multiple repeat values to be specified via API.
by commits-noreply@bitbucket.org 14 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/1560813167c0/
Changeset: 1560813167c0
User: jmchilton
Date: 2013-11-15 06:39:52
Summary: Fix bug in 4f7e715 that actually allows multiple repeat values to be specified via API.
Affected #: 1 file
diff -r 55e0fe991c42a3d1551ae9965d2186b6870aaba9 -r 1560813167c0072773738cdf57103ecc0191b327 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -2084,7 +2084,7 @@
rep_index = 0
while True:
rep_name = "%s_%d" % ( key, rep_index )
- if not any( [ key.startswith(rep_name) for key in incoming.keys() ] ):
+ if not any( [ incoming_key.startswith(rep_name) for incoming_key in incoming.keys() ] ):
break
if rep_index < input.max:
new_state = {}
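The bug being fixed is Python 2 scope leakage: a list comprehension shares the enclosing function's scope, so the comprehension's key rebound the outer key used to build rep_name on the next loop iteration. A minimal reproduction under Python 2 semantics (names are illustrative; Python 3 comprehensions get their own scope and do not leak):

key = 'queries'                        # outer name, as in the loop above
incoming = {'queries_0|input': 'x'}
any([key.startswith('queries_0') for key in incoming.keys()])
print(key)   # Python 2 prints 'queries_0|input': the outer binding was clobbered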
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: jmchilton: PEP-8 fixes for test/base/twilltestcase.py.
by commits-noreply@bitbucket.org 14 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/55e0fe991c42/
Changeset: 55e0fe991c42
User: jmchilton
Date: 2013-11-15 00:49:58
Summary: PEP-8 fixes for test/base/twilltestcase.py.
Still hundreds of lines to fix.
Affected #: 1 file
diff -r a231103b3d6e14578a226f9f6d5a3b68c0896854 -r 55e0fe991c42a3d1551ae9965d2186b6870aaba9 test/base/twilltestcase.py
--- a/test/base/twilltestcase.py
+++ b/test/base/twilltestcase.py
@@ -1,7 +1,20 @@
import pkg_resources
pkg_resources.require( "twill==0.9" )
-import StringIO, os, filecmp, time, unittest, urllib, logging, difflib, tarfile, zipfile, tempfile, re, shutil, subprocess
+import StringIO
+import os
+import filecmp
+import time
+import unittest
+import urllib
+import logging
+import difflib
+import tarfile
+import zipfile
+import tempfile
+import re
+import shutil
+import subprocess
import pprint
import twill
@@ -26,6 +39,7 @@
logging.getLogger( "ClientCookie.cookies" ).setLevel( logging.WARNING )
log = logging.getLogger( __name__ )
+
class TwillTestCase( unittest.TestCase ):
def setUp( self ):
@@ -46,10 +60,10 @@
self.shed_tools_dict = {}
self.keepOutdir = os.environ.get( 'GALAXY_TEST_SAVE', '' )
if self.keepOutdir > '':
- try:
- os.makedirs(self.keepOutdir)
- except:
- pass
+ try:
+ os.makedirs(self.keepOutdir)
+ except:
+ pass
self.home()
# Functions associated with files
@@ -83,7 +97,7 @@
diff_lines = get_lines_diff( diff )
if diff_lines > allowed_diff_count:
diff_slice = diff[0:40]
- #FIXME: This pdf stuff is rather special cased and has not been updated to consider lines_diff
+ #FIXME: This pdf stuff is rather special cased and has not been updated to consider lines_diff
#due to unknown desired behavior when used in conjunction with a non-zero lines_diff
#PDF forgiveness can probably be handled better by not special casing by __extension__ here
#and instead using lines_diff or a regular expression matching
@@ -109,13 +123,13 @@
break
if not valid_diff:
invalid_diff_lines += 1
- log.info('## files diff on %s and %s lines_diff=%d, found diff = %d, found pdf invalid diff = %d' % (file1,file2,allowed_diff_count,diff_lines,invalid_diff_lines))
+ log.info('## files diff on %s and %s lines_diff=%d, found diff = %d, found pdf invalid diff = %d' % (file1, file2, allowed_diff_count, diff_lines, invalid_diff_lines))
if invalid_diff_lines > allowed_diff_count:
# Print out diff_slice so we can see what failed
print "###### diff_slice ######"
raise AssertionError( "".join( diff_slice ) )
else:
- log.info('## files diff on %s and %s lines_diff=%d, found diff = %d' % (file1,file2,allowed_diff_count,diff_lines))
+ log.info('## files diff on %s and %s lines_diff=%d, found diff = %d' % (file1, file2, allowed_diff_count, diff_lines))
for line in diff_slice:
for char in line:
if ord( char ) > 128:
@@ -124,7 +138,7 @@
def files_re_match( self, file1, file2, attributes=None ):
"""Checks the contents of 2 files for differences using re.match"""
- local_file = open( file1, 'U' ).readlines() #regex file
+ local_file = open( file1, 'U' ).readlines() # regex file
history_data = open( file2, 'U' ).readlines()
assert len( local_file ) == len( history_data ), 'Data File and Regular Expression File contain a different number of lines (%s != %s)\nHistory Data (first 40 lines):\n%s' % ( len( local_file ), len( history_data ), ''.join( history_data[:40] ) )
if attributes is None:
@@ -139,11 +153,11 @@
line_diff_count += 1
diffs.append( 'Regular Expression: %s\nData file : %s' % ( local_file[i].rstrip( '\r\n' ), history_data[i].rstrip( '\r\n' ) ) )
if line_diff_count > lines_diff:
- raise AssertionError, "Regular expression did not match data file (allowed variants=%i):\n%s" % ( lines_diff, "".join( diffs ) )
+ raise AssertionError( "Regular expression did not match data file (allowed variants=%i):\n%s" % ( lines_diff, "".join( diffs ) ) )
def files_re_match_multiline( self, file1, file2, attributes=None ):
"""Checks the contents of 2 files for differences using re.match in multiline mode"""
- local_file = open( file1, 'U' ).read() #regex file
+ local_file = open( file1, 'U' ).read() # regex file
if attributes is None:
attributes = {}
if attributes.get( 'sort', False ):
@@ -157,7 +171,7 @@
def files_contains( self, file1, file2, attributes=None ):
"""Checks the contents of file2 for substrings found in file1, on a per-line basis"""
- local_file = open( file1, 'U' ).readlines() #regex file
+ local_file = open( file1, 'U' ).readlines() # regex file
#TODO: allow forcing ordering of contains
history_data = open( file2, 'U' ).read()
lines_diff = int( attributes.get( 'lines_diff', 0 ) )
@@ -167,7 +181,7 @@
if contains not in history_data:
line_diff_count += 1
if line_diff_count > lines_diff:
- raise AssertionError, "Failed to find '%s' in history data. (lines_diff=%i):\n" % ( contains, lines_diff )
+ raise AssertionError( "Failed to find '%s' in history data. (lines_diff=%i):\n" % ( contains, lines_diff ) )
def get_filename( self, filename, shed_tool_id=None ):
if shed_tool_id and self.shed_tools_dict:
@@ -189,8 +203,8 @@
so the tool-data directory of test data files is contained in the installed tool shed repository.
"""
self.visit_url( "%s/tool_runner?tool_id=upload1" % self.url )
- try:
- self.refresh_form( "file_type", ftype ) #Refresh, to support composite files
+ try:
+ self.refresh_form( "file_type", ftype ) # Refresh, to support composite files
tc.fv( "tool_form", "dbkey", dbkey )
if metadata:
for elem in metadata:
@@ -214,9 +228,9 @@
hids = self.get_hids_in_history()
for hid in hids:
try:
- valid_hid = int( hid )
+ int( hid )
except:
- raise AssertionError, "Invalid hid (%s) created when uploading file %s" % ( hid, filename )
+ raise AssertionError( "Invalid hid (%s) created when uploading file %s" % ( hid, filename ) )
# Wait for upload processing to finish (TODO: this should be done in each test case instead)
self.wait()
@@ -231,21 +245,21 @@
self.home()
except Exception, e:
errmsg = "Problem executing upload utility using url_paste: %s" % str( e )
- raise AssertionError( e )
+ raise AssertionError( errmsg )
# Make sure every history item has a valid hid
hids = self.get_hids_in_history()
for hid in hids:
try:
- valid_hid = int( hid )
+ int( hid )
except:
- raise AssertionError, "Invalid hid (%s) created when pasting %s" % ( hid, url_paste )
+ raise AssertionError( "Invalid hid (%s) created when pasting %s" % ( hid, url_paste ) )
# Wait for upload processing to finish (TODO: this should be done in each test case instead)
self.wait()
def json_from_url( self, url ):
self.visit_url( url )
return from_json_string( self.last_page() )
-
+
# Functions associated with histories
def get_history_from_api( self, encoded_history_id=None ):
if encoded_history_id is None:
@@ -255,7 +269,7 @@
def get_latest_history( self ):
return self.json_from_url( '/api/histories' )[ 0 ]
-
+
def find_hda_by_dataset_name( self, name, history=None ):
if history is None:
history = self.get_history_from_api()
@@ -269,7 +283,7 @@
self.visit_page( "history" )
page = self.last_page()
if page.find( 'error' ) > -1:
- raise AssertionError('Errors in the history for user %s' % self.user )
+ raise AssertionError( 'Errors in the history for user %s' % self.user )
def check_history_for_string( self, patt, show_deleted=False ):
"""Breaks patt on whitespace and searches for each element seperately in the history"""
@@ -320,7 +334,7 @@
# twill stores the regex match in a special stack variable
match = twill.namespaces.get_twill_glocals()[1][ '__match__' ]
json_data = from_json_string( match )
- assert check_fn( json_data ), 'failed check_fn: %s' %( check_fn.func_name )
+ assert check_fn( json_data ), 'failed check_fn: %s' % ( check_fn.func_name )
except Exception, exc:
log.error( exc, exc_info=True )
@@ -374,6 +388,7 @@
num_deleted = len( id.split( ',' ) )
self.home()
self.visit_page( "history/list?operation=delete&id=%s" % ( id ) )
+
check_str = 'Deleted %d %s' % ( num_deleted, iff( num_deleted != 1, "histories", "history" ) )
self.check_page_for_string( check_str )
self.home()
@@ -423,7 +438,7 @@
if active_datasets:
self.check_page_for_string( 'Create</a> a new empty history' )
self.check_page_for_string( 'Construct workflow</a> from current history' )
- self.check_page_for_string( 'Copy</a> current history' )
+ self.check_page_for_string( 'Copy</a> current history' )
self.check_page_for_string( 'Share</a> current history' )
self.check_page_for_string( 'Change default permissions</a> for current history' )
if histories_shared_by_others:
@@ -447,7 +462,7 @@
def rename_history( self, id, old_name, new_name ):
"""Rename an existing history"""
self.home()
- self.visit_page( "history/rename?id=%s&name=%s" %( id, new_name ) )
+ self.visit_page( "history/rename?id=%s&name=%s" % ( id, new_name ) )
check_str = 'History: %s renamed to: %s' % ( old_name, urllib.unquote( new_name ) )
self.check_page_for_string( check_str )
self.home()
@@ -476,6 +491,7 @@
for check_str in action_strings_displayed:
self.check_page_for_string( check_str )
tc.fv( 'share_restricted', 'action', action )
+
tc.submit( "share_restricted_button" )
for check_str in action_strings_displayed_after_submit:
self.check_page_for_string( check_str )
@@ -495,6 +511,7 @@
# If we have an action, then we are sharing datasets with users that do not have access permissions on them
tc.fv( 'share_restricted', 'action', action )
tc.submit( "share_restricted_button" )
+
for check_str in action_strings_displayed:
self.check_page_for_string( check_str )
self.home()
@@ -525,6 +542,7 @@
for check_str in strings_displayed:
self.check_page_for_string( check_str )
self.home()
+
def view_stored_deleted_histories( self, strings_displayed=[] ):
self.home()
self.visit_page( "history/list?f-deleted=True" )
@@ -534,12 +552,14 @@
for check_str in strings_displayed:
self.check_page_for_string( check_str )
self.home()
+
def view_shared_histories( self, strings_displayed=[] ):
self.home()
self.visit_page( "history/list_shared" )
for check_str in strings_displayed:
self.check_page_for_string( check_str )
self.home()
+
def copy_history( self, history_id, copy_choice, strings_displayed=[], strings_displayed_after_submit=[] ):
self.home()
self.visit_page( "history/copy?id=%s" % history_id )
@@ -550,6 +570,7 @@
for check_str in strings_displayed_after_submit:
self.check_page_for_string( check_str )
self.home()
+
def make_accessible_via_link( self, history_id, strings_displayed=[], strings_displayed_after_submit=[] ):
self.home()
self.visit_page( "history/list?operation=share+or+publish&id=%s" % history_id )
@@ -562,6 +583,7 @@
for check_str in strings_displayed_after_submit:
self.check_page_for_string( check_str )
self.home()
+
def disable_access_via_link( self, history_id, strings_displayed=[], strings_displayed_after_submit=[] ):
self.home()
self.visit_page( "history/list?operation=share+or+publish&id=%s" % history_id )
@@ -574,6 +596,7 @@
for check_str in strings_displayed_after_submit:
self.check_page_for_string( check_str )
self.home()
+
def import_history_via_url( self, history_id, email, strings_displayed_after_submit=[] ):
self.home()
self.visit_page( "history/imp?&id=%s" % history_id )
@@ -584,9 +607,10 @@
# Functions associated with datasets (history items) and meta data
def _get_job_stream_output( self, hda_id, stream, format ):
self.visit_page( "datasets/%s/%s" % ( self.security.encode_id( hda_id ), stream ) )
+
output = self.last_page()
if format:
- msg = "---------------------- >> begin tool %s << -----------------------\n" % stream
+ msg = "---------------------- >> begin tool %s << -----------------------\n" % stream
msg += output + "\n"
msg += "----------------------- >> end tool %s << ------------------------\n" % stream
else:
@@ -609,43 +633,48 @@
"""Looks for 'patt' in the edit page when editing a dataset"""
data_list = self.get_history_as_data_list()
self.assertTrue( data_list )
- if hid is None: # take last hid
+ if hid is None: # take last hid
elem = data_list[-1]
hid = int( elem.get('hid') )
self.assertTrue( hid )
self.visit_page( "dataset/edit?hid=%s" % hid )
for subpatt in patt.split():
tc.find(subpatt)
+
def delete_history_item( self, hda_id, strings_displayed=[] ):
"""Deletes an item from a history"""
try:
hda_id = int( hda_id )
except:
- raise AssertionError, "Invalid hda_id '%s' - must be int" % hda_id
+ raise AssertionError( "Invalid hda_id '%s' - must be int" % hda_id )
self.visit_url( "%s/datasets/%s/delete?show_deleted_on_refresh=False" % ( self.url, self.security.encode_id( hda_id ) ) )
for check_str in strings_displayed:
self.check_page_for_string( check_str )
+
def undelete_history_item( self, hda_id, strings_displayed=[] ):
"""Un-deletes a deleted item in a history"""
try:
hda_id = int( hda_id )
except:
- raise AssertionError, "Invalid hda_id '%s' - must be int" % hda_id
+ raise AssertionError( "Invalid hda_id '%s' - must be int" % hda_id )
self.visit_url( "%s/datasets/%s/undelete" % ( self.url, self.security.encode_id( hda_id ) ) )
for check_str in strings_displayed:
self.check_page_for_string( check_str )
+
def display_history_item( self, hda_id, strings_displayed=[] ):
"""Displays a history item - simulates eye icon click"""
self.visit_url( '%s/datasets/%s/display/' % ( self.url, self.security.encode_id( hda_id ) ) )
for check_str in strings_displayed:
self.check_page_for_string( check_str )
self.home()
+
def view_history( self, history_id, strings_displayed=[] ):
"""Displays a history for viewing"""
self.visit_url( '%s/history/view?id=%s' % ( self.url, self.security.encode_id( history_id ) ) )
for check_str in strings_displayed:
self.check_page_for_string( check_str )
self.home()
+
def edit_hda_attribute_info( self, hda_id, new_name='', new_info='', new_dbkey='', new_startcol='',
strings_displayed=[], strings_not_displayed=[] ):
"""Edit history_dataset_association attribute information"""
@@ -673,14 +702,16 @@
for check_str in strings_not_displayed:
try:
self.check_page_for_string( check_str )
- raise AssertionError, "String (%s) incorrectly displayed on Edit Attributes page." % check_str
+ raise AssertionError( "String (%s) incorrectly displayed on Edit Attributes page." % check_str )
except:
pass
self.home()
+
def check_hda_attribute_info( self, hda_id, strings_displayed=[] ):
"""Edit history_dataset_association attribute information"""
for check_str in strings_displayed:
self.check_page_for_string( check_str )
+
def auto_detect_metadata( self, hda_id ):
"""Auto-detect history_dataset_association metadata"""
self.home()
@@ -695,6 +726,7 @@
self.check_page_for_string( 'Attributes updated' )
#self.check_page_for_string( 'Attributes updated' )
self.home()
+
def convert_format( self, hda_id, target_type ):
"""Convert format of history_dataset_association"""
self.home()
@@ -703,8 +735,9 @@
tc.fv( 'convert_data', 'target_type', target_type )
tc.submit( 'convert_data' )
self.check_page_for_string( 'The file conversion of Convert BED to GFF on data' )
- self.wait() #wait for the format convert tool to finish before returning
+ self.wait() # wait for the format convert tool to finish before returning
self.home()
+
def change_datatype( self, hda_id, datatype ):
"""Change format of history_dataset_association"""
self.home()
@@ -714,6 +747,7 @@
tc.submit( 'change' )
self.check_page_for_string( 'Changed the type of dataset' )
self.home()
+
def copy_history_item( self, source_dataset_id=None, target_history_id=None, all_target_history_ids=[],
deleted_history_ids=[] ):
"""
@@ -730,10 +764,10 @@
for id in deleted_history_ids:
try:
self.check_page_for_string( id )
- raise AssertionError, "deleted history id %d displayed in list of target histories" % id
+ raise AssertionError( "deleted history id %d displayed in list of target histories" % id )
except:
pass
-
+
tc.fv( '1', 'target_history_id', target_history_id )
tc.submit( 'do_copy' )
check_str = '1 dataset copied to 1 history'
@@ -760,17 +794,17 @@
def makeTfname(self, fname=None):
"""safe temp name - preserve the file extension for tools that interpret it"""
- suffix = os.path.split(fname)[-1] # ignore full path
- fd,temp_prefix = tempfile.mkstemp(prefix='tmp',suffix=suffix)
+ suffix = os.path.split(fname)[-1] # ignore full path
+ fd, temp_prefix = tempfile.mkstemp(prefix='tmp', suffix=suffix)
return temp_prefix
def verify_dataset_correctness( self, filename, hid=None, wait=True, maxseconds=120, attributes=None, shed_tool_id=None ):
"""Verifies that the attributes and contents of a history item meet expectations"""
if wait:
- self.wait( maxseconds=maxseconds ) #wait for job to finish
+ self.wait( maxseconds=maxseconds ) # wait for job to finish
data_list = self.get_history_as_data_list()
self.assertTrue( data_list )
- if hid is None: # take last hid
+ if hid is None: # take last hid
elem = data_list[-1]
hid = str( elem.get('hid') )
else:
@@ -804,9 +838,9 @@
raise AssertionError( errmsg )
if filename is not None:
local_name = self.get_filename( filename, shed_tool_id=shed_tool_id )
- temp_name = self.makeTfname(fname = filename)
+ temp_name = self.makeTfname(fname=filename)
file( temp_name, 'wb' ).write( data )
-
+
# if the server's env has GALAXY_TEST_SAVE, save the output file to that dir
if self.keepOutdir:
ofn = os.path.join( self.keepOutdir, os.path.basename( local_name ) )
@@ -814,12 +848,11 @@
try:
shutil.copy( temp_name, ofn )
except Exception, exc:
- error_log_msg = ( 'TwillTestCase could not save output file %s to %s: ' % ( temp_name, ofn ) )
+ error_log_msg = ( 'TwillTestCase could not save output file %s to %s: ' % ( temp_name, ofn ) )
error_log_msg += str( exc )
log.error( error_log_msg, exc_info=True )
else:
log.debug('## GALAXY_TEST_SAVE=%s. saved %s' % ( self.keepOutdir, ofn ) )
-
try:
# have to nest try-except in try-finally to handle 2.4
try:
@@ -837,15 +870,15 @@
elif compare == 're_match_multiline':
self.files_re_match_multiline( local_name, temp_name, attributes=attributes )
elif compare == 'sim_size':
- delta = attributes.get('delta','100')
+ delta = attributes.get('delta', '100')
s1 = len(data)
s2 = os.path.getsize(local_name)
- if abs(s1-s2) > int(delta):
- raise Exception, 'Files %s=%db but %s=%db - compare (delta=%s) failed' % (temp_name,s1,local_name,s2,delta)
+ if abs(s1 - s2) > int(delta):
+ raise Exception( 'Files %s=%db but %s=%db - compare (delta=%s) failed' % (temp_name, s1, local_name, s2, delta) )
elif compare == "contains":
self.files_contains( local_name, temp_name, attributes=attributes )
else:
- raise Exception, 'Unimplemented Compare type: %s' % compare
+ raise Exception( 'Unimplemented Compare type: %s' % compare )
if extra_files:
self.verify_extra_files_content( extra_files, elem.get( 'id' ), shed_tool_id=shed_tool_id )
except AssertionError, err:
@@ -876,22 +909,22 @@
for filename in os.listdir( self.get_filename( extra_value, shed_tool_id=shed_tool_id ) ):
files_list.append( ( filename, os.path.join( extra_value, filename ), extra_attributes ) )
else:
- raise ValueError, 'unknown extra_files type: %s' % extra_type
+ raise ValueError( 'unknown extra_files type: %s' % extra_type )
for filename, filepath, attributes in files_list:
self.verify_composite_datatype_file_content( filepath, hda_id, base_name=filename, attributes=attributes, shed_tool_id=shed_tool_id )
-
+
def verify_composite_datatype_file_content( self, file_name, hda_id, base_name=None, attributes=None, shed_tool_id=None ):
local_name = self.get_filename( file_name, shed_tool_id=shed_tool_id )
if base_name is None:
base_name = os.path.split(file_name)[-1]
- temp_name = self.makeTfname(fname = base_name)
+ temp_name = self.makeTfname(fname=base_name)
self.visit_url( "%s/datasets/%s/display/%s" % ( self.url, self.security.encode_id( hda_id ), base_name ) )
data = self.last_page()
file( temp_name, 'wb' ).write( data )
if self.keepOutdir > '':
- ofn = os.path.join(self.keepOutdir,base_name)
- shutil.copy(temp_name,ofn)
- log.debug('## GALAXY_TEST_SAVE=%s. saved %s' % (self.keepOutdir,ofn))
+ ofn = os.path.join(self.keepOutdir, base_name)
+ shutil.copy(temp_name, ofn)
+ log.debug('## GALAXY_TEST_SAVE=%s. saved %s' % (self.keepOutdir, ofn))
try:
# have to nest try-except in try-finally to handle 2.4
try:
@@ -905,13 +938,13 @@
elif compare == 're_match_multiline':
self.files_re_match_multiline( local_name, temp_name, attributes=attributes )
elif compare == 'sim_size':
- delta = attributes.get('delta','100')
+ delta = attributes.get('delta', '100')
s1 = len(data)
s2 = os.path.getsize(local_name)
- if abs(s1-s2) > int(delta):
- raise Exception, 'Files %s=%db but %s=%db - compare (delta=%s) failed' % (temp_name,s1,local_name,s2,delta)
+ if abs(s1 - s2) > int(delta):
+ raise Exception( 'Files %s=%db but %s=%db - compare (delta=%s) failed' % (temp_name, s1, local_name, s2, delta) )
else:
- raise Exception, 'Unimplemented Compare type: %s' % compare
+ raise Exception( 'Unimplemented Compare type: %s' % compare )
except AssertionError, err:
errmsg = 'Composite file (%s) of History item %s different than expected, difference (using %s):\n' % ( base_name, hda_id, compare )
errmsg += str( err )
@@ -925,7 +958,7 @@
return True
def is_binary( self, filename ):
- temp = open( filename, "U" ) # why is this not filename? Where did temp_name come from
+ temp = open( filename, "U" ) # why is this not filename? Where did temp_name come from
lineno = 0
for line in temp:
lineno += 1
@@ -949,7 +982,7 @@
# Functions associated with user accounts
def create( self, cntrller='user', email='test@bx.psu.edu', password='testuser', username='admin-user', redirect='' ):
- # HACK: don't use panels because late_javascripts() messes up the twill browser and it
+ # HACK: don't use panels because late_javascripts() messes up the twill browser and it
# can't find form fields (and hence user can't be logged in).
self.visit_url( "%s/user/create?cntrller=%s&use_panels=False" % ( self.url, cntrller ) )
tc.fv( 'registration', 'email', email )
@@ -980,6 +1013,7 @@
except:
pass
return previously_created, username_taken, invalid_username
+
def create_user_with_info( self, email, password, username, user_info_values, user_type_fd_id='', cntrller='user',
strings_displayed=[], strings_displayed_after_submit=[] ):
# This method creates a new user with associated info
@@ -998,6 +1032,7 @@
for check_str in strings_displayed:
self.check_page_for_string( check_str)
tc.submit( "create_user_button" )
+
def edit_user_info( self, cntrller='user', id='', new_email='', new_username='', password='', new_password='',
info_values=[], strings_displayed=[], strings_displayed_after_submit=[] ):
if cntrller == 'admin':
@@ -1021,51 +1056,53 @@
tc.submit( "change_password_button" )
if info_values:
for index, ( field_name, info_value ) in enumerate( info_values ):
- field_index = index + 1
tc.fv( "user_info", field_name, info_value )
tc.submit( "edit_user_info_button" )
for check_str in strings_displayed_after_submit:
self.check_page_for_string( check_str )
self.home()
+
def user_set_default_permissions( self, cntrller='user', permissions_out=[], permissions_in=[], role_id='2' ):
- # role.id = 2 is Private Role for test2@bx.psu.edu
- # NOTE: Twill has a bug that requires the ~/user/permissions page to contain at least 1 option value
+ # role.id = 2 is Private Role for test2(a)bx.psu.edu
+ # NOTE: Twill has a bug that requires the ~/user/permissions page to contain at least 1 option value
# in each select list or twill throws an exception, which is: ParseError: OPTION outside of SELECT
- # Due to this bug, we'll bypass visiting the page, and simply pass the permissions on to the
+ # Due to this bug, we'll bypass visiting the page, and simply pass the permissions on to the
# /user/set_default_permissions method.
url = "user/set_default_permissions?cntrller=%s&update_roles_button=Save&id=None" % cntrller
for po in permissions_out:
key = '%s_out' % po
- url ="%s&%s=%s" % ( url, key, str( role_id ) )
+ url = "%s&%s=%s" % ( url, key, str( role_id ) )
for pi in permissions_in:
key = '%s_in' % pi
- url ="%s&%s=%s" % ( url, key, str( role_id ) )
+ url = "%s&%s=%s" % ( url, key, str( role_id ) )
self.visit_url( "%s/%s" % ( self.url, url ) )
self.check_page_for_string( 'Default new history permissions have been changed.' )
self.home()
- def history_set_default_permissions( self, permissions_out=[], permissions_in=[], role_id=3 ): # role.id = 3 is Private Role for test3@bx.psu.edu
- # NOTE: Twill has a bug that requires the ~/user/permissions page to contain at least 1 option value
+
+ def history_set_default_permissions( self, permissions_out=[], permissions_in=[], role_id=3 ): # role.id = 3 is Private Role for test3@bx.psu.edu
+ # NOTE: Twill has a bug that requires the ~/user/permissions page to contain at least 1 option value
# in each select list or twill throws an exception, which is: ParseError: OPTION outside of SELECT
- # Due to this bug, we'll bypass visiting the page, and simply pass the permissions on to the
+ # Due to this bug, we'll bypass visiting the page, and simply pass the permissions on to the
# /user/set_default_permissions method.
url = "root/history_set_default_permissions?update_roles_button=Save&id=None&dataset=True"
for po in permissions_out:
key = '%s_out' % po
- url ="%s&%s=%s" % ( url, key, str( role_id ) )
+ url = "%s&%s=%s" % ( url, key, str( role_id ) )
for pi in permissions_in:
key = '%s_in' % pi
- url ="%s&%s=%s" % ( url, key, str( role_id ) )
+ url = "%s&%s=%s" % ( url, key, str( role_id ) )
self.home()
self.visit_url( "%s/%s" % ( self.url, url ) )
self.check_page_for_string( 'Default history permissions have been changed.' )
self.home()
+
def login( self, email='test@bx.psu.edu', password='testuser', username='admin-user', redirect='' ):
# test@bx.psu.edu is configured as an admin user
previously_created, username_taken, invalid_username = \
self.create( email=email, password=password, username=username, redirect=redirect )
if previously_created:
# The acount has previously been created, so just login.
- # HACK: don't use panels because late_javascripts() messes up the twill browser and it
+ # HACK: don't use panels because late_javascripts() messes up the twill browser and it
# can't find form fields (and hence user can't be logged in).
self.visit_url( "%s/user/login?use_panels=False" % self.url )
self.submit_form( 1, 'login_button', email=email, redirect=redirect, password=password )
@@ -1075,7 +1112,7 @@
self.visit_page( "user/logout" )
self.check_page_for_string( "You have been logged out" )
self.home()
-
+
# Functions associated with browsers, cookies, HTML forms and page visits
def check_for_strings( self, strings_displayed=[], strings_not_displayed=[] ):
@@ -1087,15 +1124,15 @@
self.check_string_not_in_page( string )
def check_page_for_string( self, patt ):
- """Looks for 'patt' in the current browser page"""
+ """Looks for 'patt' in the current browser page"""
page = self.last_page()
if page.find( patt ) == -1:
fname = self.write_temp_file( page )
errmsg = "no match to '%s'\npage content written to '%s'" % ( patt, fname )
raise AssertionError( errmsg )
-
+
def check_string_count_in_page( self, patt, min_count ):
- """Checks the number of 'patt' occurrences in the current browser page"""
+ """Checks the number of 'patt' occurrences in the current browser page"""
page = self.last_page()
patt_count = page.count( patt )
# The number of occurrences of patt in the page should be at least min_count
@@ -1104,15 +1141,15 @@
fname = self.write_temp_file( page )
errmsg = "%i occurrences of '%s' found instead of %i.\npage content written to '%s' " % ( min_count, patt, patt_count, fname )
raise AssertionError( errmsg )
-
+
def check_string_not_in_page( self, patt ):
- """Checks to make sure 'patt' is NOT in the page."""
+ """Checks to make sure 'patt' is NOT in the page."""
page = self.last_page()
if page.find( patt ) != -1:
fname = self.write_temp_file( page )
errmsg = "string (%s) incorrectly displayed in page.\npage content written to '%s'" % ( patt, fname )
raise AssertionError( errmsg )
-
+
def check_page(self, strings_displayed, strings_displayed_count, strings_not_displayed):
"""Checks a page for strings displayed, not displayed and number of occurrences of a string"""
for check_str in strings_displayed:
@@ -1122,7 +1159,6 @@
for check_str in strings_not_displayed:
self.check_string_not_in_page( check_str )
-
def write_temp_file( self, content, suffix='.html' ):
fd, fname = tempfile.mkstemp( suffix=suffix, prefix='twilltestcase-' )
f = os.fdopen( fd, "w" )
@@ -1174,50 +1210,50 @@
for i, control in enumerate( f.controls ):
formcontrols.append( "control %d: %s" % ( i, str( control ) ) )
for i, control in enumerate( f.controls ):
- if not hc_prefix in str( control ):
- try:
- #check if a repeat element needs to be added
- if control.name is not None:
- if control.name not in kwd and control.name.endswith( '_add' ):
- #control name doesn't exist, could be repeat
- repeat_startswith = control.name[0:-4]
- if repeat_startswith and not [ c_name for c_name in controls.keys() if c_name.startswith( repeat_startswith ) ] and [ c_name for c_name in kwd.keys() if c_name.startswith( repeat_startswith ) ]:
- tc.browser.clicked( f, control )
- tc.submit( control.name )
+ if not hc_prefix in str( control ):
+ try:
+ #check if a repeat element needs to be added
+ if control.name is not None:
+ if control.name not in kwd and control.name.endswith( '_add' ):
+ #control name doesn't exist, could be repeat
+ repeat_startswith = control.name[0:-4]
+ if repeat_startswith and not [ c_name for c_name in controls.keys() if c_name.startswith( repeat_startswith ) ] and [ c_name for c_name in kwd.keys() if c_name.startswith( repeat_startswith ) ]:
+ tc.browser.clicked( f, control )
+ tc.submit( control.name )
+ return self.submit_form( form_no=form_no, button=button, **kwd )
+ # Check for refresh_on_change attribute, submit a change if required
+ if hasattr( control, 'attrs' ) and 'refresh_on_change' in control.attrs.keys():
+ changed = False
+ # For DataToolParameter, control.value is the HDA id, but kwd contains the filename.
+ # This loop gets the filename/label for the selected values.
+ item_labels = [ item.attrs[ 'label' ] for item in control.get_items() if item.selected ]
+ for value in kwd[ control.name ]:
+ if value not in control.value and True not in [ value in item_label for item_label in item_labels ]:
+ changed = True
+ break
+ if changed:
+ # Clear Control and set to proper value
+ control.clear()
+ # kwd[control.name] should be a singlelist
+ for elem in kwd[ control.name ]:
+ tc.fv( f.name, control.name, str( elem ) )
+ # Create a new submit control, allows form to refresh, instead of going to next page
+ control = ClientForm.SubmitControl( 'SubmitControl', '___refresh_grouping___', {'name': 'refresh_grouping'} )
+ control.add_to_form( f )
+ control.fixup()
+ # Submit for refresh
+ tc.submit( '___refresh_grouping___' )
return self.submit_form( form_no=form_no, button=button, **kwd )
- # Check for refresh_on_change attribute, submit a change if required
- if hasattr( control, 'attrs' ) and 'refresh_on_change' in control.attrs.keys():
- changed = False
- # For DataToolParameter, control.value is the HDA id, but kwd contains the filename.
- # This loop gets the filename/label for the selected values.
- item_labels = [ item.attrs[ 'label' ] for item in control.get_items() if item.selected ]
- for value in kwd[ control.name ]:
- if value not in control.value and True not in [ value in item_label for item_label in item_labels ]:
- changed = True
- break
- if changed:
- # Clear Control and set to proper value
- control.clear()
- # kwd[control.name] should be a singlelist
- for elem in kwd[ control.name ]:
- tc.fv( f.name, control.name, str( elem ) )
- # Create a new submit control, allows form to refresh, instead of going to next page
- control = ClientForm.SubmitControl( 'SubmitControl', '___refresh_grouping___', {'name':'refresh_grouping'} )
- control.add_to_form( f )
- control.fixup()
- # Submit for refresh
- tc.submit( '___refresh_grouping___' )
- return self.submit_form( form_no=form_no, button=button, **kwd )
- except Exception, e:
- log.exception( "In submit_form, continuing, but caught exception." )
- for formcontrol in formcontrols:
- log.debug( formcontrol )
- continue
- controls[ control.name ] = control
+ except Exception:
+ log.exception( "In submit_form, continuing, but caught exception." )
+ for formcontrol in formcontrols:
+ log.debug( formcontrol )
+ continue
+ controls[ control.name ] = control
# No refresh_on_change attribute found in current form, so process as usual
for control_name, control_value in kwd.items():
if control_name not in controls:
- continue # these cannot be handled safely - cause the test to barf out
+ continue # these cannot be handled safely - cause the test to barf out
if not isinstance( control_value, list ):
control_value = [ control_value ]
control = controls[ control_name ]
@@ -1247,7 +1283,7 @@
else:
for elem in control_value:
control.get( name=elem ).selected = True
- else: # control.is_of_kind( "singlelist" )
+ else: # control.is_of_kind( "singlelist" )
for elem in control_value:
try:
tc.fv( f.name, control.name, str( elem ) )
@@ -1298,15 +1334,16 @@
control.clear()
tc.fv( f.name, control.name, value )
# Create a new submit control, allows form to refresh, instead of going to next page
- control = ClientForm.SubmitControl( 'SubmitControl', '___refresh_grouping___', {'name':'refresh_grouping'} )
+ control = ClientForm.SubmitControl( 'SubmitControl', '___refresh_grouping___', {'name': 'refresh_grouping'} )
control.add_to_form( f )
control.fixup()
# Submit for refresh
tc.submit( '___refresh_grouping___' )
+
def visit_page( self, page ):
# tc.go("./%s" % page)
if not page.startswith( "/" ):
- page = "/" + page
+ page = "/" + page
tc.go( self.url + page )
tc.code( 200 )
@@ -1322,7 +1359,7 @@
if repeat_name is not None:
repeat_button = '%s_add' % repeat_name
# Must click somewhere in tool_form, to disambiguate what form
- # is being targetted.
+ # is being targetted.
tc.browser.clicked( tc.browser.get_form( 'tool_form' ), None )
# Submit the "repeat" form button to add an input)
tc.submit( repeat_button )
@@ -1336,9 +1373,9 @@
galaxy_url = urllib.quote_plus( "%s/tool_runner/index?" % self.url )
self.visit_url( "http://genome.ucsc.edu/cgi-bin/hgTables?GALAXY_URL=%s&hgta_compressType=non…" % ( galaxy_url, tool_id, track_string ) )
tc.fv( "mainForm", "hgta_doTopSubmit", "get output" )
- self.submit_form( button="get output" )#, **track_params )
+ self.submit_form( button="get output" )
tc.fv( 2, "hgta_doGalaxyQuery", "Send query to Galaxy" )
- self.submit_form( button="Send query to Galaxy" )#, **output_params ) #AssertionError: Attempting to set field 'fbQual' to value '['whole']' in form 'None' threw exception: no matching forms! control: <RadioControl(fbQual=[whole, upstreamAll, endAll])>
+ self.submit_form( button="Send query to Galaxy" )
def get_running_datasets( self ):
self.visit_url( '/api/histories' )
@@ -1363,7 +1400,7 @@
slept += sleep_amount
sleep_amount *= 2
if slept + sleep_amount > maxseconds:
- sleep_amount = maxseconds - slept # don't overshoot maxseconds
+ sleep_amount = maxseconds - slept # don't overshoot maxseconds
else:
break
assert slept < maxseconds
@@ -1373,7 +1410,7 @@
def create_new_account_as_admin( self, email='test4@bx.psu.edu', password='testuser',
username='regular-user4', redirect='' ):
"""Create a new account for another user"""
- # HACK: don't use panels because late_javascripts() messes up the twill browser and it
+ # HACK: don't use panels because late_javascripts() messes up the twill browser and it
# can't find form fields (and hence user can't be logged in).
self.visit_url( "%s/user/create?cntrller=admin" % self.url )
self.submit_form( 1, 'create_user_button', email=email, redirect=redirect, password=password, confirm=password, username=username )
@@ -1409,6 +1446,7 @@
tc.submit( "reset_user_password_button" )
self.check_page_for_string( "Passwords reset for 1 user." )
self.home()
+
def mark_user_deleted( self, user_id, email='' ):
"""Mark a user as deleted"""
self.home()
@@ -1416,6 +1454,7 @@
check_str = "Deleted 1 users"
self.check_page_for_string( check_str )
self.home()
+
def undelete_user( self, user_id, email='' ):
"""Undelete a user"""
self.home()
@@ -1423,6 +1462,7 @@
check_str = "Undeleted 1 users"
self.check_page_for_string( check_str )
self.home()
+
def purge_user( self, user_id, email ):
"""Purge a user account"""
self.home()
@@ -1430,6 +1470,7 @@
check_str = "Purged 1 users"
self.check_page_for_string( check_str )
self.home()
+
def manage_roles_and_groups_for_user( self, user_id, in_role_ids=[], out_role_ids=[],
in_group_ids=[], out_group_ids=[], strings_displayed=[] ):
self.home()
@@ -1448,12 +1489,13 @@
for check_str in strings_displayed:
self.check_page_for_string( check_str )
self.home()
+ # Tests associated with roles
- # Tests associated with roles
def browse_roles( self, strings_displayed=[] ):
self.visit_url( '%s/admin/roles' % self.url )
for check_str in strings_displayed:
self.check_page_for_string( check_str )
+
def create_role( self,
name='Role One',
description="This is Role One",
@@ -1488,6 +1530,7 @@
self.visit_url( "%s/admin/roles" % self.url )
self.check_page_for_string( name )
self.home()
+
def rename_role( self, role_id, name='Role One Renamed', description='This is Role One Re-described' ):
"""Rename a role"""
self.home()
@@ -1497,6 +1540,7 @@
tc.fv( "1", "description", description )
tc.submit( "rename_role_button" )
self.home()
+
def mark_role_deleted( self, role_id, role_name ):
"""Mark a role as deleted"""
self.home()
@@ -1504,6 +1548,7 @@
check_str = "Deleted 1 roles: %s" % role_name
self.check_page_for_string( check_str )
self.home()
+
def undelete_role( self, role_id, role_name ):
"""Undelete an existing role"""
self.home()
@@ -1511,6 +1556,7 @@
check_str = "Undeleted 1 roles: %s" % role_name
self.check_page_for_string( check_str )
self.home()
+
def purge_role( self, role_id, role_name ):
"""Purge an existing role"""
self.home()
@@ -1518,6 +1564,7 @@
check_str = "Purged 1 roles: %s" % role_name
self.check_page_for_string( check_str )
self.home()
+
def associate_users_and_groups_with_role( self, role_id, role_name, user_ids=[], group_ids=[] ):
self.home()
url = "%s/admin/role?id=%s&role_members_edit_button=Save" % ( self.url, role_id )
@@ -1548,10 +1595,12 @@
self.visit_url( "%s/admin/groups" % self.url )
self.check_page_for_string( name )
self.home()
+
def browse_groups( self, strings_displayed=[] ):
self.visit_url( '%s/admin/groups' % self.url )
for check_str in strings_displayed:
self.check_page_for_string( check_str )
+
def rename_group( self, group_id, name='Group One Renamed' ):
"""Rename a group"""
self.home()
@@ -1560,6 +1609,7 @@
tc.fv( "1", "name", name )
tc.submit( "rename_group_button" )
self.home()
+
def associate_users_and_roles_with_group( self, group_id, group_name, user_ids=[], role_ids=[] ):
self.home()
url = "%s/admin/manage_users_and_roles_for_group?id=%s&group_roles_users_edit_button=Save" % ( self.url, group_id )
@@ -1571,6 +1621,7 @@
check_str = "Group '%s' has been updated with %d associated roles and %d associated users" % ( group_name, len( role_ids ), len( user_ids ) )
self.check_page_for_string( check_str )
self.home()
+
def mark_group_deleted( self, group_id, group_name ):
"""Mark a group as deleted"""
self.home()
@@ -1578,6 +1629,7 @@
check_str = "Deleted 1 groups: %s" % group_name
self.check_page_for_string( check_str )
self.home()
+
def undelete_group( self, group_id, group_name ):
"""Undelete an existing group"""
self.home()
@@ -1585,6 +1637,7 @@
check_str = "Undeleted 1 groups: %s" % group_name
self.check_page_for_string( check_str )
self.home()
+
def purge_group( self, group_id, group_name ):
"""Purge an existing group"""
self.home()
@@ -1628,7 +1681,7 @@
if num_options == 0:
# Default to 2 options
num_options = 2
- for index2 in range( 1, num_options+1 ):
+ for index2 in range( 1, num_options + 1 ):
tc.submit( "addoption_0" )
# Add contents to the new options fields
for index2 in range( num_options ):
@@ -1644,6 +1697,7 @@
for check_str in strings_displayed_after_submit:
self.check_page_for_string( check_str )
self.home()
+
def edit_form( self, id, form_type='', new_form_name='', new_form_desc='', field_dicts=[], field_index=0,
strings_displayed=[], strings_not_displayed=[], strings_displayed_after_submit=[] ):
"""Edit form details; name and description"""
@@ -1771,10 +1825,10 @@
url = "request_type/request_type_permissions?id=%s&update_roles_button=Save" % ( request_type_id )
for po in permissions_out:
key = '%s_out' % po
- url ="%s&%s=%s" % ( url, key, role_ids_str )
+ url = "%s&%s=%s" % ( url, key, role_ids_str )
for pi in permissions_in:
key = '%s_in' % pi
- url ="%s&%s=%s" % ( url, key, role_ids_str )
+ url = "%s&%s=%s" % ( url, key, role_ids_str )
self.home()
self.visit_url( "%s/%s" % ( self.url, url ) )
check_str = "Permissions updated for request type '%s'" % request_type_name
@@ -2080,7 +2134,7 @@
for check_str in strings_not_displayed:
try:
self.check_page_for_string( check_str )
- raise AssertionError, "String (%s) incorrectly displayed when browing library." % check_str
+ raise AssertionError( "String (%s) incorrectly displayed when browing library." % check_str )
except:
pass
def browse_libraries_regular_user( self, strings_displayed=[], strings_not_displayed=[] ):
@@ -2090,7 +2144,7 @@
for check_str in strings_not_displayed:
try:
self.check_page_for_string( check_str )
- raise AssertionError, "String (%s) incorrectly displayed when browing library." % check_str
+ raise AssertionError( "String (%s) incorrectly displayed when browing library." % check_str )
except:
pass
def browse_library( self, cntrller, library_id, show_deleted=False, strings_displayed=[], strings_not_displayed=[] ):
@@ -2100,7 +2154,7 @@
for check_str in strings_not_displayed:
try:
self.check_page_for_string( check_str )
- raise AssertionError, "String (%s) incorrectly displayed when browing library." % check_str
+ raise AssertionError( "String (%s) incorrectly displayed when browing library." % check_str )
except:
pass
def create_library( self, name='Library One', description='This is Library One', synopsis='Synopsis for Library One' ):
@@ -2156,10 +2210,10 @@
url = "library_common/library_permissions?id=%s&cntrller=%s&update_roles_button=Save" % ( library_id, cntrller )
for po in permissions_out:
key = '%s_out' % po
- url ="%s&%s=%s" % ( url, key, role_ids_str )
+ url = "%s&%s=%s" % ( url, key, role_ids_str )
for pi in permissions_in:
key = '%s_in' % pi
- url ="%s&%s=%s" % ( url, key, role_ids_str )
+ url = "%s&%s=%s" % ( url, key, role_ids_str )
self.home()
self.visit_url( "%s/%s" % ( self.url, url ) )
check_str = "Permissions updated for library '%s'." % library_name
@@ -2208,7 +2262,7 @@
for check_str in strings_not_displayed:
try:
self.check_page_for_string( check_str )
- raise AssertionError, "String (%s) incorrectly displayed." % check_str
+ raise AssertionError( "String (%s) incorrectly displayed." % check_str )
except:
pass
if template_refresh_field_contents:
@@ -2227,7 +2281,7 @@
for check_str in strings_not_displayed_after_submit:
try:
self.check_page_for_string( check_str )
- raise AssertionError, "String (%s) incorrectly displayed." % check_str
+ raise AssertionError( "String (%s) incorrectly displayed." % check_str )
except:
pass
self.home()
@@ -2301,10 +2355,10 @@
( self.url, cntrller, library_id, folder_id, id )
for po in permissions_out:
key = '%s_out' % po
- url ="%s&%s=%s" % ( url, key, role_ids_str )
+ url = "%s&%s=%s" % ( url, key, role_ids_str )
for pi in permissions_in:
key = '%s_in' % pi
- url ="%s&%s=%s" % ( url, key, role_ids_str )
+ url = "%s&%s=%s" % ( url, key, role_ids_str )
if permissions_in or permissions_out:
url += "&update_roles_button=Save"
self.visit_url( url )
@@ -2322,7 +2376,7 @@
for check_str in strings_not_displayed:
try:
self.check_page_for_string( check_str )
- raise AssertionError, "String (%s) should not have been displayed on ldda info page." % check_str
+ raise AssertionError( "String (%s) should not have been displayed on ldda info page." % check_str )
except:
pass
self.home()
@@ -2354,7 +2408,7 @@
for check_str in strings_not_displayed:
try:
self.check_page_for_string( check_str )
- raise AssertionError, "String (%s) should not have been displayed on ldda Edit Attributes page." % check_str
+ raise AssertionError( "String (%s) should not have been displayed on ldda Edit Attributes page." % check_str )
except:
pass
self.home()
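Among the mechanical changes above, the raise rewrites matter most for forward compatibility: raise AssertionError, msg is Python 2-only syntax that Python 3 rejects outright, while calling the exception class works under both. A minimal sketch mirroring the pattern adopted here (the check function is a hypothetical stand-in):

def check(hid):
    try:
        int(hid)
    except ValueError:
        # Python 2-only spelling, removed in Python 3:
        #     raise AssertionError, "Invalid hid (%s)" % hid
        # Portable spelling used throughout this cleanup:
        raise AssertionError("Invalid hid (%s)" % hid)

check(42)          # passes
# check('abc')     # raises AssertionError: Invalid hid (abc)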
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: Dave Bouvier: Remove reference to undefined variable.
by commits-noreply@bitbucket.org 14 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/a231103b3d6e/
Changeset: a231103b3d6e
User: Dave Bouvier
Date: 2013-11-15 00:43:58
Summary: Remove reference to undefined variable.
Affected #: 1 file
diff -r 51d3b2de1ed31ad118e939c360f82362d1c10927 -r a231103b3d6e14578a226f9f6d5a3b68c0896854 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -696,8 +696,6 @@
if os.path.exists( tool_dependency_install_path ):
log.debug( 'Uninstallation of tool dependency succeeded, but the installation path still exists on the filesystem. It is now being explicitly deleted.')
suc.remove_dir( tool_dependency_install_path )
- return success
-
def main():
# ---- Configuration ------------------------------------------------------
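The deleted return referenced a name never assigned in that scope, which in Python is a NameError the moment the line executes, so the cleanup path would crash rather than return. A minimal illustration (the function name is a hypothetical stand-in):

def uninstall_repository():
    # cleanup work elided; 'success' is never assigned anywhere in scope
    return success   # NameError the moment this line runs

try:
    uninstall_repository()
except NameError as e:
    print(e)   # name 'success' is not defined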
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: Dave Bouvier: Remove a return that was added while debugging.
by commits-noreply@bitbucket.org 14 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/51d3b2de1ed3/
Changeset: 51d3b2de1ed3
User: Dave Bouvier
Date: 2013-11-14 23:50:54
Summary: Remove a return that was added while debugging.
Affected #: 1 file
diff -r fbbc387df15a317f0e1df50ba8e76cec2750a66a -r 51d3b2de1ed31ad118e939c360f82362d1c10927 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -580,7 +580,6 @@
'''
Update the repository metadata tool_test_results and appropriate flags using the API.
'''
- return {}
params[ 'tool_test_results' ] = test_results_dict
if '-info_only' in sys.argv or 'GALAXY_INSTALL_TEST_INFO_ONLY' in os.environ:
return {}
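The stray return {} sat above the parameter assembly, so every call exited before doing any work; the surviving return {} below it is the intentional guard for -info_only runs. A sketch of the failure mode the removal cures (the function name here is hypothetical):

def register_test_results(params, test_results_dict):   # hypothetical name
    # return {}   <- the debugging short-circuit this commit removes;
    #               with it present, nothing below ever ran
    params['tool_test_results'] = test_results_dict
    return params

print(register_test_results({}, {'passed': []}))   # {'tool_test_results': {'passed': []}}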
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: Dave Bouvier: Fix broken import.
by commits-noreply@bitbucket.org 14 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/fbbc387df15a/
Changeset: fbbc387df15a
User: Dave Bouvier
Date: 2013-11-14 23:37:27
Summary: Fix broken import.
Affected #: 1 file
diff -r 255958e985f9a2e4942a439a08a1dfe3afd9d439 -r fbbc387df15a317f0e1df50ba8e76cec2750a66a test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -13,7 +13,6 @@
import random
import re
import shutil
-import simplejson
import socket
import string
import sys
@@ -54,6 +53,7 @@
eggs.require( "Cheetah" )
eggs.require( "simplejson" )
+import simplejson
import twill
# This should not be required, but it is under certain conditions, thanks to this bug: http://code.google.com/p/python-nose/issues/detail?id=284
@@ -1348,4 +1348,4 @@
print "####################################################################################"
print "# %s - running repository installation and testing script." % now
print "####################################################################################"
- sys.exit( main() )
\ No newline at end of file
+ sys.exit( main() )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
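The fix moves the import below the matching eggs.require() call. In Galaxy's eggs system, require() resolves the named egg and places it on sys.path, so importing the module before that call fails (or picks up the wrong copy) on instances that rely on eggs. The pattern in isolation, assuming a Galaxy checkout with lib/ on the Python path:

    from galaxy import eggs

    # require() must run first: it adds the egg's location to sys.path.
    eggs.require( "simplejson" )
    # Only now can the module be imported from the egg.
    import simplejson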
commit/galaxy-central: Dave Bouvier: Correct wrong method call.
by commits-noreply@bitbucket.org 14 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/255958e985f9/
Changeset: 255958e985f9
User: Dave Bouvier
Date: 2013-11-14 23:00:11
Summary: Correct wrong method call.
Affected #: 1 file
diff -r cfeb9bd79fe11f6db6600e4c3e28c106e7837a19 -r 255958e985f9a2e4942a439a08a1dfe3afd9d439 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -367,7 +367,7 @@
for repository_dependency in repository.repository_dependencies:
if not repository_dependency.includes_tool_dependencies:
continue
- missing_tool_dependencies.extend( get_failed_tool_dependencies( repository_dependency ) )
+ missing_tool_dependencies.extend( get_missing_tool_dependencies( repository_dependency ) )
return missing_tool_dependencies
def get_repository_info_from_api( url, repository_info_dict ):
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
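With the corrected call, the function recurses under its own name, walking the repository dependency tree and accumulating every missing tool dependency along the way. The shape of that traversal, sketched over plain dicts rather than Galaxy's model objects:

    def get_missing_tool_dependencies(repository):
        # Collect this repository's own missing tool dependencies first.
        missing = list(repository['missing_tool_dependencies'])
        # Then recurse into each repository dependency that ships tool dependencies.
        for dependency in repository['repository_dependencies']:
            if not dependency['includes_tool_dependencies']:
                continue
            missing.extend(get_missing_tool_dependencies(dependency))
        return missing

    leaf = dict(missing_tool_dependencies=['bwa 0.5.9'],
                repository_dependencies=[],
                includes_tool_dependencies=True)
    root = dict(missing_tool_dependencies=[],
                repository_dependencies=[leaf])
    print(get_missing_tool_dependencies(root))  # -> ['bwa 0.5.9']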
commit/galaxy-central: Dave Bouvier: Substantial refactoring of the install and test framework to address a number of weaknesses.
by commits-noreply@bitbucket.org 14 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/cfeb9bd79fe1/
Changeset: cfeb9bd79fe1
User: Dave Bouvier
Date: 2013-11-14 22:52:06
Summary: Substantial refactoring of the install and test framework to address a number of weaknesses.
Affected #: 4 files
diff -r ca7110d331571b4a3bbfdd75037053ea4c40b746 -r cfeb9bd79fe11f6db6600e4c3e28c106e7837a19 test/install_and_test_tool_shed_repositories/base/test_db_util.py
--- a/test/install_and_test_tool_shed_repositories/base/test_db_util.py
+++ b/test/install_and_test_tool_shed_repositories/base/test_db_util.py
@@ -30,6 +30,12 @@
model.ToolShedRepository.table.c.installed_changeset_revision == changeset_revision ) ) \
.one()
+def get_missing_tool_dependencies( repository ):
+ return sa_session.query( model.ToolDependency ) \
+ .filter( and_( model.ToolDependency.table.c.tool_shed_repository_id == repository_id,
+ model.ToolDependency.table.c.status != model.ToolDependency.installation_status.INSTALLED ) ) \
+ .all()
+
def get_private_role( user ):
for role in user.all_roles():
if role.name == user.email and role.description == 'Private Role for %s' % user.email:
diff -r ca7110d331571b4a3bbfdd75037053ea4c40b746 -r cfeb9bd79fe11f6db6600e4c3e28c106e7837a19 test/install_and_test_tool_shed_repositories/base/twilltestcase.py
--- a/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
+++ b/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
@@ -61,8 +61,7 @@
changeset_revision = repository_info_dict[ 'changeset_revision' ]
encoded_repository_id = repository_info_dict[ 'repository_id' ]
tool_shed_url = repository_info_dict[ 'tool_shed_url' ]
- preview_params = urllib.urlencode( dict( repository_id=encoded_repository_id, changeset_revision=changeset_revision ) )
- self.visit_url( '%s/repository/preview_tools_in_changeset?%s' % ( tool_shed_url, preview_params ) )
+ # Pass galaxy_url to the tool shed in order to set cookies and redirects correctly.
install_params = urllib.urlencode( dict( repository_ids=encoded_repository_id,
changeset_revisions=changeset_revision,
galaxy_url=self.url ) )
@@ -136,35 +135,17 @@
break
time.sleep( 1 )
- def uninstall_repository( self, installed_repository, deactivate_only=False ):
+ def deactivate_or_uninstall_repository( self, installed_repository, deactivate=False ):
url = '/admin_toolshed/deactivate_or_uninstall_repository?id=%s' % self.security.encode_id( installed_repository.id )
self.visit_url( url )
- if deactivate_only:
+ if deactivate:
tc.fv ( 1, "remove_from_disk", 'false' )
else:
tc.fv ( 1, "remove_from_disk", 'true' )
tc.submit( 'deactivate_or_uninstall_repository_button' )
strings_displayed = [ 'The repository named' ]
- if deactivate_only:
+ if deactivate:
strings_displayed.append( 'has been deactivated' )
else:
strings_displayed.append( 'has been uninstalled' )
self.check_for_strings( strings_displayed, strings_not_displayed=[] )
- # Get all tool dependencies that are not in an installed state and uninstall them explicitly, so that the next installation attempt
- # may succeed.
- installed_state = model.ToolDependency.installation_status.INSTALLED
- tool_dependencies = test_db_util.get_tool_dependencies_for_installed_repository( installed_repository.id, exclude_status=installed_state )
- if len( tool_dependencies ) > 0:
- encoded_tool_dependency_ids = [ self.security.encode_id( tool_dependency.id ) for tool_dependency in tool_dependencies ]
- self.uninstall_tool_dependencies( self.security.encode_id( installed_repository.id ), encoded_tool_dependency_ids )
-
- def uninstall_tool_dependencies( self, encoded_repository_id, encoded_tool_dependency_ids ):
- tool_dependency_ids = ','.join( encoded_tool_dependency_ids )
- url = '/admin_toolshed/uninstall_tool_dependencies?repository_id=%s&inst_td_ids=%s&operation=uninstall' % \
- ( encoded_repository_id, tool_dependency_ids )
- self.visit_url( url )
- html = self.last_page()
- if 'uninstall_tool_dependencies' in html:
- tc.fv( 'uninstall_tool_dependencies', 'tool_dependency_ids', tool_dependency_ids )
- tc.submit( 'uninstall_tool_dependencies_button' )
-
\ No newline at end of file
diff -r ca7110d331571b4a3bbfdd75037053ea4c40b746 -r cfeb9bd79fe11f6db6600e4c3e28c106e7837a19 test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py
--- a/test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py
+++ b/test/install_and_test_tool_shed_repositories/functional/test_install_repositories.py
@@ -16,7 +16,7 @@
# Install the repository through the web interface using twill.
self.install_repository( repository_info_dict )
- def do_uninstallation( self, repository_info_dict, deactivate_only=False ):
+ def do_deactivate_or_uninstall( self, repository_info_dict, deactivate=False ):
self.logout()
self.login( email='test@bx.psu.edu', username='test' )
admin_user = test_db_util.get_user( 'test@bx.psu.edu' )
@@ -27,7 +27,7 @@
repository_info_dict[ 'changeset_revision' ] )
admin_user_private_role = test_db_util.get_private_role( admin_user )
# Uninstall the repository through the web interface using twill.
- self.uninstall_repository( repository, deactivate_only )
+ self.deactivate_or_uninstall_repository( repository, deactivate )
def generate_install_method( repository_dict=None ):
"""Generate abstract test cases for the defined list of repositories."""
@@ -52,11 +52,11 @@
namespace[ 'install_repository_%s' % repository_dict[ 'name' ] ] = test_method
# The new.classobj function returns a new class object, with name name, derived
# from baseclasses (which should be a tuple of classes) and with namespace dict.
- new_class_obj = new.classobj( name, baseclasses, namespace )
+ new_class_obj = new.classobj( str( name ), baseclasses, namespace )
G[ name ] = new_class_obj
-def generate_uninstall_method( repository_dict=None, deactivate_only=False ):
- """Generate abstract test cases for the defined list of repositories."""
+def generate_deactivate_or_uninstall_method( repository_dict=None, deactivate=False ):
+ """Generate abstract test cases for the received repository_dict."""
if repository_dict is None:
return
# Push all the toolbox tests to module level
@@ -69,14 +69,15 @@
name = "TestUninstallRepository_%s_%s" % ( repository_dict[ 'name' ], repository_dict[ 'changeset_revision' ] )
baseclasses = ( InstallTestRepositories, )
namespace = dict()
- def make_uninstall_method( repository_dict ):
+ def make_deactivate_or_uninstall_method( repository_dict ):
def test_install_repository( self ):
- self.do_uninstallation( repository_dict, deactivate_only )
+ self.do_deactivate_or_uninstall( repository_dict, deactivate )
return test_install_repository
- test_method = make_uninstall_method( repository_dict )
- test_method.__doc__ = "Uninstall the repository %s." % repository_dict[ 'name' ]
- namespace[ 'uninstall_repository_%s_%s' % ( repository_dict[ 'name' ], repository_dict[ 'changeset_revision' ] ) ] = test_method
+ test_method = make_deactivate_or_uninstall_method( repository_dict )
+ test_method.__doc__ = "Deactivate or uninstall the repository %s." % repository_dict[ 'name' ]
+ namespace[ 'uninstall_repository_%s_%s' % ( str( repository_dict[ 'name' ] ), repository_dict[ 'changeset_revision' ] ) ] = test_method
# The new.classobj function returns a new class object, with name name, derived
# from baseclasses (which should be a tuple of classes) and with namespace dict.
- new_class_obj = new.classobj( name, baseclasses, namespace )
- G[ name ] = new_class_obj
\ No newline at end of file
+ new_class_obj = new.classobj( str( name ), baseclasses, namespace )
+ G[ name ] = new_class_obj
+
diff -r ca7110d331571b4a3bbfdd75037053ea4c40b746 -r cfeb9bd79fe11f6db6600e4c3e28c106e7837a19 test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -4,7 +4,25 @@
# order to run functional tests on repository tools after installation. The install_and_test_tool_shed_repositories.sh
# will execute this script with the appropriate parameters.
-import os, sys, shutil, tempfile, re, string, urllib, platform
+import atexit
+import httplib
+import logging
+import os
+import os.path
+import platform
+import random
+import re
+import shutil
+import simplejson
+import socket
+import string
+import sys
+import tempfile
+import time
+import threading
+import unittest
+import urllib
+import urllib2
from time import strftime
# Assume we are run from the galaxy root directory, add lib to the python path
@@ -36,6 +54,8 @@
eggs.require( "Cheetah" )
eggs.require( "simplejson" )
+import twill
+
# This should not be required, but it is under certain conditions, thanks to this bug: http://code.google.com/p/python-nose/issues/detail?id=284
eggs.require( "pysqlite" )
@@ -43,20 +63,17 @@
import install_and_test_tool_shed_repositories.base.test_db_util as test_db_util
import functional.test_toolbox as test_toolbox
-import atexit, logging, os, os.path, sys, tempfile, simplejson
-import twill, unittest, time
-import sys, threading, random
-import httplib, socket
from paste import httpserver
# This is for the galaxy application.
import galaxy.app
from galaxy.app import UniverseApplication
from galaxy.web import buildapp
-from galaxy.util import parse_xml
+from galaxy.util import parse_xml, asbool
from galaxy.util.json import from_json_string, to_json_string
-from tool_shed.util.shed_util_common import url_join
+import tool_shed.util.shed_util_common as suc
+from tool_shed.util import tool_dependency_util
import nose.core
import nose.config
@@ -243,27 +260,29 @@
return passed_tests
return []
-def execute_uninstall_method( app, deactivate_only=False ):
- # Clean out any generated tests.
+def deactivate_repository( app, repository_info_dict ):
+ # Clean out any generated tests. This is necessary for Twill.
remove_generated_tests( app )
sa_session = app.model.context.current
- repositories_to_uninstall = sa_session.query( app.model.ToolShedRepository ).all()
- for repository in repositories_to_uninstall:
- if repository.status in [ app.model.ToolShedRepository.installation_status.UNINSTALLED,
- app.model.ToolShedRepository.installation_status.DEACTIVATED ]:
- continue
- if repository.status not in [ app.model.ToolShedRepository.installation_status.ERROR,
- app.model.ToolShedRepository.installation_status.INSTALLED ]:
- repository.status = app.model.ToolShedRepository.installation_status.ERROR
- sa_session.add( repository )
- sa_session.flush()
- name = str( repository.name )
- owner = str( repository.owner )
- changeset_revision = str( repository.installed_changeset_revision )
- log.debug( 'Changeset revision %s of %s repository %s queued for uninstallation.', changeset_revision, repository.status, name )
- repository_dict = dict( name=name, owner=owner, changeset_revision=changeset_revision )
+ # The dict contains the only repository the app should have installed at this point.
+ repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( repository_info_dict[ 'name' ],
+ repository_info_dict[ 'owner' ],
+ repository_info_dict[ 'changeset_revision' ] )
+ # We have to do this through Twill, in order to maintain app.toolbox and shed_tool_conf.xml in a state that is valid for future tests.
+ for required_repository in repository.repository_dependencies:
+ repository_dict = dict( name=required_repository.name,
+ owner=required_repository.owner,
+ changeset_revision=required_repository.changeset_revision )
# Generate a test method to uninstall this repository through the embedded Galaxy application's web interface.
- test_install_repositories.generate_uninstall_method( repository_dict, deactivate_only )
+ test_install_repositories.generate_deactivate_or_uninstall_method( repository_dict, deactivate=True )
+ log.debug( 'Changeset revision %s of %s repository %s selected for deactivation.' % \
+ ( required_repository.changeset_revision, required_repository.status, required_repository.name ) )
+ repository_dict = dict( name=repository.name,
+ owner=repository.owner,
+ changeset_revision=repository.changeset_revision )
+ test_install_repositories.generate_deactivate_or_uninstall_method( repository_dict, deactivate=True )
+ log.debug( 'Changeset revision %s of %s repository %s selected for deactivation.' % \
+ ( repository.changeset_revision, repository.status, repository.name ) )
# Set up nose to run the generated uninstall method as a functional test.
test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.configure( sys.argv )
@@ -324,7 +343,7 @@
parts.insert( 0, 'api' )
elif 'api' not in parts:
parts.insert( 0, 'api' )
- url = url_join( base, *parts )
+ url = suc.url_join( base, *parts )
if key:
url += '?%s' % urllib.urlencode( dict( key=key ) )
else:
@@ -333,14 +352,6 @@
url += '&%s' % params
return url
-def get_failed_tool_dependencies( repository ):
- missing_dependencies = repository.missing_tool_dependencies
- for repository_dependency in repository.repository_dependencies:
- if not repository_dependency.includes_tool_dependencies:
- continue
- missing_dependencies.extend( get_failed_tool_dependencies( repository_dependency ) )
- return missing_dependencies
-
def get_latest_downloadable_changeset_revision( url, name, owner ):
api_url_parts = [ 'api', 'repositories', 'get_ordered_installable_revisions' ]
params = urllib.urlencode( dict( name=name, owner=owner ) )
@@ -351,6 +362,14 @@
else:
return '000000000000'
+def get_missing_tool_dependencies( repository ):
+ missing_tool_dependencies = repository.missing_tool_dependencies
+ for repository_dependency in repository.repository_dependencies:
+ if not repository_dependency.includes_tool_dependencies:
+ continue
+ missing_tool_dependencies.extend( get_failed_tool_dependencies( repository_dependency ) )
+ return missing_tool_dependencies
+
def get_repository_info_from_api( url, repository_info_dict ):
parts = [ 'api', 'repositories', repository_info_dict[ 'repository_id' ] ]
api_url = get_api_url( base=url, parts=parts )
@@ -459,6 +478,37 @@
return dict()
return tool_test_results
+def install_repository( repository_info_dict ):
+ """
+ The repository_info_dict looks something like:
+ {
+ "changeset_revision": "13fa22a258b5",
+ "contents_url": "/api/repositories/529fd61ab1c6cc36/contents",
+ "deleted": false,
+ "deprecated": false,
+ "description": "Convert column case.",
+ "downloadable": true,
+ "id": "529fd61ab1c6cc36",
+ "long_description": "This tool takes the specified columns and converts them to uppercase or lowercase.",
+ "malicious": false,
+ "name": "change_case",
+ "owner": "test",
+ "private": false,
+ "repository_id": "529fd61ab1c6cc36",
+ "times_downloaded": 0,
+ "tool_shed_url": "http://toolshed.local:10001",
+ "url": "/api/repository_revisions/529fd61ab1c6cc36",
+ "user_id": "529fd61ab1c6cc36"
+ }
+ """
+ data[ 'tool_shed_url' ] = repository_info_dict[ 'tool_shed_url' ]
+ data[ 'name' ] = repository_info_dict[ 'name' ]
+ data[ 'owner' ] = repository_info_dict[ 'owner' ]
+ data[ 'changeset_revision' ] = repository_info_dict[ 'changeset_revision' ]
+ data[ 'install_repository_dependencies' ] = True
+ data[ 'install_tool_dependencies' ] = True
+ submit( options.api, '%s%s' % ( galaxy_url.strip( '/' ), '/api/tool_shed_repositories/new/install_repository_revision' ), data )
+
def is_latest_downloadable_revision( url, repository_info_dict ):
latest_revision = get_latest_downloadable_changeset_revision( url, name=repository_info_dict[ 'name' ], owner=repository_info_dict[ 'owner' ] )
return str( repository_info_dict[ 'changeset_revision' ] ) == str( latest_revision )
@@ -530,11 +580,12 @@
'''
Update the repository metadata tool_test_results and appropriate flags using the API.
'''
+ return {}
params[ 'tool_test_results' ] = test_results_dict
if '-info_only' in sys.argv or 'GALAXY_INSTALL_TEST_INFO_ONLY' in os.environ:
return {}
else:
- return update( tool_shed_api_key, '%s' % ( url_join( galaxy_tool_shed_url, 'api', 'repository_revisions', metadata_id ) ), params, return_formatted=False )
+ return update( tool_shed_api_key, '%s' % ( suc.url_join( galaxy_tool_shed_url, 'api', 'repository_revisions', metadata_id ) ), params, return_formatted=False )
def remove_generated_tests( app ):
# Delete any configured tool functional tests from the test_toolbox.__dict__, otherwise nose will find them
@@ -558,6 +609,21 @@
for tool in tools_to_delete:
del app.toolbox.tools_by_id[ tool ]
+def remove_install_tests( app ):
+ # Delete any configured repository installation tests from the test_toolbox.__dict__, otherwise nose will find them
+ # and try to install the repository again while running tool functional tests.
+ tests_to_delete = []
+ global test_toolbox
+ # Push all the toolbox tests to module level
+ for key in test_install_repositories.__dict__:
+ if key.startswith( 'TestInstallRepository_' ):
+ log.info( 'Repository installation process found, deleting: %s', key )
+ # We can't delete this test just yet, we're still iterating over __dict__.
+ tests_to_delete.append( key )
+ for key in tests_to_delete:
+ # Now delete the tests found in the previous loop.
+ del test_install_repositories.__dict__[ key ]
+
def run_tests( test_config ):
loader = nose.loader.TestLoader( config=test_config )
test_config.plugins.addPlugin( ReportResults() )
@@ -585,6 +651,55 @@
for repository in repositories_by_owner[ owner ]:
print "# %s owned by %s, changeset revision %s" % ( repository[ 'name' ], repository[ 'owner' ], repository[ 'changeset_revision' ] )
+def uninstall_repository( app, repository_info_dict ):
+ # Clean out any generated tests. This is necessary for Twill.
+ remove_generated_tests( app )
+ sa_session = app.model.context.current
+ # The dict contains the only repository the app should have installed at this point.
+ repository = test_db_util.get_installed_repository_by_name_owner_changeset_revision( repository_info_dict[ 'name' ],
+ repository_info_dict[ 'owner' ],
+ repository_info_dict[ 'changeset_revision' ] )
+ # We have to do this through Twill, in order to maintain app.toolbox and shed_tool_conf.xml in a state that is valid for future tests.
+ for required_repository in repository.repository_dependencies:
+ repository_dict = dict( name=required_repository.name,
+ owner=required_repository.owner,
+ changeset_revision=required_repository.changeset_revision )
+ # Generate a test method to uninstall this repository through the embedded Galaxy application's web interface.
+ test_install_repositories.generate_deactivate_or_uninstall_method( repository_dict, deactivate=False )
+ log.debug( 'Changeset revision %s of %s repository %s selected for uninstallation.' % \
+ ( required_repository.changeset_revision, required_repository.status, required_repository.name ) )
+ repository_dict = dict( name=repository.name,
+ owner=repository.owner,
+ changeset_revision=repository.changeset_revision )
+ test_install_repositories.generate_deactivate_or_uninstall_method( repository_dict, deactivate=False )
+ log.debug( 'Changeset revision %s of %s repository %s selected for uninstallation.' % \
+ ( repository.changeset_revision, repository.status, repository.name ) )
+ # Set up nose to run the generated uninstall method as a functional test.
+ test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
+ test_config.configure( sys.argv )
+ # Run the uninstall method. This method uses the Galaxy web interface to uninstall the previously installed
+ # repository and delete it from disk.
+ result, _ = run_tests( test_config )
+ success = result.wasSuccessful()
+ return success
+
+def uninstall_tool_dependency( app, tool_dependency ):
+ # Clean out any generated tests. This is necessary for Twill.
+ tool_dependency_install_path = tool_dependency.installation_directory( app )
+ uninstalled, error_message = tool_dependency_util.remove_tool_dependency( app, tool_dependency )
+ if error_message:
+ log.debug( error_message )
+ sa_session = app.model.context.current
+ if not uninstalled or tool_dependency.status != app.model.ToolDependency.installation_status.UNINSTALLED:
+ tool_dependency.status = app.model.ToolDependency.installation_status.UNINSTALLED
+ sa_session.add( tool_dependency )
+ sa_session.flush()
+ if os.path.exists( tool_dependency_install_path ):
+ log.debug( 'Uninstallation of tool dependency succeeded, but the installation path still exists on the filesystem. It is now being explicitly deleted.')
+ suc.remove_dir( tool_dependency_install_path )
+ return success
+
+
def main():
# ---- Configuration ------------------------------------------------------
galaxy_test_host = os.environ.get( 'GALAXY_INSTALL_TEST_HOST', default_galaxy_test_host )
@@ -601,10 +716,7 @@
os.mkdir( galaxy_test_tmp_dir )
galaxy_test_proxy_port = None
# Allow the option to keep or delete tool dependencies when a repository has been tested.
- if 'GALAXY_INSTALL_TEST_KEEP_TOOL_DEPENDENCIES' in os.environ:
- deactivate_only = True
- else:
- deactivate_only = False
+ deactivate = asbool( os.environ.get( 'GALAXY_INSTALL_TEST_KEEP_TOOL_DEPENDENCIES', False ) )
# Set up the configuration files for the Galaxy instance.
shed_tool_data_table_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_SHED_TOOL_DATA_TABLE_CONF', os.path.join( galaxy_test_tmp_dir, 'test_shed_tool_data_table_conf.xml' ) )
galaxy_tool_data_table_conf_file = os.environ.get( 'GALAXY_INSTALL_TEST_TOOL_DATA_TABLE_CONF', tool_data_table_conf )
@@ -766,10 +878,7 @@
else:
raise Exception( "Test HTTP server did not return '200 OK' after 10 tries" )
log.info( "Embedded galaxy web server started" )
- if galaxy_test_proxy_port:
- log.info( "The embedded Galaxy application is running on %s:%s", galaxy_test_host, galaxy_test_proxy_port )
- else:
- log.info( "The embedded Galaxy application is running on %s:%s", galaxy_test_host, galaxy_test_port )
+ log.info( "The embedded Galaxy application is running on %s:%s", galaxy_test_host, galaxy_test_port )
log.info( "Repositories will be installed from the tool shed at %s", galaxy_tool_shed_url )
success = False
# If a tool_data_table_conf.test.xml file was found, add the entries from it into the app's tool data tables.
@@ -889,6 +998,7 @@
# Use the repository information dict to generate an install method that will install the repository into the embedded
# Galaxy application, with tool dependencies and repository dependencies, if any.
test_install_repositories.generate_install_method( repository_info_dict )
+ # Set the GALAXY_INSTALL_TEST_HOST variable so that Twill will have the Galaxy url to install repositories into.
os.environ[ 'GALAXY_INSTALL_TEST_HOST' ] = galaxy_test_host
# Configure nose to run the install method as a test.
test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
@@ -907,6 +1017,8 @@
# If the installation succeeds, configure and run functional tests for this repository. This is equivalent to
# sh run_functional_tests.sh -installed
if success:
+ # Clean out repository installation methods.
+ remove_install_tests( app )
log.debug( 'Installation of %s succeeded, running all defined functional tests.', name )
# Generate the shed_tools_dict that specifies the location of test data contained within this repository. If the repository
# does not have a test-data directory, this will return has_test_data = False, and we will set the do_not_test flag to True,
@@ -996,51 +1108,15 @@
# "reason": "The Galaxy development team has determined that this repository should not be installed and tested by the automated framework."
# }
# }
- failed_tool_dependencies = get_failed_tool_dependencies( repository )
- failed_repository_dependencies = repository.repository_dependencies_with_installation_errors
if 'missing_test_components' not in repository_status:
repository_status[ 'missing_test_components' ] = []
- if not has_test_data:
- # If the repository does not have a test-data directory, any functional tests in the tool configuration will
- # fail. Mark the repository as failed and skip installation.
- log.error( 'Test data is missing for this repository. Updating repository and skipping functional tests.' )
- # Record the lack of test data if the repository metadata defines tools.
- if 'tools' in repository.metadata:
- for tool in repository.metadata[ 'tools' ]:
- tool_id = tool[ 'id' ]
- tool_version = tool[ 'version' ]
- tool_guid = tool[ 'guid' ]
- # In keeping with the standard display layout, add the error message to the dict for each tool individually.
- missing_components = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid,
- missing_components="Repository %s is missing a test-data directory." % name )
- if missing_components not in repository_status[ 'missing_test_components' ]:
- repository_status[ 'missing_test_components' ].append( missing_components )
- else:
- continue
- # Record the status of this repository in the tool shed.
- set_do_not_test = not is_latest_downloadable_revision( galaxy_tool_shed_url, repository_info_dict )
- params[ 'tools_functionally_correct' ] = False
- params[ 'missing_test_components' ] = True
- params[ 'do_not_test' ] = str( set_do_not_test )
- register_test_result( galaxy_tool_shed_url,
- metadata_revision_id,
- repository_status,
- repository_info_dict,
- params )
- # Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
- # repository using Twill.
- execute_uninstall_method( app, deactivate_only )
- # Set the test_toolbox.toolbox module-level variable to the new app.toolbox.
- test_toolbox.toolbox = app.toolbox
- repositories_failed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
- elif failed_tool_dependencies or failed_repository_dependencies:
+ if repository.missing_tool_dependencies or repository.missing_repository_dependencies:
# If a tool dependency fails to install correctly, this should be considered an installation error,
# and functional tests should be skipped, since the tool dependency needs to be correctly installed
# for the test to be considered reliable.
- log.error( 'One or more dependencies of this repository are marked as missing.' )
- log.error( 'Updating repository and skipping functional tests.' )
+ log.error( 'The following dependencies of this repository are missing, skipping functional tests.' )
# In keeping with the standard display layout, add the error message to the dict for each tool individually.
- for dependency in failed_tool_dependencies:
+ for dependency in repository.missing_tool_dependencies:
log.error( 'Missing tool dependency %s of type %s version %s: %s' % \
( str( dependency.name ), str( dependency.type ), str( dependency.version ), str( dependency.error_message ) ) )
test_result = dict( type=dependency.type,
@@ -1048,7 +1124,7 @@
version=dependency.version,
error_message=dependency.error_message )
repository_status[ 'installation_errors' ][ 'tool_dependencies' ].append( test_result )
- for dependency in repository.repository_dependencies_with_installation_errors:
+ for dependency in repository.missing_repository_dependencies:
log.error( 'Missing repository dependency %s changeset revision %s owned by %s: %s' % \
( str( dependency.name ), str( dependency.changeset_revision ), str( dependency.owner ), str( dependency.error_message ) ) )
test_result = dict( tool_shed=dependency.tool_shed,
@@ -1066,11 +1142,22 @@
repository_status,
repository_info_dict,
params )
- # Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
- # repository using Twill. If tool dependencies failed installation, select to uninstall instead of deavctivate,
- # to make way for the next attempt. Otherwise, default to the value determined by the environment variable
- # GALAXY_INSTALL_TEST_KEEP_TOOL_DEPENDENCIES.
- execute_uninstall_method( app, deactivate_only=deactivate_only )
+ # Since this repository is missing components, we do not want to test it. Deactivate this repository.
+ # The deactivate flag is set to True if the environment variable GALAXY_INSTALL_TEST_KEEP_TOOL_DEPENDENCIES
+ # is set to 'true'.
+ if deactivate:
+ # Recursively retrieve every missing tool dependency for this repository and its required repositories.
+ log.debug( 'Due to the above missing tool dependencies, we are now uninstalling the following tool dependencies, but not changing their repositories.' )
+ missing_tool_dependencies = get_missing_tool_dependencies( repository )
+ for missing_tool_dependency in missing_tool_dependencies:
+ uninstall_tool_dependency( app, missing_tool_dependency )
+ # We are deactivating this repository and all of its repository dependencies.
+ log.debug( 'Due to the above missing repository dependencies, we are now deactivating the following repositories.' )
+ deactivate_repository( app, repository_info_dict )
+ else:
+ # We are uninstalling this repository and all of its repository dependencies.
+ log.debug( 'Due to the above missing repository dependencies, we are now uninstalling the following repositories.' )
+ uninstall_repository( app, repository_info_dict )
# Set the test_toolbox.toolbox module-level variable to the new app.toolbox.
test_toolbox.toolbox = app.toolbox
repositories_failed_install.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
@@ -1142,10 +1229,18 @@
changeset_revision, name )
# Run the uninstall method. This removes tool functional test methods from the test_toolbox module and uninstalls the
# repository using Twill.
- log.debug( 'Uninstalling changeset revision %s of repository %s',
- repository_info_dict[ 'changeset_revision' ],
- repository_info_dict[ 'name' ] )
- success = execute_uninstall_method( app, deactivate_only )
+ if deactivate:
+ log.debug( 'Deactivating changeset revision %s of repository %s',
+ repository_info_dict[ 'changeset_revision' ],
+ repository_info_dict[ 'name' ] )
+ # We are deactivating this repository and all of its repository dependencies.
+ success = deactivate_repository( app, repository_info_dict )
+ else:
+ log.debug( 'Uninstalling changeset revision %s of repository %s',
+ repository_info_dict[ 'changeset_revision' ],
+ repository_info_dict[ 'name' ] )
+ # We are uninstalling this repository and all of its repository dependencies.
+ success = uninstall_repository( app, repository_info_dict )
if not success:
log.error( 'Repository %s failed to uninstall.', repository_info_dict[ 'name' ] )
# Set the test_toolbox.toolbox module-level variable to the new app.toolbox.
@@ -1172,9 +1267,14 @@
repository_info_dict,
params )
try:
- success = execute_uninstall_method( app, deactivate_only )
+ if deactivate:
+ # We are deactivating this repository and all of its repository dependencies.
+ success = deactivate_repository( app, repository_info_dict )
+ else:
+ # We are uninstalling this repository and all of its repository dependencies.
+ success = uninstall_repository( app, repository_info_dict )
except:
- log.exception( 'Encountered error attempting to uninstall %s.', repository_info_dict[ 'name' ] )
+ log.exception( 'Encountered error attempting to deactivate or uninstall %s.', repository_info_dict[ 'name' ] )
success = False
if not success:
log.error( 'Repository %s failed to uninstall.', repository_info_dict[ 'name' ] )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
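One behavioral change worth noting in this refactoring: the deactivate flag is now derived with asbool() from the value of GALAXY_INSTALL_TEST_KEEP_TOOL_DEPENDENCIES rather than from the variable's mere presence, so setting it to 'false' no longer counts as enabled. A rough equivalent of the two checks (the asbool here is a stand-in for galaxy.util.asbool, assuming its usual truthy strings):

    import os

    def asbool(value):
        # Rough stand-in for galaxy.util.asbool: only common truthy
        # strings count as True.
        return str(value).strip().lower() in ('true', 'yes', 'on', '1')

    # Old behavior: mere presence enabled the flag, even with value 'false'.
    old_deactivate = 'GALAXY_INSTALL_TEST_KEEP_TOOL_DEPENDENCIES' in os.environ
    # New behavior: the value itself must be truthy.
    new_deactivate = asbool(os.environ.get('GALAXY_INSTALL_TEST_KEEP_TOOL_DEPENDENCIES', False))
    print(old_deactivate, new_deactivate)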
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/ca7110d33157/
Changeset: ca7110d33157
User: Dave Bouvier
Date: 2013-11-14 22:31:34
Summary: Rebuild the list of sniffer elem classes after each one is removed from sniffer_elems, in order to keep the mapping current.
Affected #: 1 file
diff -r 51105146f487822cb1d38ba3396882d610e0282e -r ca7110d331571b4a3bbfdd75037053ea4c40b746 lib/galaxy/datatypes/registry.py
--- a/lib/galaxy/datatypes/registry.py
+++ b/lib/galaxy/datatypes/registry.py
@@ -358,6 +358,7 @@
for index, s_e_c in enumerate( sniffer_elem_classes ):
if sniffer_class == s_e_c:
del self.sniffer_elems[ index ]
+ sniffer_elem_classes = [ e.attrib[ 'type' ] for e in self.sniffer_elems ]
self.log.debug( "Removed sniffer element for datatype '%s'" % str( dtype ) )
break
for sniffer_class in self.sniff_order:
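The rebuild matters because sniffer_elem_classes is derived from self.sniffer_elems: once an element is deleted, positions in the derived list no longer line up with the source list, and a later index-based deletion would remove the wrong element. A minimal sketch of the stale-mapping hazard, with plain lists standing in for the registry's elems:

    sniffer_elems = ['fastq', 'bam', 'vcf']
    # Derived mapping: index i in this list corresponds to sniffer_elems[i].
    sniffer_elem_classes = [e.upper() for e in sniffer_elems]
    del sniffer_elems[0]
    # sniffer_elem_classes still has three entries, so index-based deletes
    # would now hit the wrong element; rebuild to realign the mapping.
    sniffer_elem_classes = [e.upper() for e in sniffer_elems]
    print(sniffer_elem_classes)  # -> ['BAM', 'VCF']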
https://bitbucket.org/galaxy/galaxy-central/commits/093626ae621f/
Changeset: 093626ae621f
Branch: stable
User: greg
Date: 2013-11-14 21:51:24
Summary: Display an error message rather than raising an exception when handling invalid complex repository dependencies in the Tool Shed.
Affected #: 1 file
diff -r e54372d60f111f4be26868ba416649697b551a13 -r 093626ae621f73b9cf904c935eddae98a8823839 lib/tool_shed/util/commit_util.py
--- a/lib/tool_shed/util/commit_util.py
+++ b/lib/tool_shed/util/commit_util.py
@@ -142,14 +142,13 @@
# <repository name="package_eigen_2_0" owner="test" prior_installation_required="True" />
revised, repository_elem, error_message = handle_repository_dependency_elem( trans, sub_elem, unpopulate=unpopulate )
if error_message:
- exception_message = 'The tool_dependencies.xml file contains an invalid <repository> tag. %s' % error_message
- raise Exception( exception_message )
+ error_message = 'The tool_dependencies.xml file contains an invalid <repository> tag. %s' % error_message
if revised:
elem[ sub_elem_index ] = repository_elem
sub_elem_altered = True
if not altered:
altered = True
- return altered, sub_elem_altered, elem
+ return altered, sub_elem_altered, elem, error_message
def handle_directory_changes( trans, repository, full_path, filenames_in_archive, remove_repo_files_not_in_tar, new_repo_alert, commit_message,
undesirable_dirs_removed, undesirable_files_removed ):
@@ -359,13 +358,16 @@
for package_index, package_elem in enumerate( root_elem ):
if package_elem.tag == 'repository':
# We have a complex repository dependency.
- altered, package_altered, root_elem = handle_complex_repository_dependency_elem( trans,
- root_elem,
- package_index,
- package_elem,
- package_altered,
- altered,
- unpopulate=unpopulate )
+ altered, package_altered, root_elem, message = \
+ handle_complex_repository_dependency_elem( trans,
+ root_elem,
+ package_index,
+ package_elem,
+ package_altered,
+ altered,
+ unpopulate=unpopulate )
+ if message:
+ error_message += message
elif package_elem.tag == 'install':
# <install version="1.0">
for actions_index, actions_elem in enumerate( package_elem ):
@@ -392,7 +394,7 @@
for last_actions_elem_package_index, last_actions_elem_package_elem in enumerate( last_actions_elem ):
if last_actions_elem_package_elem.tag == 'repository':
# We have a complex repository dependency.
- altered, last_actions_package_altered, last_actions_elem = \
+ altered, last_actions_package_altered, last_actions_elem, message = \
handle_complex_repository_dependency_elem( trans,
last_actions_elem,
last_actions_elem_package_index,
@@ -400,6 +402,8 @@
last_actions_package_altered,
altered,
unpopulate=unpopulate )
+ if message:
+ error_message += message
if last_actions_package_altered:
last_actions_elem[ last_actions_elem_package_index ] = last_actions_elem_package_elem
actions_group_elem[ last_actions_index ] = last_actions_elem
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
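Returning the message as a fourth element lets the caller accumulate errors from several <repository> tags and surface them all at once, where the old raise aborted processing on the first bad tag. The control-flow difference in miniature (a hypothetical validator, not the Tool Shed code):

    def check_repository_tag(tag):
        # Return an error string instead of raising, so the caller can
        # accumulate messages and keep processing the remaining tags.
        if 'name' not in tag:
            return 'The tool_dependencies.xml file contains an invalid <repository> tag. '
        return ''

    error_message = ''
    for tag in [dict(name='package_eigen_2_0'), dict()]:
        message = check_repository_tag(tag)
        if message:
            error_message += message
    print(error_message or 'no errors')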
commit/galaxy-central: greg: Display an error message rather than raising an exception when handling invalid complex repository dependencies in the Tool Shed.
by commits-noreply@bitbucket.org 14 Nov '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/51105146f487/
Changeset: 51105146f487
User: greg
Date: 2013-11-14 21:51:24
Summary: Display an error message rather than raising an exception when handling invalid complex repository dependencies in the Tool Shed.
Affected #: 1 file
diff -r 418993fac75d07297dc317e183cf401fd9fbb869 -r 51105146f487822cb1d38ba3396882d610e0282e lib/tool_shed/util/commit_util.py
--- a/lib/tool_shed/util/commit_util.py
+++ b/lib/tool_shed/util/commit_util.py
@@ -142,14 +142,13 @@
# <repository name="package_eigen_2_0" owner="test" prior_installation_required="True" />
revised, repository_elem, error_message = handle_repository_dependency_elem( trans, sub_elem, unpopulate=unpopulate )
if error_message:
- exception_message = 'The tool_dependencies.xml file contains an invalid <repository> tag. %s' % error_message
- raise Exception( exception_message )
+ error_message = 'The tool_dependencies.xml file contains an invalid <repository> tag. %s' % error_message
if revised:
elem[ sub_elem_index ] = repository_elem
sub_elem_altered = True
if not altered:
altered = True
- return altered, sub_elem_altered, elem
+ return altered, sub_elem_altered, elem, error_message
def handle_directory_changes( trans, repository, full_path, filenames_in_archive, remove_repo_files_not_in_tar, new_repo_alert, commit_message,
undesirable_dirs_removed, undesirable_files_removed ):
@@ -359,13 +358,16 @@
for package_index, package_elem in enumerate( root_elem ):
if package_elem.tag == 'repository':
# We have a complex repository dependency.
- altered, package_altered, root_elem = handle_complex_repository_dependency_elem( trans,
- root_elem,
- package_index,
- package_elem,
- package_altered,
- altered,
- unpopulate=unpopulate )
+ altered, package_altered, root_elem, message = \
+ handle_complex_repository_dependency_elem( trans,
+ root_elem,
+ package_index,
+ package_elem,
+ package_altered,
+ altered,
+ unpopulate=unpopulate )
+ if message:
+ error_message += message
elif package_elem.tag == 'install':
# <install version="1.0">
for actions_index, actions_elem in enumerate( package_elem ):
@@ -392,7 +394,7 @@
for last_actions_elem_package_index, last_actions_elem_package_elem in enumerate( last_actions_elem ):
if last_actions_elem_package_elem.tag == 'repository':
# We have a complex repository dependency.
- altered, last_actions_package_altered, last_actions_elem = \
+ altered, last_actions_package_altered, last_actions_elem, message = \
handle_complex_repository_dependency_elem( trans,
last_actions_elem,
last_actions_elem_package_index,
@@ -400,6 +402,8 @@
last_actions_package_altered,
altered,
unpopulate=unpopulate )
+ if message:
+ error_message += message
if last_actions_package_altered:
last_actions_elem[ last_actions_elem_package_index ] = last_actions_elem_package_elem
actions_group_elem[ last_actions_index ] = last_actions_elem
Repository URL: https://bitbucket.org/galaxy/galaxy-central/