1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/a7ea7728829d/
changeset: a7ea7728829d
user: dan
date: 2012-12-11 20:14:14
summary: Add 'checkers' namespace before calls to .check_*.
affected #: 1 file
diff -r eed6dd67514b5e5ab0174f181af9514dea7a8d33 -r a7ea7728829dcc5395606bd402805d2ac972d6c6 lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -846,17 +846,17 @@
if is_column_based( file_path ):
return True
# If the file is any of the following, don't copy it.
- if check_html( file_path ):
+ if checkers.check_html( file_path ):
return False
- if check_image( file_path ):
+ if checkers.check_image( file_path ):
return False
- if check_binary( name=file_path ):
+ if checkers.check_binary( name=file_path ):
return False
- if is_bz2( file_path ):
+ if checkers.is_bz2( file_path ):
return False
- if is_gzip( file_path ):
+ if checkers.is_gzip( file_path ):
return False
- if check_zip( file_path ):
+ if checkers.check_zip( file_path ):
return False
# Default to copying the file if none of the above are true.
return True
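For context, the qualified calls above assume the checkers module is imported by name (for example "from galaxy.datatypes import checkers") rather than star-imported. A minimal sketch of the same copy-or-skip check under that assumption (function name hypothetical; checker names and signatures taken from the diff):

    from galaxy.datatypes import checkers

    def ok_to_copy( file_path ):
        """Sketch: return False for any file type that should never be copied."""
        for check in ( checkers.check_html, checkers.check_image,
                       checkers.is_bz2, checkers.is_gzip, checkers.check_zip ):
            if check( file_path ):
                return False
        if checkers.check_binary( name=file_path ):
            return False
        # Default to copying the file if none of the above are true.
        return True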
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/eed6dd67514b/
changeset: eed6dd67514b
user: greg
date: 2012-12-11 17:54:09
summary: Fix for building tool dependencies container.
affected #: 1 file
diff -r 71c3b867efd1d8074a59659ed7320c1e6a3d9ed3 -r eed6dd67514b5e5ab0174f181af9514dea7a8d33 lib/galaxy/util/shed_util_common.py
--- a/lib/galaxy/util/shed_util_common.py
+++ b/lib/galaxy/util/shed_util_common.py
@@ -99,7 +99,7 @@
repository_dependencies=repository_dependencies )
containers_dict[ 'repository_dependencies' ] = repository_dependencies_root_folder
if tool_dependencies:
- folder_id, tool_dependencies_root_folder = build_tool_dependencies_folder( folder_id, tool_dependencies, for_galaxy=True )
+ folder_id, tool_dependencies_root_folder = container_util.build_tool_dependencies_folder( folder_id, tool_dependencies, for_galaxy=True )
containers_dict[ 'tool_dependencies' ] = tool_dependencies_root_folder
except Exception, e:
log.debug( "Exception in build_repository_containers_for_galaxy: %s" % str( e ) )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/795a1799e7dc/
changeset: 795a1799e7dc
user: carlfeberhard
date: 2012-12-10 23:44:11
summary: Fixes to history functional tests; Twilltestcase: added function to parse and check json, exact string matcher
affected #: 5 files
diff -r e19bf2b117638221414239698f840730a2cd0569 -r 795a1799e7dcf092b14f7e2a2dba3fb389403531 lib/galaxy/webapps/galaxy/controllers/root.py
--- a/lib/galaxy/webapps/galaxy/controllers/root.py
+++ b/lib/galaxy/webapps/galaxy/controllers/root.py
@@ -130,6 +130,7 @@
history_panel_template = "root/history.mako"
else:
+ # get all datasets server-side, client-side will get flags and render appropriately
datasets = self.get_history_datasets( trans, history,
show_deleted=True, show_hidden=True, show_purged=True )
diff -r e19bf2b117638221414239698f840730a2cd0569 -r 795a1799e7dcf092b14f7e2a2dba3fb389403531 scripts/functional_tests.py
--- a/scripts/functional_tests.py
+++ b/scripts/functional_tests.py
@@ -241,8 +241,9 @@
try:
os.makedirs( dir )
except OSError:
- pass
- print "Database connection:", database_connection
+ pass
+ log.info( "Database connection:", database_connection )
+
# ---- Build Application --------------------------------------------------
app = None
if start_server:
@@ -412,6 +413,8 @@
if os.path.exists( tempdir ) and 'GALAXY_TEST_NO_CLEANUP' not in os.environ:
log.info( "Cleaning up temporary files in %s" % tempdir )
shutil.rmtree( tempdir )
+ else:
+ log.info( "GALAXY_TEST_NO_CLEANUP is on. Temporary files in %s" % tempdir )
except:
pass
if psu_production and 'GALAXY_TEST_NO_CLEANUP' not in os.environ:
diff -r e19bf2b117638221414239698f840730a2cd0569 -r 795a1799e7dcf092b14f7e2a2dba3fb389403531 templates/root/alternate_history.mako
--- a/templates/root/alternate_history.mako
+++ b/templates/root/alternate_history.mako
@@ -310,11 +310,12 @@
// ostensibly, this is the App
// LOAD INITIAL DATA IN THIS PAGE - since we're already sending it...
// ...use mako to 'bootstrap' the models
- var user = ${ get_current_user() },
+ var page_show_deleted = ${ 'true' if show_deleted == True else ( 'null' if show_deleted == None else 'false' ) },
+ page_show_hidden = ${ 'true' if show_hidden == True else ( 'null' if show_hidden == None else 'false' ) },
+
+ user = ${ get_current_user() },
history = ${ get_history( history.id ) },
hdas = ${ get_hdas( history.id, datasets ) };
- var currUser = new User( user );
- if( !Galaxy.currUser ){ Galaxy.currUser = currUser; }
// add user data to history
// i don't like this history+user relationship, but user authentication changes views/behaviour
@@ -326,8 +327,8 @@
urlTemplates : galaxy_paths.attributes,
logger : ( debugging )?( console ):( null ),
// is page sending in show settings? if so override history's
- show_deleted : ${ 'true' if show_deleted == True else ( 'null' if show_deleted == None else 'false' ) },
- show_hidden : ${ 'true' if show_hidden == True else ( 'null' if show_hidden == None else 'false' ) }
+ show_deleted : page_show_deleted,
+ show_hidden : page_show_hidden
});
historyPanel.render();
@@ -337,15 +338,16 @@
// urlTemplates : galaxy_paths.attributes,
// logger : ( debugging )?( console ):( null ),
// // is page sending in show settings? if so override history's
- // show_deleted : ${ 'true' if show_deleted == True else ( 'null' if show_deleted == None else 'false' ) },
- // show_hidden : ${ 'true' if show_hidden == True else ( 'null' if show_hidden == None else 'false' ) }
+ // show_deleted : page_show_deleted,
+ // show_hidden : page_show_hidden
//});
//historyPanel.model.loadFromApi( history.id );
// set it up to be accessible across iframes
//TODO:?? mem leak
top.Galaxy.currHistoryPanel = historyPanel;
-
+ var currUser = new User( user );
+ if( !Galaxy.currUser ){ Galaxy.currUser = currUser; }
// QUOTA METER is a cross-frame ui element (meter in masthead, over quota message in history)
// create it and join them here for now (via events)
diff -r e19bf2b117638221414239698f840730a2cd0569 -r 795a1799e7dcf092b14f7e2a2dba3fb389403531 test/base/twilltestcase.py
--- a/test/base/twilltestcase.py
+++ b/test/base/twilltestcase.py
@@ -249,8 +249,9 @@
page = self.last_page()
if page.find( 'error' ) > -1:
raise AssertionError('Errors in the history for user %s' % self.user )
+
def check_history_for_string( self, patt, show_deleted=False ):
- """Looks for 'string' in history page"""
+ """Breaks patt on whitespace and searches for each element seperately in the history"""
self.home()
if show_deleted:
self.visit_page( "history?show_deleted=True" )
@@ -264,11 +265,57 @@
errmsg = "no match to '%s'\npage content written to '%s'" % ( subpatt, fname )
raise AssertionError( errmsg )
self.home()
+
+ def check_history_for_exact_string( self, string, show_deleted=False ):
+ """Looks for exact match to 'string' in history page"""
+ self.home()
+ if show_deleted:
+ self.visit_page( "history?show_deleted=True" )
+ else:
+ self.visit_page( "history" )
+ try:
+ tc.find( string )
+ except:
+ fname = self.write_temp_file( tc.browser.get_html() )
+ errmsg = "no match to '%s'\npage content written to '%s'" % ( string, fname )
+ raise AssertionError( errmsg )
+ self.home()
+
+ def check_history_json( self, pattern, check_fn, show_deleted=None, multiline=True ):
+ """
+ Tries to find a JSON string in the history page using the regex pattern,
+ parse it, and assert check_fn returns True when called on that parsed
+ data.
+ """
+ self.home()
+ if show_deleted:
+ self.visit_page( "history?show_deleted=True" )
+ elif show_deleted == False:
+ self.visit_page( "history?show_deleted=False" )
+ else:
+ self.visit_page( "history" )
+ try:
+ tc.find( pattern, flags=( 'm' if multiline else '' ) )
+ # twill stores the regex match in a special stack variable
+ match = twill.namespaces.get_twill_glocals()[1][ '__match__' ]
+ json_data = from_json_string( match )
+ assert check_fn( json_data ), 'failed check_fn'
+
+ except Exception, exc:
+ log.error( exc, exc_info=True )
+ fname = self.write_temp_file( tc.browser.get_html() )
+ errmsg = ( "json '%s' could not be found or failed check_fn" % ( pattern ) +
+ "\npage content written to '%s'" % ( fname ) )
+ raise AssertionError( errmsg )
+
+ self.home()
+
def clear_history( self ):
"""Empties a history of all datasets"""
self.visit_page( "clear_history" )
self.check_history_for_string( 'Your history is empty' )
self.home()
+
def delete_history( self, id ):
"""Deletes one or more histories"""
history_list = self.get_histories_as_data_list()
@@ -279,6 +326,7 @@
check_str = 'Deleted %d %s' % ( num_deleted, iff( num_deleted != 1, "histories", "history" ) )
self.check_page_for_string( check_str )
self.home()
+
def delete_current_history( self, strings_displayed=[] ):
"""Deletes the current history"""
self.home()
@@ -286,16 +334,19 @@
for check_str in strings_displayed:
self.check_page_for_string( check_str )
self.home()
+
def get_histories_as_data_list( self ):
"""Returns the data elements of all histories"""
tree = self.histories_as_xml_tree()
data_list = [ elem for elem in tree.findall("data") ]
return data_list
+
def get_history_as_data_list( self, show_deleted=False ):
"""Returns the data elements of a history"""
tree = self.history_as_xml_tree( show_deleted=show_deleted )
data_list = [ elem for elem in tree.findall("data") ]
return data_list
+
def history_as_xml_tree( self, show_deleted=False ):
"""Returns a parsed xml object of a history"""
self.home()
@@ -303,6 +354,7 @@
xml = self.last_page()
tree = ElementTree.fromstring(xml)
return tree
+
def histories_as_xml_tree( self ):
"""Returns a parsed xml object of all histories"""
self.home()
@@ -310,6 +362,7 @@
xml = self.last_page()
tree = ElementTree.fromstring(xml)
return tree
+
def history_options( self, user=False, active_datasets=False, activatable_datasets=False, histories_shared_by_others=False ):
"""Mimics user clicking on history options link"""
self.home()
@@ -329,6 +382,7 @@
self.check_page_for_string( 'Rename</a> current history' )
self.check_page_for_string( 'Delete</a> current history' )
self.home()
+
def new_history( self, name=None ):
"""Creates a new, empty history"""
self.home()
@@ -338,6 +392,7 @@
self.visit_url( "%s/history_new" % self.url )
self.check_history_for_string('Your history is empty')
self.home()
+
def rename_history( self, id, old_name, new_name ):
"""Rename an existing history"""
self.home()
@@ -345,6 +400,7 @@
check_str = 'History: %s renamed to: %s' % ( old_name, urllib.unquote( new_name ) )
self.check_page_for_string( check_str )
self.home()
+
def set_history( self ):
"""Sets the history (stores the cookies for this run)"""
if self.history_id:
@@ -353,6 +409,7 @@
else:
self.new_history()
self.home()
+
def share_current_history( self, email, strings_displayed=[], strings_displayed_after_submit=[],
action='', action_strings_displayed=[], action_strings_displayed_after_submit=[] ):
"""Share the current history with different users"""
@@ -372,6 +429,7 @@
for check_str in action_strings_displayed_after_submit:
self.check_page_for_string( check_str )
self.home()
+
def share_histories_with_users( self, ids, emails, strings_displayed=[], strings_displayed_after_submit=[],
action=None, action_strings_displayed=[] ):
"""Share one or more histories with one or more different users"""
@@ -389,6 +447,7 @@
for check_str in action_strings_displayed:
self.check_page_for_string( check_str )
self.home()
+
def unshare_history( self, history_id, user_id, strings_displayed=[] ):
"""Unshare a history that has been shared with another user"""
self.visit_url( "%s/history/list?id=%s&operation=share+or+publish" % ( self.url, history_id ) )
@@ -396,12 +455,14 @@
self.check_page_for_string( check_str )
self.visit_url( "%s/history/sharing?unshare_user=%s&id=%s" % ( self.url, user_id, history_id ) )
self.home()
+
def switch_history( self, id='', name='' ):
"""Switches to a history in the current list of histories"""
self.visit_url( "%s/history/list?operation=switch&id=%s" % ( self.url, id ) )
if name:
- self.check_history_for_string( escape( name ) )
+ self.check_history_for_exact_string( name )
self.home()
+
def view_stored_active_histories( self, strings_displayed=[] ):
self.home()
self.visit_page( "history/list" )
@@ -698,11 +759,13 @@
# if the server's env has GALAXY_TEST_SAVE, save the output file to that dir
if self.keepOutdir:
ofn = os.path.join( self.keepOutdir, os.path.basename( local_name ) )
+ log.debug( 'keepoutdir: %s, ofn: %s', self.keepOutdir, ofn )
try:
shutil.copy( temp_name, ofn )
except Exception, exc:
error_log_msg = ( 'TwillTestCase could not save output file %s to %s: ' % ( temp_name, ofn ) )
error_log_msg += str( exc )
+ log.error( error_log_msg, exc_info=True )
else:
log.debug('## GALAXY_TEST_SAVE=%s. saved %s' % ( self.keepOutdir, ofn ) )
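A hedged usage sketch of the two helpers added above, written as a method on a TwillTestCase subclass (test name and hda variable hypothetical; the regex and predicate values mirror the calls added to test_history_functions.py below). The first capture group of the pattern must contain the bootstrapped JSON, and check_fn is a predicate over the parsed value:

    def test_show_deleted_flag( self ):
        # page_show_deleted is bootstrapped as null when no query string is sent...
        self.check_history_json( r'\bpage_show_deleted\s*=\s*(.*),', lambda x: x is None )
        # ...and as true when show_deleted=True is passed in the query string.
        self.check_history_json( r'\bpage_show_deleted\s*=\s*(.*),', lambda x: x is True, show_deleted=True )
        # Exact, unescaped strings such as an encoded hda id can be checked directly.
        self.check_history_for_exact_string( self.security.encode_id( some_hda.id ), show_deleted=True )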
diff -r e19bf2b117638221414239698f840730a2cd0569 -r 795a1799e7dcf092b14f7e2a2dba3fb389403531 test/functional/test_history_functions.py
--- a/test/functional/test_history_functions.py
+++ b/test/functional/test_history_functions.py
@@ -250,8 +250,7 @@
sa_session.query( galaxy.model.HistoryDatasetAssociation )
.filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history3.id,
galaxy.model.HistoryDatasetAssociation.table.c.name=='2.bed' ) )
- .first()
- )
+ .first() )
assert hda_2_bed is not None, "Problem retrieving hda_2_bed from database"
self.delete_history_item( str( hda_2_bed.id ) )
@@ -260,8 +259,7 @@
sa_session.query( galaxy.model.HistoryDatasetAssociation )
.filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history3.id,
galaxy.model.HistoryDatasetAssociation.table.c.name=='3.bed' ) )
- .first()
- )
+ .first() )
assert hda_3_bed is not None, "Problem retrieving hda_3_bed from database"
self.delete_history_item( str( hda_3_bed.id ) )
@@ -281,39 +279,52 @@
self.view_stored_active_histories( strings_displayed=[ "Clone of '%s'" % history3.name ] )
# Switch to the cloned history to make sure activatable datasets were cloned
self.switch_history( id=self.security.encode_id( history3_clone2.id ), name=history3_clone2.name )
- hda_2_bed = sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history3_clone2.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='2.bed' ) ) \
- .first()
+ hda_2_bed = (
+ sa_session.query( galaxy.model.HistoryDatasetAssociation )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history3_clone2.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name=='2.bed' ) )
+ .first() )
assert hda_2_bed is not None, "Problem retrieving hda_2_bed from database"
- hda_3_bed = sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history3_clone2.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='3.bed' ) ) \
- .first()
+ hda_3_bed = (
+ sa_session.query( galaxy.model.HistoryDatasetAssociation )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history3_clone2.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name=='3.bed' ) )
+ .first() )
assert hda_3_bed is not None, "Problem retrieving hda_3_bed from database"
+
# Make sure the deleted datasets are included in the cloned history
- check_str = 'This dataset has been deleted. Click undelete id=%d' % hda_2_bed.id
- self.check_history_for_string( check_str, show_deleted=True )
- check_str = 'This dataset has been deleted. Click undelete id=%d' % hda_3_bed.id
- self.check_history_for_string( check_str, show_deleted=True )
+ # check for encoded ids
+ # - these will be available bc the refreshed page will have bootstrapped json for the hdas
+ #NOTE: that these WON'T be available when refreshes become less common
+ # (when the backbone.js is fully integrated and refreshes aren't used after every history function)
+ self.check_history_for_exact_string( self.security.encode_id( hda_2_bed.id ), show_deleted=True )
+ self.check_history_for_exact_string( self.security.encode_id( hda_3_bed.id ), show_deleted=True )
+
# Test cloning only active datasets
- self.clone_history( self.security.encode_id( history3.id ),
- 'active',
- strings_displayed_after_submit=[ 'is now included in your previously stored histories.' ] )
+ self.clone_history(
+ self.security.encode_id( history3.id ),
+ 'active',
+ strings_displayed_after_submit=[ 'is now included in your previously stored histories.' ] )
global history3_clone3
- history3_clone3 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
- .order_by( desc( galaxy.model.History.table.c.create_time ) ) \
- .first()
+ history3_clone3 = (
+ sa_session.query( galaxy.model.History )
+ .filter( and_( galaxy.model.History.table.c.deleted==False,
+ galaxy.model.History.table.c.user_id==admin_user.id ) )
+ .order_by( desc( galaxy.model.History.table.c.create_time ) )
+ .first()
+ )
assert history3_clone3 is not None, "Problem retrieving history3_clone3 from database"
+
# Check list of histories to make sure shared history3 was cloned
self.view_stored_active_histories( strings_displayed = ["Clone of '%s'" % history3.name ] )
- # Switch to the cloned history to make sure activatable datasets were cloned
+
+ # Switch to the cloned history to make sure ONLY activatable datasets were cloned
self.switch_history( id=self.security.encode_id( history3_clone3.id ) )
# Make sure the deleted datasets are NOT included in the cloned history
+ # - again using the bootstrapped json for the hdas
try:
- self.check_history_for_string( 'This dataset has been deleted.', show_deleted=True )
+ self.check_history_for_exact_string( '"deleted": true', show_deleted=True )
+ #self.check_history_for_string( 'This dataset has been deleted.', show_deleted=True )
raise AssertionError, "Deleted datasets incorrectly included in cloned history history3_clone3"
except:
pass
@@ -349,6 +360,7 @@
# Shared history3 should be in regular_user3's list of shared histories
self.view_shared_histories( cstrings_displayed=[ history3.name, admin_user.email ] )
"""
+
def test_045_change_permissions_on_current_history( self ):
"""Testing changing permissions on the current history"""
# Logged in as regular_user3
@@ -402,6 +414,7 @@
current_history_permissions.sort()
if current_history_permissions != history5_default_permissions:
raise AssertionError, "With logout and login, the history default permissions are not preserved"
+
def test_050_sharing_restricted_history_by_making_datasets_public( self ):
"""Testing sharing a restricted history by making the datasets public"""
# Logged in as admin_user
@@ -432,6 +445,7 @@
self.check_history_for_string( 'chr1' )
self.logout()
self.login( email=admin_user.email )
+
def test_055_sharing_restricted_history_by_making_new_sharing_role( self ):
"""Testing sharing a restricted history by associating a new sharing role with protected datasets"""
# At this point, history5 should have 1 item, 1.bed, which is public. We'll add another
@@ -506,6 +520,7 @@
self.display_history_item( str( hda_2_bed.id ), strings_displayed=[ 'chr1' ] )
# Delete the clone so the next test will be valid
self.delete_history( id=self.security.encode_id( history5_clone2.id ) )
+
def test_060_sharing_restricted_history_with_multiple_users_by_changing_no_permissions( self ):
"""Testing sharing a restricted history with multiple users, making no permission changes"""
# Logged in as regular_user2
@@ -515,10 +530,12 @@
# regular_user2 should be able to access history5's 2.bed dataset since it is associated with a
# sharing role, and regular_user3 should be able to access history5's 1.bed, but not 2.bed even
# though they can see it in their shared history.
+
# We first need to unshare history5 from regular_user2 so that we can re-share it.
self.unshare_history( self.security.encode_id( history5.id ),
self.security.encode_id( regular_user2.id ),
strings_displayed=[ regular_user1.email, regular_user2.email ] )
+
# Make sure the history was unshared correctly
self.logout()
self.login( email=regular_user2.email )
@@ -528,11 +545,14 @@
raise AssertionError, "history5 still shared with regular_user2 after unsharing it with that user."
except:
pass
+
self.logout()
self.login( admin_user.email )
email = '%s,%s' % ( regular_user2.email, regular_user3.email )
- strings_displayed_after_submit = [ 'The following datasets can be shared with %s with no changes' % email,
- 'The following datasets can be shared with %s by updating their permissions' % email ]
+ strings_displayed_after_submit = [
+ 'The following datasets can be shared with %s with no changes' % email,
+ 'The following datasets can be shared with %s by updating their permissions' % email ]
+
# history5 will be shared with regular_user1, regular_user2 and regular_user3
self.share_current_history( email,
strings_displayed_after_submit=strings_displayed_after_submit,
@@ -547,30 +567,35 @@
'activatable',
strings_displayed_after_submit=[ 'is now included in your previously stored histories.' ] )
global history5_clone3
- history5_clone3 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==regular_user2.id ) ) \
- .order_by( desc( galaxy.model.History.table.c.create_time ) ) \
- .first()
+ history5_clone3 = (
+ sa_session.query( galaxy.model.History )
+ .filter( and_( galaxy.model.History.table.c.deleted==False,
+ galaxy.model.History.table.c.user_id==regular_user2.id ) )
+ .order_by( desc( galaxy.model.History.table.c.create_time ) )
+ .first() )
assert history5_clone3 is not None, "Problem retrieving history5_clone3 from database"
+
# Check list of histories to make sure shared history3 was cloned
self.view_stored_active_histories( strings_displayed=[ "Clone of '%s'" % history5.name ] )
# Make sure the dataset is accessible
self.switch_history( id=self.security.encode_id( history5_clone3.id ), name=history5_clone3.name )
- # Make sure both datasets are in the history
+ # Make sure both datasets are in the history
self.check_history_for_string( '1.bed' )
self.check_history_for_string( '2.bed' )
# Get both new hdas from the db that were created for the shared history
- hda_1_bed = sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone3.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='1.bed' ) ) \
- .first()
+ hda_1_bed = (
+ sa_session.query( galaxy.model.HistoryDatasetAssociation )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone3.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name=='1.bed' ) )
+ .first() )
assert hda_1_bed is not None, "Problem retrieving hda_1_bed from database"
- hda_2_bed = sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone3.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='2.bed' ) ) \
- .first()
+ hda_2_bed = (
+ sa_session.query( galaxy.model.HistoryDatasetAssociation )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone3.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name=='2.bed' ) )
+ .first() )
assert hda_2_bed is not None, "Problem retrieving hda_2_bed from database"
+
# Make sure 1.bed is accessible since it is public
self.display_history_item( str( hda_1_bed.id ), strings_displayed=[ 'chr1' ] )
# Make sure 2.bed is accessible since it is associated with a sharing role
@@ -582,34 +607,39 @@
self.login( email=regular_user3.email )
# Shared history5 should be in regular_user2's list of shared histories
self.view_shared_histories( strings_displayed=[ history5.name, admin_user.email ] )
+
# Clone restricted history5
self.clone_history( self.security.encode_id( history5.id ),
'activatable',
strings_displayed_after_submit=[ 'is now included in your previously stored histories.' ] )
global history5_clone4
- history5_clone4 = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==regular_user3.id ) ) \
- .order_by( desc( galaxy.model.History.table.c.create_time ) ) \
- .first()
+ history5_clone4 = (
+ sa_session.query( galaxy.model.History )
+ .filter( and_( galaxy.model.History.table.c.deleted==False,
+ galaxy.model.History.table.c.user_id==regular_user3.id ) )
+ .order_by( desc( galaxy.model.History.table.c.create_time ) )
+ .first() )
assert history5_clone4 is not None, "Problem retrieving history5_clone4 from database"
+
# Check list of histories to make sure shared history3 was cloned
self.view_stored_active_histories( strings_displayed=[ "Clone of '%s'" % history5.name ] )
# Make sure the dataset is accessible
self.switch_history( id=self.security.encode_id( history5_clone4.id ), name=history5_clone4.name )
- # Make sure both datasets are in the history
+ # Make sure both datasets are in the history
self.check_history_for_string( '1.bed' )
self.check_history_for_string( '2.bed' )
# Get both new hdas from the db that were created for the shared history
- hda_1_bed = sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone4.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='1.bed' ) ) \
- .first()
+ hda_1_bed = (
+ sa_session.query( galaxy.model.HistoryDatasetAssociation )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone4.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name=='1.bed' ) )
+ .first() )
assert hda_1_bed is not None, "Problem retrieving hda_1_bed from database"
- hda_2_bed = sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
- .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone4.id,
- galaxy.model.HistoryDatasetAssociation.table.c.name=='2.bed' ) ) \
- .first()
+ hda_2_bed = (
+ sa_session.query( galaxy.model.HistoryDatasetAssociation )
+ .filter( and_( galaxy.model.HistoryDatasetAssociation.table.c.history_id==history5_clone4.id,
+ galaxy.model.HistoryDatasetAssociation.table.c.name=='2.bed' ) )
+ .first() )
assert hda_2_bed is not None, "Problem retrieving hda_2_bed from database"
# Make sure 1.bed is accessible since it is public
self.display_history_item( str( hda_1_bed.id ), strings_displayed=[ 'chr1' ] )
@@ -619,20 +649,31 @@
raise AssertionError, "History item 2.bed is accessible by user %s when is should not be" % regular_user3.email
except:
pass
- self.check_history_for_string( 'You do not have permission to view this dataset' )
+
+ # check the history page json for hda_2_bed and if it's accessible
+ def hda_2_bed_is_inaccessible( hda_list ):
+ for hda in hda_list:
+ if hda[ 'id' ] == self.security.encode_id( hda_2_bed.id ):
+ return ( not hda[ 'accessible' ] )
+ return False
+ self.check_history_json( r'\bhdas\s*=\s*(.*);', hda_2_bed_is_inaccessible )
+
# Admin users can view all datasets ( using the history/view feature ), so make sure 2.bed is accessible to the admin
self.logout()
self.login( email=admin_user.email )
self.view_history( str( hda_2_bed.history_id ), strings_displayed=[ '<td>NM_005997_cds_0_0_chr1_147962193_r</td>' ] )
self.logout()
self.login( email=regular_user3.email )
+
# Delete the clone so the next test will be valid
self.delete_history( id=self.security.encode_id( history5_clone4.id ) )
+
def test_065_sharing_private_history_by_choosing_to_not_share( self ):
"""Testing sharing a restricted history with multiple users by choosing not to share"""
- # Logged in as regular_user3
+ # Logged in as regular_user3 - login as admin
self.logout()
self.login( email=admin_user.email )
+
# Unshare history5 from regular_user2
self.unshare_history( self.security.encode_id( history5.id ),
self.security.encode_id( regular_user2.id ),
@@ -641,7 +682,8 @@
self.unshare_history( self.security.encode_id( history5.id ),
self.security.encode_id( regular_user3.id ),
strings_displayed=[ regular_user1.email, regular_user3.email ] )
- # Make sure the history was unshared correctly
+
+ # Make sure the histories were unshared correctly
self.logout()
self.login( email=regular_user2.email )
self.visit_page( "root/history_options" )
@@ -650,6 +692,7 @@
raise AssertionError, "history5 still shared with regular_user2 after unshaing it with that user."
except:
pass
+
self.logout()
self.login( email=regular_user3.email )
self.visit_page( "root/history_options" )
@@ -660,42 +703,73 @@
pass
self.logout()
self.login( email=admin_user.email )
+
def test_070_history_show_and_hide_deleted_datasets( self ):
"""Testing displaying deleted history items"""
+ #NOTE: due to the new client-side rendering of the history, this test isn't very apt
+ # (a) searching for strings in the dom doesn't work (they won't be twill's html) and
+ # (b) all datasets are included in the bootstrapped hda json regardless of the show_deleted setting
+ #CE: for now, I'm changing this to simply check whether the show_deleted flag
+ # is being properly passed to the history control
+ #TODO: this test needs to be moved to client-side testing framework (selenium or other)
+
# Logged in as admin_user
+ # create a new history and upload a new hda (1.bed) into it
self.new_history( name=urllib.quote( 'show hide deleted datasets' ) )
- latest_history = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
- .order_by( desc( galaxy.model.History.table.c.create_time ) ) \
- .first()
+ latest_history = (
+ sa_session.query( galaxy.model.History )
+ .filter( and_( galaxy.model.History.table.c.deleted==False,
+ galaxy.model.History.table.c.user_id==admin_user.id ) )
+ .order_by( desc( galaxy.model.History.table.c.create_time ) )
+ .first() )
assert latest_history is not None, "Problem retrieving latest_history from database"
self.upload_file('1.bed', dbkey='hg18')
- latest_hda = sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
- .order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) ) \
- .first()
+ latest_hda = (
+ sa_session.query( galaxy.model.HistoryDatasetAssociation )
+ .order_by( desc( galaxy.model.HistoryDatasetAssociation.table.c.create_time ) )
+ .first() )
+
+ # delete that item and make sure the 'history empty' message shows
self.home()
+ log.info( 'deleting last hda' )
self.delete_history_item( str( latest_hda.id ) )
- self.check_history_for_string( 'Your history is empty' )
+ # check the historyPanel settings.show_deleted for a null json value (no show_deleted in query string)
+ self.check_history_json( r'\bpage_show_deleted\s*=\s*(.*),', lambda x: x == None )
+
+ # reload this history with the show_deleted flag set in the query string
+ # the deleted dataset should be there with the proper 'deleted' text
self.home()
- self.visit_url( "%s/history/?show_deleted=True" % self.url )
- self.check_page_for_string( 'This dataset has been deleted.' )
- self.check_page_for_string( '1.bed' )
+ log.info( 'turning show_deleted on' )
+ #self.visit_url( "%s/history/?show_deleted=True" % self.url )
+ # check the historyPanel settings.show_deleted for a true json value
+ self.check_history_json( r'\bpage_show_deleted\s*=\s*(.*),', lambda x: x == True, show_deleted=True )
+
+ # reload this history again with the show_deleted flag set TO FALSE in the query string
+ # make sure the 'history empty' message shows
self.home()
- self.visit_url( "%s/history/?show_deleted=False" % self.url )
- self.check_page_for_string( 'Your history is empty' )
+ log.info( 'turning show_deleted off' )
+ #self.visit_url( "%s/history/?show_deleted=False" % self.url )
+ # check the historyPanel settings.show_deleted for a false json value
+ self.check_history_json( r'\bpage_show_deleted\s*=\s*(.*),', lambda x: x == False, show_deleted=False )
+
+ # delete this history
self.delete_history( self.security.encode_id( latest_history.id ) )
+
def test_075_deleting_and_undeleting_history_items( self ):
"""Testing deleting and un-deleting history items"""
# logged in as admin_user
+
# Deleting the current history in the last method created a new history
- latest_history = sa_session.query( galaxy.model.History ) \
- .filter( and_( galaxy.model.History.table.c.deleted==False,
- galaxy.model.History.table.c.user_id==admin_user.id ) ) \
- .order_by( desc( galaxy.model.History.table.c.create_time ) ) \
- .first()
+ latest_history = (
+ sa_session.query( galaxy.model.History )
+ .filter( and_( galaxy.model.History.table.c.deleted==False,
+ galaxy.model.History.table.c.user_id==admin_user.id ) )
+ .order_by( desc( galaxy.model.History.table.c.create_time ) )
+ .first() )
assert latest_history is not None, "Problem retrieving latest_history from database"
- self.rename_history( self.security.encode_id( latest_history.id ), latest_history.name, new_name=urllib.quote( 'delete undelete history items' ) )
+
+ self.rename_history( self.security.encode_id( latest_history.id ),
+ latest_history.name, new_name=urllib.quote( 'delete undelete history items' ) )
# Add a new history item
self.upload_file( '1.bed', dbkey='hg15' )
latest_hda = sa_session.query( galaxy.model.HistoryDatasetAssociation ) \
@@ -722,6 +796,7 @@
self.check_page_for_string( '1.bed' )
self.check_page_for_string( 'hg15' )
self.delete_history( self.security.encode_id( latest_history.id ) )
+
def test_080_copying_history_items_between_histories( self ):
"""Testing copying history items between histories"""
# logged in as admin_user
@@ -776,6 +851,7 @@
self.check_history_for_string( hda1.name )
self.delete_history( self.security.encode_id( history6.id ) )
self.delete_history( self.security.encode_id( history7.id ) )
+
def test_085_reset_data_for_later_test_runs( self ):
"""Reseting data to enable later test runs to to be valid"""
# logged in as admin_user
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/e19bf2b11763/
changeset: e19bf2b11763
user: greg
date: 2012-12-10 22:27:29
summary: Handle circular repository dependencies to "n" levels of depth.
affected #: 3 files
diff -r c460f284077f0b400901e4cd791d94a311425751 -r e19bf2b117638221414239698f840730a2cd0569 lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -1,8 +1,6 @@
import os, tempfile, shutil, logging, urllib2
from galaxy import util
-from galaxy.datatypes.checkers import *
-from galaxy.util.json import *
-from galaxy.util.shed_util_common import *
+from shed_util_common import *
from galaxy.tools.search import ToolBoxSearch
from galaxy.tool_shed.tool_dependencies.install_util import create_or_update_tool_dependency, install_package, set_environment
from galaxy.tool_shed.encoding_util import *
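For orientation on the shed_util_common.py refactor below: the flat repository_dependency tuples and the 'handled' list are replaced by one-entry dicts ("key_rd_dicts") whose key is the name-spaced repository key and whose value is a [ toolshed, name, owner, changeset_revision ] list. A hedged sketch of those shapes (separator and values purely illustrative; the real separator lives in container_util.STRSEP):

    STRSEP = '__SEP__'  # hypothetical stand-in for container_util.STRSEP

    # A repository key is a name-spaced string identifying one repository revision.
    current_repository_key = STRSEP.join(
        [ 'http://toolshed.example.org', 'package_a', 'some_owner', 'abc123def456' ] )

    # A key_rd_dict pairs that key with a single repository_dependency list.
    key_rd_dict = { current_repository_key: [ 'http://toolshed.example.org', 'package_b', 'some_owner', 'fedcba654321' ] }

    # all_repository_dependencies maps each key to the dependencies found for it,
    # plus the 'root_key' and 'description' bookkeeping entries set on the first pass.
    all_repository_dependencies = {
        'root_key': current_repository_key,
        'description': None,
        current_repository_key: [ key_rd_dict[ current_repository_key ] ],
    }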
diff -r c460f284077f0b400901e4cd791d94a311425751 -r e19bf2b117638221414239698f840730a2cd0569 lib/galaxy/util/shed_util_common.py
--- a/lib/galaxy/util/shed_util_common.py
+++ b/lib/galaxy/util/shed_util_common.py
@@ -194,28 +194,18 @@
option_value = trans.security.encode_id( repository.id )
repositories_select_field.add_option( option_label, option_value )
return repositories_select_field
-def can_add_entry_to_all_repository_dependencies( current_repository_key, repository_dependency, all_repository_dependencies ):
- """
- Handle circular repository dependencies that could result in an infinite loop by determining if it is safe to add an entry to the
- repository dependencies container.
- """
- # First check for an exact match - if this is true, the changeset revision was not updated.
- repository_dependency_as_key = container_util.generate_repository_dependencies_key_for_repository( repository_dependency[ 0 ],
- repository_dependency[ 1 ],
- repository_dependency[ 2 ],
- repository_dependency[ 3] )
- current_repository_key_as_repository_dependency = current_repository_key.split( container_util.STRSEP )
- if repository_dependency_as_key in all_repository_dependencies:
- val = all_repository_dependencies[ repository_dependency_as_key ]
- if current_repository_key_as_repository_dependency in val:
- return False
- # Now handle the case where an update to the changeset revision was done, so everything will match except the changeset_revision.
- repository_dependency_as_partial_key = container_util.STRSEP.join( [ repository_dependency[ 0 ], repository_dependency[ 1 ], repository_dependency[ 2 ] ] )
- for key in all_repository_dependencies:
- if key.startswith( repository_dependency_as_partial_key ):
- val = all_repository_dependencies[ key ]
- if current_repository_key_as_repository_dependency in val:
- return False
+def can_add_to_key_rd_dicts( key_rd_dict, key_rd_dicts ):
+ """Handle the case where an update to the changeset revision was done."""
+ k = key_rd_dict.keys()[ 0 ]
+ rd = key_rd_dict[ k ]
+ partial_rd = rd[ 0:3 ]
+ for kr_dict in key_rd_dicts:
+ key = kr_dict.keys()[ 0 ]
+ if key == k:
+ val = kr_dict[ key ]
+ for repository_dependency in val:
+ if repository_dependency[ 0:3 ] == partial_rd:
+ return False
return True
def can_generate_tool_dependency_metadata( root, metadata_dict ):
"""
@@ -555,13 +545,15 @@
metadata = repository_metadata.metadata
if metadata:
# Get a dictionary of all repositories upon which the contents of the received repository depends.
- repository_dependencies = get_repository_dependencies_for_changeset_revision( trans,
- repo,
- repository,
- repository_metadata,
- str( url_for( '/', qualified=True ) ).rstrip( '/' ),
- repository_dependencies=None,
- all_repository_dependencies=None )
+ repository_dependencies = get_repository_dependencies_for_changeset_revision( trans=trans,
+ repo=repo,
+ repository=repository,
+ repository_metadata=repository_metadata,
+ toolshed_base_url=str( url_for( '/', qualified=True ) ).rstrip( '/' ),
+ key_rd_dicts_to_be_processed=None,
+ all_repository_dependencies=None,
+ handled_key_rd_dicts=None,
+ circular_repository_dependencies=None )
# Cast unicode to string.
repo_info_dict[ str( repository.name ) ] = ( str( repository.description ),
str( repository_clone_url ),
@@ -1037,6 +1029,11 @@
if ctx_file_name == stripped_filename:
return manifest_ctx, ctx_file
return None, None
+def get_key_for_repository_changeset_revision( toolshed_base_url, repository, repository_metadata ):
+ return container_util.generate_repository_dependencies_key_for_repository( toolshed_base_url=toolshed_base_url,
+ repository_name=repository.name,
+ repository_owner=repository.user.username,
+ changeset_revision=repository_metadata.changeset_revision )
def get_file_context_from_ctx( ctx, filename ):
# We have to be careful in determining if we found the correct file because multiple files with the same name may be in different directories
# within ctx if the files were moved within the change set. For example, in the following ctx.files() list, the former may have been moved to
@@ -1197,123 +1194,68 @@
.filter( and_( trans.model.Repository.table.c.name == name,
trans.model.Repository.table.c.user_id == user.id ) ) \
.first()
-def get_repository_dependencies_for_changeset_revision( trans, repo, repository, repository_metadata, toolshed_base_url, repository_dependencies=None,
- all_repository_dependencies=None, handled=None ):
+def get_repository_dependencies_for_changeset_revision( trans, repo, repository, repository_metadata, toolshed_base_url,
+ key_rd_dicts_to_be_processed=None, all_repository_dependencies=None,
+ handled_key_rd_dicts=None, circular_repository_dependencies=None ):
"""
Return a dictionary of all repositories upon which the contents of the received repository_metadata record depend. The dictionary keys
are name-spaced values consisting of toolshed_base_url/repository_name/repository_owner/changeset_revision and the values are lists of
repository_dependency tuples consisting of ( toolshed_base_url, repository_name, repository_owner, changeset_revision ). This method
ensures that all required repositories to the nth degree are returned.
"""
- if handled is None:
- handled = []
+ if handled_key_rd_dicts is None:
+ handled_key_rd_dicts = []
if all_repository_dependencies is None:
all_repository_dependencies = {}
- if repository_dependencies is None:
- repository_dependencies = []
+ if key_rd_dicts_to_be_processed is None:
+ key_rd_dicts_to_be_processed = []
+ if circular_repository_dependencies is None:
+ circular_repository_dependencies = []
+ # Assume the current repository does not have repository dependencies defined for it.
+ current_repository_key = None
metadata = repository_metadata.metadata
if metadata and 'repository_dependencies' in metadata:
+ current_repository_key = get_key_for_repository_changeset_revision( toolshed_base_url, repository, repository_metadata )
repository_dependencies_dict = metadata[ 'repository_dependencies' ]
- # The repository_dependencies entry in the metadata is a dictionary that may have a value for a 'description' key. We want to
- # store the value of this key only once, the first time through this recursive method.
- current_repository_key = container_util.generate_repository_dependencies_key_for_repository( toolshed_base_url=toolshed_base_url,
- repository_name=repository.name,
- repository_owner=repository.user.username,
- changeset_revision=repository_metadata.changeset_revision )
if not all_repository_dependencies:
- # Initialize the all_repository_dependencies dictionary. It's safe to assume that current_repository_key in this case will have a value.
- all_repository_dependencies[ 'root_key' ] = current_repository_key
- all_repository_dependencies[ current_repository_key ] = []
- if 'description' not in all_repository_dependencies:
- description = repository_dependencies_dict.get( 'description', None )
- all_repository_dependencies[ 'description' ] = description
- # The next key of interest in repository_dependencies_dict is 'repository_dependencies', which is a list of tuples.
- repository_dependencies_tups = repository_dependencies_dict[ 'repository_dependencies' ]
- if repository_dependencies_tups and current_repository_key:
- # Remove all repository dependencies that point to a revision within its own repository.
- repository_dependencies_tups = remove_ropository_dependency_reference_to_self( repository_dependencies_tups, current_repository_key )
- for repository_dependency in repository_dependencies_tups:
- if repository_dependency not in handled and repository_dependency not in repository_dependencies:
- # The following if statement handles repositories dependencies that are circular in nature.
- if current_repository_key:
- if current_repository_key in all_repository_dependencies:
- # Add all repository dependencies for the current repository into it's entry in all_repository_dependencies.
- all_repository_dependencies_val = all_repository_dependencies[ current_repository_key ]
- if repository_dependency not in all_repository_dependencies_val:
- all_repository_dependencies_val.append( repository_dependency )
- all_repository_dependencies[ current_repository_key ] = all_repository_dependencies_val
- elif can_add_entry_to_all_repository_dependencies( current_repository_key, repository_dependency, all_repository_dependencies ):
- # We don't have a circular dependency that could result in an infinite loop.
- all_repository_dependencies[ current_repository_key ] = [ repository_dependency ]
- repository_dependencies.append( repository_dependency )
- else:
- # The current repository does not have repository dependencies defined for it.
- current_repository_key = None
- # The following if statement handles repositories dependencies that are circular in nature.
- if current_repository_key and current_repository_key in all_repository_dependencies:
- repository_dependencies_tups = [ rd for rd in all_repository_dependencies[ current_repository_key ] ]
- if repository_dependencies_tups:
- repository_dependency = repository_dependencies_tups.pop( 0 )
- if repository_dependency not in handled:
- handled.append( repository_dependency )
- if repository_dependency in repository_dependencies:
- repository_dependencies.remove( repository_dependency )
- toolshed, name, owner, changeset_revision = repository_dependency
- if tool_shed_is_this_tool_shed( toolshed ):
- required_repository = get_repository_by_name_and_owner( trans, name, owner )
- required_repository_metadata = get_repository_metadata_by_repository_id_changset_revision( trans,
- trans.security.encode_id( required_repository.id ),
- changeset_revision )
- if required_repository_metadata:
- required_repo_dir = required_repository.repo_path( trans.app )
- required_repo = hg.repository( get_configured_ui(), required_repo_dir )
- else:
- # The repository changeset_revision is no longer installable, so see if there's been an update.
- required_repo_dir = required_repository.repo_path( trans.app )
- required_repo = hg.repository( get_configured_ui(), required_repo_dir )
- required_changeset_revision = get_next_downloadable_changeset_revision( required_repository, required_repo, changeset_revision )
- required_repository_metadata = get_repository_metadata_by_repository_id_changset_revision( trans,
- trans.security.encode_id( required_repository.id ),
- required_changeset_revision )
- if required_repository_metadata:
- # The changeset_revision defined in a repository_dependencies.xml file is outdated, so we need to fix appropriate
- # entries in our all_repository_dependencies dictionary.
- updated_repository_dependency = [ toolshed, name, owner, required_changeset_revision ]
- for k, v in all_repository_dependencies.items():
- if k in [ 'root_key', 'description' ]:
- continue
- for i, current_repository_dependency in enumerate( v ):
- cts, cn, co, ccr = current_repository_dependency
- if toolshed == cts and name == cn and owner == co and changeset_revision == ccr:
- if updated_repository_dependency in v:
- # We've already stored the updated repository_dependency, so remove the outdated one.
- v = v.remove( repository_dependency )
- all_repository_dependencies[ k ] = v
- else:
- # Store the updated repository_dependency.
- v[ i ] = updated_repository_dependency
- all_repository_dependencies[ k ] = v
- if required_repository_metadata:
- # The required_repository_metadata changeset_revision is installable.
- required_metadata = required_repository_metadata.metadata
- if required_metadata:
- for repository_dependency in repository_dependencies_tups:
- if repository_dependency not in repository_dependencies:
- repository_dependencies.append( repository_dependency )
- return get_repository_dependencies_for_changeset_revision( trans=trans,
- repo=required_repo,
- repository=required_repository,
- repository_metadata=required_repository_metadata,
- toolshed_base_url=toolshed,
- repository_dependencies=repository_dependencies,
- all_repository_dependencies=all_repository_dependencies,
- handled=handled )
- else:
- # The repository is in a different tool shed, so build an url and send a request.
- error_message = "Repository dependencies are currently supported only within the same tool shed. Ignoring repository dependency definition "
- error_message += "for tool shed %s, name %s, owner %s, changeset revision %s" % ( toolshed, name, owner, changeset_revision )
- log.debug( error_message )
+ all_repository_dependencies = initialize_all_repository_dependencies( current_repository_key,
+ repository_dependencies_dict,
+ all_repository_dependencies )
+ # Handle the repository dependencies defined in the current repository, if any, and populate the various repository dependency objects for
+ # this round of processing.
+ current_repository_key_rd_dicts, key_rd_dicts_to_be_processed, handled_key_rd_dicts, all_repository_dependencies = \
+ populate_repository_dependency_objects_for_processing( trans,
+ current_repository_key,
+ repository_dependencies_dict,
+ key_rd_dicts_to_be_processed,
+ handled_key_rd_dicts,
+ circular_repository_dependencies,
+ all_repository_dependencies )
+ if current_repository_key:
+ if current_repository_key_rd_dicts:
+ # There should be only a single current_repository_key_rd_dict in this list.
+ current_repository_key_rd_dict = current_repository_key_rd_dicts[ 0 ]
+ # Handle circular repository dependencies.
+ if not in_circular_repository_dependencies( current_repository_key_rd_dict, circular_repository_dependencies ):
+ if current_repository_key in all_repository_dependencies:
+ handle_current_repository_dependency( trans,
+ current_repository_key,
+ key_rd_dicts_to_be_processed,
+ all_repository_dependencies,
+ handled_key_rd_dicts,
+ circular_repository_dependencies )
+ elif key_rd_dicts_to_be_processed:
+ handle_next_repository_dependency( trans, key_rd_dicts_to_be_processed, all_repository_dependencies, handled_key_rd_dicts, circular_repository_dependencies )
+ elif key_rd_dicts_to_be_processed:
+ handle_next_repository_dependency( trans, key_rd_dicts_to_be_processed, all_repository_dependencies, handled_key_rd_dicts, circular_repository_dependencies )
+ elif key_rd_dicts_to_be_processed:
+ handle_next_repository_dependency( trans, key_rd_dicts_to_be_processed, all_repository_dependencies, handled_key_rd_dicts, circular_repository_dependencies )
return all_repository_dependencies
+def get_repository_dependency_as_key( repository_dependency ):
+ return container_util.generate_repository_dependencies_key_for_repository( repository_dependency[ 0 ],
+ repository_dependency[ 1 ],
+ repository_dependency[ 2 ],
+ repository_dependency[ 3] )
def get_repository_file_contents( file_path ):
if checkers.is_gzip( file_path ):
safe_str = to_safe_string( '\ngzip compressed file\n' )
@@ -1415,11 +1357,77 @@
relative_path_to_sample_file = relative_path_to_sample_file[ len( tool_path ) + 1 :]
sample_file_metadata_paths.append( relative_path_to_sample_file )
return sample_file_metadata_paths, sample_file_copy_paths
+def get_updated_changeset_revisions_for_repository_dependencies( trans, key_rd_dicts ):
+ updated_key_rd_dicts = []
+ for key_rd_dict in key_rd_dicts:
+ key = key_rd_dict.keys()[ 0 ]
+ repository_dependency = key_rd_dict[ key ]
+ toolshed, name, owner, changeset_revision = repository_dependency
+ if tool_shed_is_this_tool_shed( toolshed ):
+ repository = get_repository_by_name_and_owner( trans, name, owner )
+ repository_metadata = get_repository_metadata_by_repository_id_changset_revision( trans,
+ trans.security.encode_id( repository.id ),
+ changeset_revision )
+ if repository_metadata:
+ # The repository changeset_revision is installable, so no updates are available.
+ new_key_rd_dict = {}
+ new_key_rd_dict[ key ] = repository_dependency
+ updated_key_rd_dicts.append( key_rd_dict )
+ else:
+ # The repository changeset_revision is no longer installable, so see if there's been an update.
+ repo_dir = repository.repo_path( trans.app )
+ repo = hg.repository( get_configured_ui(), repo_dir )
+ changeset_revision = get_next_downloadable_changeset_revision( repository, repo, changeset_revision )
+ repository_metadata = get_repository_metadata_by_repository_id_changset_revision( trans,
+ trans.security.encode_id( repository.id ),
+ changeset_revision )
+ if repository_metadata:
+ new_key_rd_dict = {}
+ new_key_rd_dict[ key ] = [ toolshed, name, owner, repository_metadata.changeset_revision ]
+ # We have the updated changeset revision.
+ updated_key_rd_dicts.append( new_key_rd_dict )
+ return updated_key_rd_dicts
def get_user_by_username( trans, username ):
"""Get a user from the database by username"""
return trans.sa_session.query( trans.model.User ) \
.filter( trans.model.User.table.c.username == username ) \
.one()
+def handle_circular_repository_dependency( repository_key, repository_dependency, circular_repository_dependencies, handled_key_rd_dicts, all_repository_dependencies ):
+ all_repository_dependencies_root_key = all_repository_dependencies[ 'root_key' ]
+ repository_dependency_as_key = get_repository_dependency_as_key( repository_dependency )
+ repository_key_as_repository_dependency = repository_key.split( container_util.STRSEP )
+ update_circular_repository_dependencies( repository_key,
+ repository_dependency,
+ all_repository_dependencies[ repository_dependency_as_key ],
+ circular_repository_dependencies )
+ if all_repository_dependencies_root_key != repository_dependency_as_key:
+ all_repository_dependencies[ repository_key ] = [ repository_dependency ]
+ return circular_repository_dependencies, handled_key_rd_dicts, all_repository_dependencies
+def handle_current_repository_dependency( trans, current_repository_key, key_rd_dicts_to_be_processed, all_repository_dependencies, handled_key_rd_dicts,
+ circular_repository_dependencies ):
+ current_repository_key_rd_dicts = []
+ for rd in all_repository_dependencies[ current_repository_key ]:
+ rd_copy = [ str( item ) for item in rd ]
+ new_key_rd_dict = {}
+ new_key_rd_dict[ current_repository_key ] = rd_copy
+ current_repository_key_rd_dicts.append( new_key_rd_dict )
+ if current_repository_key_rd_dicts:
+ toolshed, required_repo, required_repository, required_repository_metadata, repository_key_rd_dicts, key_rd_dicts_to_be_processed, handled_key_rd_dicts = \
+ handle_key_rd_dicts_for_repository( trans,
+ current_repository_key,
+ current_repository_key_rd_dicts,
+ key_rd_dicts_to_be_processed,
+ handled_key_rd_dicts,
+ circular_repository_dependencies )
+ return get_repository_dependencies_for_changeset_revision( trans=trans,
+ repo=required_repo,
+ repository=required_repository,
+ repository_metadata=required_repository_metadata,
+ toolshed_base_url=toolshed,
+ key_rd_dicts_to_be_processed=key_rd_dicts_to_be_processed,
+ all_repository_dependencies=all_repository_dependencies,
+ handled_key_rd_dicts=handled_key_rd_dicts,
+ circular_repository_dependencies=circular_repository_dependencies )
def handle_existing_tool_dependencies_that_changed_in_update( app, repository, original_dependency_dict, new_dependency_dict ):
"""
This method is called when a Galaxy admin is getting updates for an installed tool shed repository in order to cover the case where an
@@ -1438,6 +1446,56 @@
else:
deleted_tool_dependency_names.append( original_dependency_val_dict[ 'name' ] )
return updated_tool_dependency_names, deleted_tool_dependency_names
+def handle_key_rd_dicts_for_repository( trans, current_repository_key, repository_key_rd_dicts, key_rd_dicts_to_be_processed, handled_key_rd_dicts, circular_repository_dependencies ):
+ key_rd_dict = repository_key_rd_dicts.pop( 0 )
+ repository_dependency = key_rd_dict[ current_repository_key ]
+ toolshed, name, owner, changeset_revision = repository_dependency
+ if tool_shed_is_this_tool_shed( toolshed ):
+ required_repository = get_repository_by_name_and_owner( trans, name, owner )
+ required_repository_metadata = get_repository_metadata_by_repository_id_changset_revision( trans,
+ trans.security.encode_id( required_repository.id ),
+ changeset_revision )
+ if required_repository_metadata:
+ required_repo_dir = required_repository.repo_path( trans.app )
+ required_repo = hg.repository( get_configured_ui(), required_repo_dir )
+ # The required_repository_metadata changeset_revision is installable.
+ required_metadata = required_repository_metadata.metadata
+ if required_metadata:
+ for current_repository_key_rd_dict in repository_key_rd_dicts:
+ if not in_key_rd_dicts( current_repository_key_rd_dict, key_rd_dicts_to_be_processed ):
+ key_rd_dicts_to_be_processed.append( current_repository_key_rd_dict )
+ # Mark the current repository_dependency as handled_key_rd_dicts.
+ if not in_key_rd_dicts( key_rd_dict, handled_key_rd_dicts ):
+ handled_key_rd_dicts.append( key_rd_dict )
+ # Remove the current repository from the list of repository_dependencies to be processed.
+ if in_key_rd_dicts( key_rd_dict, key_rd_dicts_to_be_processed ):
+ key_rd_dicts_to_be_processed = remove_from_key_rd_dicts( key_rd_dict, key_rd_dicts_to_be_processed )
+ else:
+ # The repository is in a different tool shed, so build an url and send a request.
+ error_message = "Repository dependencies are currently supported only within the same tool shed. Ignoring repository dependency definition "
+ error_message += "for tool shed %s, name %s, owner %s, changeset revision %s" % ( toolshed, name, owner, changeset_revision )
+ log.debug( error_message )
+ return toolshed, required_repo, required_repository, required_repository_metadata, repository_key_rd_dicts, key_rd_dicts_to_be_processed, handled_key_rd_dicts
+def handle_next_repository_dependency( trans, key_rd_dicts_to_be_processed, all_repository_dependencies, handled_key_rd_dicts, circular_repository_dependencies ):
+ next_repository_key_rd_dict = key_rd_dicts_to_be_processed.pop( 0 )
+ next_repository_key_rd_dicts = [ next_repository_key_rd_dict ]
+ next_repository_key = next_repository_key_rd_dict.keys()[ 0 ]
+ toolshed, required_repo, required_repository, required_repository_metadata, repository_key_rd_dicts, key_rd_dicts_to_be_processed, handled_key_rd_dicts = \
+ handle_key_rd_dicts_for_repository( trans,
+ next_repository_key,
+ next_repository_key_rd_dicts,
+ key_rd_dicts_to_be_processed,
+ handled_key_rd_dicts,
+ circular_repository_dependencies )
+ return get_repository_dependencies_for_changeset_revision( trans=trans,
+ repo=required_repo,
+ repository=required_repository,
+ repository_metadata=required_repository_metadata,
+ toolshed_base_url=toolshed,
+ key_rd_dicts_to_be_processed=key_rd_dicts_to_be_processed,
+ all_repository_dependencies=all_repository_dependencies,
+ handled_key_rd_dicts=handled_key_rd_dicts,
+ circular_repository_dependencies=circular_repository_dependencies )
def handle_sample_files_and_load_tool_from_disk( trans, repo_files_dir, tool_config_filepath, work_dir ):
# Copy all sample files from disk to a temporary directory since the sample files may be in multiple directories.
message = ''
@@ -1489,8 +1547,59 @@
message = str( e )
error = True
return error, message
+def in_all_repository_dependencies( repository_key, repository_dependency, all_repository_dependencies ):
+ """Return True if { repository_key :repository_dependency } is in all_repository_dependencies."""
+ for key, val in all_repository_dependencies.items():
+ if key != repository_key:
+ continue
+ if repository_dependency in val:
+ return True
+ return False
+def in_circular_repository_dependencies( repository_key_rd_dict, circular_repository_dependencies ):
+ """
+ Return True if either ordering of a circular dependency tuple matches the key : value pair defined in the received repository_key_rd_dict. This
+ means that each circular dependency tuple is converted into a key : value pair for the comparison.
+ """
+ for tup in circular_repository_dependencies:
+ rd_0, rd_1 = tup
+ rd_0_as_key = get_repository_dependency_as_key( rd_0 )
+ rd_1_as_key = get_repository_dependency_as_key( rd_1 )
+ if rd_0_as_key in repository_key_rd_dict and repository_key_rd_dict[ rd_0_as_key ] == rd_1:
+ return True
+ if rd_1_as_key in repository_key_rd_dict and repository_key_rd_dict[ rd_1_as_key ] == rd_0:
+ return True
+ return False
+def in_key_rd_dicts( key_rd_dict, key_rd_dicts ):
+ k = key_rd_dict.keys()[ 0 ]
+ v = key_rd_dict[ k ]
+ for key_rd_dict in key_rd_dicts:
+ for key, val in key_rd_dict.items():
+ if key == k and val == v:
+ return True
+ return False
+def is_circular_repository_dependency( repository_key, repository_dependency, all_repository_dependencies ):
+ """
+ Return True if the received repository_dependency is a key in all_repository_dependencies whose list of repository dependencies
+ includes the received repository_key.
+ """
+ repository_dependency_as_key = get_repository_dependency_as_key( repository_dependency )
+ repository_key_as_repository_dependency = repository_key.split( container_util.STRSEP )
+ for key, val in all_repository_dependencies.items():
+ if key != repository_dependency_as_key:
+ continue
+ if repository_key_as_repository_dependency in val:
+ return True
+ return False
def is_downloadable( metadata_dict ):
return 'datatypes' in metadata_dict or 'repository_dependencies' in metadata_dict or 'tools' in metadata_dict or 'workflows' in metadata_dict
+def initialize_all_repository_dependencies( current_repository_key, repository_dependencies_dict, all_repository_dependencies ):
+ # Initialize the all_repository_dependencies dictionary. It's safe to assume that current_repository_key in this case will have a value.
+ all_repository_dependencies[ 'root_key' ] = current_repository_key
+ all_repository_dependencies[ current_repository_key ] = []
+ # Store the value of the 'description' key only once, the first time through this recursive method.
+ description = repository_dependencies_dict.get( 'description', None )
+ all_repository_dependencies[ 'description' ] = description
+ return all_repository_dependencies
def load_tool_from_config( app, full_path ):
try:
tool = app.toolbox.load_tool( full_path )
@@ -1553,24 +1662,78 @@
"key": full_path }
folder_contents.append( node )
return folder_contents
+def populate_repository_dependency_objects_for_processing( trans, current_repository_key, repository_dependencies_dict, key_rd_dicts_to_be_processed,
+ handled_key_rd_dicts, circular_repository_dependencies, all_repository_dependencies ):
+ current_repository_key_rd_dicts = []
+ for rd in repository_dependencies_dict[ 'repository_dependencies' ]:
+ new_key_rd_dict = {}
+ new_key_rd_dict[ current_repository_key ] = rd
+ current_repository_key_rd_dicts.append( new_key_rd_dict )
+ if current_repository_key_rd_dicts and current_repository_key:
+ # Remove all repository dependencies that point to a revision within the current repository itself.
+ current_repository_key_rd_dicts = remove_ropository_dependency_reference_to_self( current_repository_key_rd_dicts )
+ current_repository_key_rd_dicts = get_updated_changeset_revisions_for_repository_dependencies( trans, current_repository_key_rd_dicts )
+ for key_rd_dict in current_repository_key_rd_dicts:
+ is_circular = False
+ if not in_key_rd_dicts( key_rd_dict, handled_key_rd_dicts ) and not in_key_rd_dicts( key_rd_dict, key_rd_dicts_to_be_processed ):
+ repository_dependency = key_rd_dict[ current_repository_key ]
+ if current_repository_key in all_repository_dependencies:
+ # Add all repository dependencies for the current repository to its entry in all_repository_dependencies.
+ all_repository_dependencies_val = all_repository_dependencies[ current_repository_key ]
+ if repository_dependency not in all_repository_dependencies_val:
+ all_repository_dependencies_val.append( repository_dependency )
+ all_repository_dependencies[ current_repository_key ] = all_repository_dependencies_val
+ elif not in_all_repository_dependencies( current_repository_key, repository_dependency, all_repository_dependencies ):
+ # Handle circular repository dependencies.
+ if is_circular_repository_dependency( current_repository_key, repository_dependency, all_repository_dependencies ):
+ is_circular = True
+ circular_repository_dependencies, handled_key_rd_dicts, all_repository_dependencies = \
+ handle_circular_repository_dependency( current_repository_key,
+ repository_dependency,
+ circular_repository_dependencies,
+ handled_key_rd_dicts,
+ all_repository_dependencies )
+ else:
+ all_repository_dependencies[ current_repository_key ] = [ repository_dependency ]
+ if not is_circular and can_add_to_key_rd_dicts( key_rd_dict, key_rd_dicts_to_be_processed ):
+ new_key_rd_dict = {}
+ new_key_rd_dict[ current_repository_key ] = repository_dependency
+ key_rd_dicts_to_be_processed.append( new_key_rd_dict )
+ return current_repository_key_rd_dicts, key_rd_dicts_to_be_processed, handled_key_rd_dicts, all_repository_dependencies
def remove_dir( dir ):
if os.path.exists( dir ):
try:
shutil.rmtree( dir )
except:
pass
-def remove_ropository_dependency_reference_to_self( repository_dependencies, repository_key ):
+def remove_from_key_rd_dicts( key_rd_dict, key_rd_dicts ):
+ k = key_rd_dict.keys()[ 0 ]
+ v = key_rd_dict[ k ]
+ clean_key_rd_dicts = []
+ for krd_dict in key_rd_dicts:
+ key = krd_dict.keys()[ 0 ]
+ val = krd_dict[ key ]
+ if key == k and val == v:
+ continue
+ clean_key_rd_dicts.append( krd_dict )
+ return clean_key_rd_dicts
+def remove_ropository_dependency_reference_to_self( key_rd_dicts ):
"""Remove all repository dependencies that point to a revision within its own repository."""
- clean_repository_dependencies = []
- repository_tup = repository_key.split( container_util.STRSEP )
+ clean_key_rd_dicts = []
+ key = key_rd_dicts[ 0 ].keys()[ 0 ]
+ repository_tup = key.split( container_util.STRSEP )
rd_toolshed, rd_name, rd_owner, rd_changeset_revision = repository_tup
- for repository_dependency in repository_dependencies:
+ for key_rd_dict in key_rd_dicts:
+ k = key_rd_dict.keys()[ 0 ]
+ repository_dependency = key_rd_dict[ k ]
toolshed, name, owner, changeset_revision = repository_dependency
if rd_toolshed == toolshed and rd_name == name and rd_owner == owner:
log.debug( "Removing repository dependency for repository %s owned by %s since it refers to a revision within itself." % ( name, owner ) )
else:
- clean_repository_dependencies.append( repository_dependency )
- return clean_repository_dependencies
+ new_key_rd_dict = {}
+ new_key_rd_dict[ key ] = repository_dependency
+ clean_key_rd_dicts.append( new_key_rd_dict )
+ return clean_key_rd_dicts
def remove_tool_dependency_installation_directory( dependency_install_dir ):
if os.path.exists( dependency_install_dir ):
try:
@@ -1841,6 +2004,19 @@
else:
translated_string = ''
return translated_string
+def update_circular_repository_dependencies( repository_key, repository_dependency, repository_dependencies, circular_repository_dependencies ):
+ repository_dependency_as_key = get_repository_dependency_as_key( repository_dependency )
+ repository_key_as_repository_dependency = repository_key.split( container_util.STRSEP )
+ if repository_key_as_repository_dependency in repository_dependencies:
+ found = False
+ for tup in circular_repository_dependencies:
+ if repository_dependency in tup and repository_key_as_repository_dependency in tup:
+ # The circular dependency has already been included.
+ found = True
+ if not found:
+ new_circular_tup = [ repository_dependency, repository_key_as_repository_dependency ]
+ circular_repository_dependencies.append( new_circular_tup )
+ return circular_repository_dependencies
def update_existing_tool_dependency( app, repository, original_dependency_dict, new_dependencies_dict ):
"""
Update an existing tool dependency whose definition was updated in a change set pulled by a Galaxy administrator when getting updates
@@ -1889,8 +2065,7 @@
sa_session.flush()
new_tool_dependency = tool_dependency
else:
- # We have no new tool dependency definition based on a matching dependency name, so remove the existing tool dependency record
- # from the database.
+ # We have no new tool dependency definition based on a matching dependency name, so remove the existing tool dependency record from the database.
log.debug( "Deleting tool dependency with name '%s', type '%s' and version '%s' from the database since it is no longer defined." % \
( str( tool_dependency.name ), str( tool_dependency.type ), str( tool_dependency.version ) ) )
sa_session.delete( tool_dependency )
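Note on the new dependency-traversal helpers above: each key_rd_dict pairs a repository key (a container_util.STRSEP-joined string of tool shed, name, owner and changeset revision) with a single [ toolshed, name, owner, changeset_revision ] dependency. A minimal standalone sketch of that shape and of the membership test it supports; the STRSEP value and the sample repositories are illustrative assumptions, not taken from this changeset:

# Illustrative sketch only; STRSEP's real value lives in container_util.
STRSEP = '__SEP__'

def make_key( toolshed, name, owner, changeset_revision ):
    return STRSEP.join( [ toolshed, name, owner, changeset_revision ] )

key_rd_dict = { make_key( 'http://shed.example.org', 'repo_a', 'alice', 'abc123' ) :
                [ 'http://shed.example.org', 'repo_b', 'bob', 'def456' ] }

def in_key_rd_dicts_sketch( key_rd_dict, key_rd_dicts ):
    # Same membership test as in_key_rd_dicts() above: an entry matches only if both
    # the key and the dependency value are equal.
    k = key_rd_dict.keys()[ 0 ]
    v = key_rd_dict[ k ]
    for krd_dict in key_rd_dicts:
        for key, val in krd_dict.items():
            if key == k and val == v:
                return True
    return False

print in_key_rd_dicts_sketch( key_rd_dict, [ key_rd_dict ] )  # True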
diff -r c460f284077f0b400901e4cd791d94a311425751 -r e19bf2b117638221414239698f840730a2cd0569 lib/galaxy/webapps/community/controllers/repository.py
--- a/lib/galaxy/webapps/community/controllers/repository.py
+++ b/lib/galaxy/webapps/community/controllers/repository.py
@@ -1782,14 +1782,14 @@
is_malicious = repository_metadata.malicious
if repository_metadata:
# Get a dictionary of all repositories upon which the contents of the current repository_metadata record depend.
- repository_dependencies = get_repository_dependencies_for_changeset_revision( trans,
- repo,
- repository,
- repository_metadata,
- str( url_for( '/', qualified=True ) ).rstrip( '/' ),
- repository_dependencies=None,
+ repository_dependencies = get_repository_dependencies_for_changeset_revision( trans=trans,
+ repo=repo,
+ repository=repository,
+ repository_metadata=repository_metadata,
+ toolshed_base_url=str( url_for( '/', qualified=True ) ).rstrip( '/' ),
+ key_rd_dicts_to_be_processed=None,
all_repository_dependencies=None,
- handled=None )
+ handled_key_rd_dicts=None )
if is_malicious:
if trans.app.security_agent.can_push( trans.app, trans.user, repository ):
message += malicious_error_can_push
@@ -1895,14 +1895,14 @@
repository_metadata_id = trans.security.encode_id( repository_metadata.id ),
metadata = repository_metadata.metadata
# Get a dictionary of all repositories upon which the contents of the current repository_metadata record depend.
- repository_dependencies = get_repository_dependencies_for_changeset_revision( trans,
- repo,
- repository,
- repository_metadata,
- str( url_for( '/', qualified=True ) ).rstrip( '/' ),
- repository_dependencies=None,
+ repository_dependencies = get_repository_dependencies_for_changeset_revision( trans=trans,
+ repo=repo,
+ repository=repository,
+ repository_metadata=repository_metadata,
+ toolshed_base_url=str( url_for( '/', qualified=True ) ).rstrip( '/' ),
+ key_rd_dicts_to_be_processed=None,
all_repository_dependencies=None,
- handled=None )
+ handled_key_rd_dicts=None )
else:
repository_metadata_id = None
metadata = None
@@ -2417,14 +2417,14 @@
repository_metadata_id = trans.security.encode_id( repository_metadata.id )
metadata = repository_metadata.metadata
# Get a dictionary of all repositories upon which the contents of the current repository_metadata record depend.
- repository_dependencies = get_repository_dependencies_for_changeset_revision( trans,
- repo,
- repository,
- repository_metadata,
- str( url_for( '/', qualified=True ) ).rstrip( '/' ),
- repository_dependencies=None,
+ repository_dependencies = get_repository_dependencies_for_changeset_revision( trans=trans,
+ repo=repo,
+ repository=repository,
+ repository_metadata=repository_metadata,
+ toolshed_base_url=str( url_for( '/', qualified=True ) ).rstrip( '/' ),
+ key_rd_dicts_to_be_processed=None,
all_repository_dependencies=None,
- handled=None )
+ handled_key_rd_dicts=None )
else:
repository_metadata_id = None
metadata = None
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/c460f284077f/
changeset: c460f284077f
user: dan
date: 2012-12-10 22:14:05
summary: Fix for value_to_display_text in DatasetToolparameter when encountering a non-set optional multiple dataset; inspired by a patch from Kyle Ellrott.
affected #: 1 file
diff -r fc84a8b469c3181be64083a91d0fc2faae5e73ef -r c460f284077f0b400901e4cd791d94a311425751 lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1605,7 +1605,7 @@
return value.file_name
def value_to_display_text( self, value, app ):
- if not isinstance(value, list):
+ if value and not isinstance( value, list ):
value = [ value ]
if value:
return ", ".join( [ "%s: %s" % ( item.hid, item.name ) for item in value ] )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/fc84a8b469c3/
changeset: fc84a8b469c3
user: jgoecks
date: 2012-12-10 22:08:44
summary: Trackster: use tile region in place of tile index for simplicity.
affected #: 1 file
diff -r c3acc86490780d1949c4a3abf1263ace7c5ece6c -r fc84a8b469c3181be64083a91d0fc2faae5e73ef static/scripts/viz/trackster/tracks.js
--- a/static/scripts/viz/trackster/tracks.js
+++ b/static/scripts/viz/trackster/tracks.js
@@ -2762,23 +2762,27 @@
}
*/
},
+
/**
* Generate a key for the tile cache.
* TODO: create a TileCache object (like DataCache) and generate key internally.
*/
- _gen_tile_cache_key: function(w_scale, tile_index) {
- return w_scale + '_' + tile_index;
+ _gen_tile_cache_key: function(w_scale, tile_region) {
+ return w_scale + '_' + tile_region;
},
+
/**
* Request that track be drawn.
*/
request_draw: function(force, clear_after) {
this.view.request_redraw(false, force, clear_after, this);
},
+
/**
* Actions to be taken before drawing.
*/
before_draw: function() {},
+
/**
* Draw track. It is possible to force a redraw rather than use cached tiles and/or clear old
* tiles after drawing new tiles.
@@ -2826,7 +2830,8 @@
is_tile = function(o) { return (o && 'track' in o); };
// Draw tiles.
while ( ( tile_index * TILE_SIZE * resolution ) < high ) {
- var draw_result = this.draw_helper( force, tile_index, resolution, this.tiles_div, w_scale );
+ var tile_region = this._get_tile_bounds(tile_index, resolution),
+ draw_result = this.draw_helper( force, tile_region, resolution, this.tiles_div, w_scale );
if ( is_tile(draw_result) ) {
drawn_tiles.push( draw_result );
} else {
@@ -2871,10 +2876,9 @@
* Retrieves from cache, draws, or sets up drawing for a single tile. Returns either a Tile object or a
* jQuery.Deferred object that is fulfilled when tile can be drawn again.
*/
- draw_helper: function(force, tile_index, resolution, parent_element, w_scale, kwargs) {
+ draw_helper: function(force, region, resolution, parent_element, w_scale, kwargs) {
var track = this,
- key = this._gen_tile_cache_key(w_scale, tile_index),
- region = this._get_tile_bounds(tile_index, resolution);
+ key = this._gen_tile_cache_key(w_scale, region);
// Init kwargs if necessary to avoid having to check if kwargs defined.
if (!kwargs) { kwargs = {}; }
@@ -3022,7 +3026,7 @@
/**
* Returns a genome region that corresponds to a tile at a particular resolution
- */
+ */
_get_tile_bounds: function(tile_index, resolution) {
var tile_low = Math.floor( tile_index * TILE_SIZE * resolution ),
tile_length = Math.ceil( TILE_SIZE * resolution ),
@@ -3223,11 +3227,10 @@
this.action_icons.param_space_viz_icon.hide();
},
can_draw: Drawable.prototype.can_draw,
- draw_helper: function(force, tile_index, resolution, parent_element, w_scale, kwargs) {
+ draw_helper: function(force, region, resolution, parent_element, w_scale, kwargs) {
// FIXME: this function is similar to TiledTrack.draw_helper -- can the two be merged/refactored?
var track = this,
- key = this._gen_tile_cache_key(w_scale, tile_index),
- region = this._get_tile_bounds(tile_index, resolution);
+ key = this._gen_tile_cache_key(w_scale, region);
// Init kwargs if necessary to avoid having to check if kwargs defined.
if (!kwargs) { kwargs = {}; }
@@ -3446,9 +3449,9 @@
/**
* Only retrieves data and draws tile if reference data can be displayed.
*/
- draw_helper: function(force, tile_index, resolution, parent_element, w_scale, kwargs) {
+ draw_helper: function(force, region, resolution, parent_element, w_scale, kwargs) {
if (w_scale > this.view.canvas_manager.char_width_px) {
- return TiledTrack.prototype.draw_helper.call(this, force, tile_index, resolution, parent_element, w_scale, kwargs);
+ return TiledTrack.prototype.draw_helper.call(this, force, region, resolution, parent_element, w_scale, kwargs);
}
else {
this.hide_contents();
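The refactoring above keys the tile cache on the genome region a tile covers instead of its raw index, so draw_helper receives a region directly and no longer recomputes bounds. A small Python rendering of the visible arithmetic, for illustration only (Trackster itself is JavaScript; TILE_SIZE's value and the tile_high formula are assumptions):

import math

TILE_SIZE = 400  # illustrative; the real constant is defined elsewhere in tracks.js

def get_tile_bounds( tile_index, resolution ):
    # Same arithmetic as _get_tile_bounds above; tile_high is assumed to be
    # tile_low + tile_length, which the visible hunk does not show.
    tile_low = int( math.floor( tile_index * TILE_SIZE * resolution ) )
    tile_length = int( math.ceil( TILE_SIZE * resolution ) )
    return ( tile_low, tile_low + tile_length )

def gen_tile_cache_key( w_scale, tile_region ):
    # The cache key is derived from the region itself, matching _gen_tile_cache_key.
    return '%s_%s' % ( w_scale, str( tile_region ) )

print gen_tile_cache_key( 0.05, get_tile_bounds( 3, 10 ) )  # 0.05_(12000, 16000)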
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/c3acc8649078/
changeset: c3acc8649078
user: jgoecks
date: 2012-12-10 21:47:05
summary: Trackster: use underscore methods to simplify tile search/iteration and remove old debugging statement.
affected #: 1 file
diff -r 3ee0e5ee1b375c0fd580c26a93850f2fa44f93f0 -r c3acc86490780d1949c4a3abf1263ace7c5ece6c static/scripts/viz/trackster/tracks.js
--- a/static/scripts/viz/trackster/tracks.js
+++ b/static/scripts/viz/trackster/tracks.js
@@ -2854,21 +2854,17 @@
//
// If some tiles have icons, set padding of tiles without icons so features and rows align.
//
- var icons_present = false;
- for (var tile_index = 0; tile_index < tiles.length; tile_index++) {
- if (tiles[tile_index].has_icons) {
- icons_present = true;
- break;
- }
- }
+ var icons_present = _.find(tiles, function(tile) {
+ return tile.has_icons;
+ });
+
if (icons_present) {
- for (var tile_index = 0; tile_index < tiles.length; tile_index++) {
- tile = tiles[tile_index];
+ _.each(tiles, function(tile) {
if (!tile.has_icons) {
// Need to align with other tile(s) that have icons.
tile.html_elt.css("padding-top", ERROR_PADDING);
}
- }
+ });
}
},
/**
@@ -4045,7 +4041,6 @@
var painter = new (this.painter)(filtered, tile_low, tile_high, this.prefs, mode, filter_alpha_scaler, filter_height_scaler, ref_seq);
var feature_mapper = null;
- // console.log(( tile_low - this.view.low ) * w_scale, tile_index, w_scale);
ctx.fillStyle = this.prefs.block_color;
ctx.font = ctx.canvas.manager.default_font;
ctx.textAlign = "right";
Repository URL: https://bitbucket.org/galaxy/galaxy-central/