1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/6304eb6a9110/
changeset: 6304eb6a9110
user: greg
date: 2012-12-05 16:47:48
summary: Various fixes and enhancements to support new repository dependencies.
affected #: 5 files
diff -r 5c5249fac5c8893a8da13043b8cc1ba23d3b303e -r 6304eb6a91103121ed6d6bf960b6bc9984966259 lib/galaxy/util/shed_util_common.py
--- a/lib/galaxy/util/shed_util_common.py
+++ b/lib/galaxy/util/shed_util_common.py
@@ -79,7 +79,11 @@
lock.acquire( True )
if tool_dependencies:
# Add the install_dir attribute to the tool_dependencies.
- tool_dependencies = add_installation_directories_to_tool_dependencies( trans, repository_name, repository_owner, changeset_revision, tool_dependencies )
+ tool_dependencies = add_installation_directories_to_tool_dependencies( trans,
+ repository_name,
+ repository_owner,
+ changeset_revision,
+ tool_dependencies )
try:
folder_id = 0
if readme_files_dict:
@@ -321,6 +325,7 @@
log.debug( error_message )
return False, error_message
def compare_changeset_revisions( ancestor_changeset_revision, ancestor_metadata_dict, current_changeset_revision, current_metadata_dict ):
+ """Compare the contents of two changeset revisions to determine if a new repository metadata revision should be created."""
# The metadata associated with ancestor_changeset_revision is ancestor_metadata_dict. This changeset_revision is an ancestor of
# current_changeset_revision which is associated with current_metadata_dict. A new repository_metadata record will be created only
# when this method returns the string 'not equal and not subset'.
@@ -328,23 +333,38 @@
ancestor_tools = ancestor_metadata_dict.get( 'tools', [] )
ancestor_guids = [ tool_dict[ 'guid' ] for tool_dict in ancestor_tools ]
ancestor_guids.sort()
+ ancestor_repository_dependencies_dict = ancestor_metadata_dict.get( 'repository_dependencies', {} )
+ ancestor_repository_dependencies = ancestor_repository_dependencies_dict.get( 'repository_dependencies', [] )
ancestor_tool_dependencies = ancestor_metadata_dict.get( 'tool_dependencies', [] )
ancestor_workflows = ancestor_metadata_dict.get( 'workflows', [] )
current_datatypes = current_metadata_dict.get( 'datatypes', [] )
current_tools = current_metadata_dict.get( 'tools', [] )
current_guids = [ tool_dict[ 'guid' ] for tool_dict in current_tools ]
current_guids.sort()
+ current_repository_dependencies_dict = current_metadata_dict.get( 'repository_dependencies', {} )
+ current_repository_dependencies = current_repository_dependencies_dict.get( 'repository_dependencies', [] )
current_tool_dependencies = current_metadata_dict.get( 'tool_dependencies', [] )
current_workflows = current_metadata_dict.get( 'workflows', [] )
# Handle case where no metadata exists for either changeset.
- if not ancestor_guids and not current_guids and not ancestor_workflows and not current_workflows and not ancestor_datatypes and not current_datatypes:
+ no_datatypes = not ancestor_datatypes and not current_datatypes
+ no_repository_dependencies = not ancestor_repository_dependencies and not current_repository_dependencies
+ # Note: we currently don't need to check tool_dependencies since we're checking for guids - tool_dependencies always require tools (currently).
+ no_tool_dependencies = not ancestor_tool_dependencies and not current_tool_dependencies
+ no_tools = not ancestor_guids and not current_guids
+ no_workflows = not ancestor_workflows and not current_workflows
+ if no_datatypes and no_repository_dependencies and no_tool_dependencies and no_tools and no_workflows:
return 'no metadata'
+ repository_dependency_comparison = compare_repository_dependencies( ancestor_repository_dependencies, current_repository_dependencies )
workflow_comparison = compare_workflows( ancestor_workflows, current_workflows )
datatype_comparison = compare_datatypes( ancestor_datatypes, current_datatypes )
# Handle case where all metadata is the same.
- if ancestor_guids == current_guids and workflow_comparison == 'equal' and datatype_comparison == 'equal':
+ if ancestor_guids == current_guids and repository_dependency_comparison == 'equal' and workflow_comparison == 'equal' and datatype_comparison == 'equal':
return 'equal'
- if workflow_comparison in [ 'equal', 'subset' ] and datatype_comparison in [ 'equal', 'subset' ]:
+ # Handle case where ancestor metadata is a subset of current metadata.
+ repository_dependency_is_subset = repository_dependency_comparison in [ 'equal', 'subset' ]
+ workflow_dependency_is_subset = workflow_comparison in [ 'equal', 'subset' ]
+ datatype_is_subset = datatype_comparison in [ 'equal', 'subset' ]
+ if repository_dependency_is_subset and workflow_dependency_is_subset and datatype_is_subset:
is_subset = True
for guid in ancestor_guids:
if guid not in current_guids:
@@ -354,10 +374,8 @@
return 'subset'
return 'not equal and not subset'
def compare_datatypes( ancestor_datatypes, current_datatypes ):
- # Determine if ancestor_datatypes is the same as current_datatypes
- # or if ancestor_datatypes is a subset of current_datatypes. Each
- # datatype dict looks something like:
- # {"dtype": "galaxy.datatypes.images:Image", "extension": "pdf", "mimetype": "application/pdf"}
+ """Determine if ancestor_datatypes is the same as or a subset of current_datatypes."""
+ # Each datatype dict looks something like: {"dtype": "galaxy.datatypes.images:Image", "extension": "pdf", "mimetype": "application/pdf"}
if len( ancestor_datatypes ) <= len( current_datatypes ):
for ancestor_datatype in ancestor_datatypes:
# Currently the only way to differentiate datatypes is by name.
@@ -378,9 +396,31 @@
else:
return 'subset'
return 'not equal and not subset'
+def compare_repository_dependencies( ancestor_repository_dependencies, current_repository_dependencies ):
+ """Determine if ancestor_repository_dependencies is the same as or a subset of current_repository_dependencies."""
+ # The list of repository_dependencies looks something like: [["http://localhost:9009", "emboss_datatypes", "test", "ab03a2a5f407"]].
+ # Create a string from each tuple in the list for easier comparison.
+ if len( ancestor_repository_dependencies ) <= len( current_repository_dependencies ):
+ for ancestor_tup in ancestor_repository_dependencies:
+ ancestor_tool_shed, ancestor_repository_name, ancestor_repository_owner, ancestor_changeset_revision = ancestor_tup
+ found_in_current = False
+ for current_tup in current_repository_dependencies:
+ current_tool_shed, current_repository_name, current_repository_owner, current_changeset_revision = current_tup
+ if current_tool_shed == ancestor_tool_shed and \
+ current_repository_name == ancestor_repository_name and \
+ current_repository_owner == ancestor_repository_owner and \
+ current_changeset_revision == ancestor_changeset_revision:
+ found_in_current = True
+ break
+ if not found_in_current:
+ return 'not equal and not subset'
+ if len( ancestor_repository_dependencies ) == len( current_repository_dependencies ):
+ return 'equal'
+ else:
+ return 'subset'
+ return 'not equal and not subset'
def compare_workflows( ancestor_workflows, current_workflows ):
- # Determine if ancestor_workflows is the same as current_workflows
- # or if ancestor_workflows is a subset of current_workflows.
+ """Determine if ancestor_workflows is the same as current_workflows or if ancestor_workflows is a subset of current_workflows."""
if len( ancestor_workflows ) <= len( current_workflows ):
for ancestor_workflow_tup in ancestor_workflows:
# ancestor_workflows is a list of tuples where each contained tuple is
@@ -417,6 +457,7 @@
message = ''
return message
def copy_disk_sample_files_to_dir( trans, repo_files_dir, dest_path ):
+ """Copy all files currently on disk that end with the .sample extension to the directory to which dest_path refers."""
sample_files = []
for root, dirs, files in os.walk( repo_files_dir ):
if root.find( '.hg' ) < 0:
@@ -664,10 +705,12 @@
# If the list of sample files includes a tool_data_table_conf.xml.sample file, load its table elements into memory.
relative_path, filename = os.path.split( sample_file )
if filename == 'tool_data_table_conf.xml.sample':
- new_table_elems = app.tool_data_tables.add_new_entries_from_config_file( config_filename=sample_file,
- tool_data_path=original_tool_data_path,
- shed_tool_data_table_config=app.config.shed_tool_data_table_config,
- persist=persist )
+ new_table_elems, error_message = app.tool_data_tables.add_new_entries_from_config_file( config_filename=sample_file,
+ tool_data_path=original_tool_data_path,
+ shed_tool_data_table_config=app.config.shed_tool_data_table_config,
+ persist=persist )
+ if error_message:
+ invalid_file_tups.append( ( filename, error_message ) )
for root, dirs, files in os.walk( files_dir ):
if root.find( '.hg' ) < 0 and root.find( 'hgrc' ) < 0:
if '.hg' in dirs:
@@ -675,13 +718,8 @@
for name in files:
# See if we have repository dependencies defined.
if name == 'repository_dependencies.xml':
- relative_path_to_repository_dependencies = get_relative_path_to_repository_file( root,
- name,
- relative_install_dir,
- work_dir,
- shed_config_dict,
- resetting_all_metadata_on_repository )
- metadata_dict = generate_repository_dependency_metadata( relative_path_to_repository_dependencies, metadata_dict )
+ path_to_repository_dependencies_config = os.path.join( root, name )
+ metadata_dict = generate_repository_dependency_metadata( path_to_repository_dependencies_config, metadata_dict )
# See if we have one or more READ_ME files.
elif name.lower() in readme_file_names:
relative_path_to_readme = get_relative_path_to_repository_file( root,
@@ -793,7 +831,9 @@
if repository_dependencies_tup not in repository_dependencies_tups:
repository_dependencies_tups.append( repository_dependencies_tup )
if repository_dependencies_tups:
- metadata_dict[ 'repository_dependencies' ] = repository_dependencies_tups
+ repository_dependencies_dict = dict( description=root.get( 'description' ),
+ repository_dependencies=repository_dependencies_tups )
+ metadata_dict[ 'repository_dependencies' ] = repository_dependencies_dict
return metadata_dict
def generate_tool_dependency_metadata( app, repository, tool_dependencies_config, metadata_dict, original_repository_metadata=None ):
"""
@@ -1087,25 +1127,47 @@
trans.model.Repository.table.c.user_id == user.id ) ) \
.first()
def get_repository_dependencies_for_changeset_revision( trans, repo, repository, repository_metadata, toolshed_base_url, repository_dependencies=None,
- all_repository_dependencies=None ):
+ all_repository_dependencies=None, handled=None ):
"""
Return a dictionary of all repositories upon which the contents of the received repository_metadata record depend. The dictionary keys
are name-spaced values consisting of toolshed_base_url/repository_name/repository_owner/changeset_revision and the values are lists of
repository_dependency tuples consisting of ( toolshed_base_url, repository_name, repository_owner, changeset_revision ). This method
ensures that all required repositories to the nth degree are returned.
"""
+ if handled is None:
+ handled = []
if all_repository_dependencies is None:
all_repository_dependencies = {}
if repository_dependencies is None:
repository_dependencies = []
metadata = repository_metadata.metadata
if metadata and 'repository_dependencies' in metadata:
+ repository_dependencies_dict = metadata[ 'repository_dependencies' ]
+ # The repository_dependencies entry in the metadata is a dictionary that may have a value for a 'description' key. We want to
+ # store the value of this key only once, the first time through this recursive method.
repository_dependencies_root_key = generate_repository_dependencies_key_for_repository( toolshed_base_url=toolshed_base_url,
repository_name=repository.name,
repository_owner=repository.user.username,
changeset_revision=repository_metadata.changeset_revision )
- for repository_dependency in metadata[ 'repository_dependencies' ]:
- if repository_dependency not in repository_dependencies:
+ if not all_repository_dependencies:
+ # Initialize the all_repository_dependencies dictionary.
+ all_repository_dependencies[ 'root_key' ] = repository_dependencies_root_key
+ all_repository_dependencies[ repository_dependencies_root_key ] = []
+ if 'description' not in all_repository_dependencies:
+ description = repository_dependencies_dict.get( 'description', None )
+ all_repository_dependencies[ 'description' ] = description
+
+ # The next key of interest in repository_dependencies_dict is 'repository_dependencies', which is a list of tuples.
+ repository_dependencies_tups = repository_dependencies_dict[ 'repository_dependencies' ]
+ for repository_dependency in repository_dependencies_tups:
+ # Skip repository dependencies that point to the root repository.
+ check_key = generate_repository_dependencies_key_for_repository( toolshed_base_url=repository_dependency[ 0 ],
+ repository_name=repository_dependency[ 1 ],
+ repository_owner=repository_dependency[ 2 ],
+ changeset_revision=repository_dependency[ 3 ] )
+ if check_key == repository_dependencies_root_key:
+ handled.append( repository_dependency )
+ elif repository_dependency not in handled and repository_dependency not in repository_dependencies:
repository_dependencies.append( repository_dependency )
else:
repository_dependencies_root_key = None
@@ -1121,6 +1183,7 @@
if repository_dependency not in all_repository_dependencies_val:
all_repository_dependencies_val.append( repository_dependency )
all_repository_dependencies[ repository_dependencies_root_key ] = all_repository_dependencies_val
+ handled.append( repository_dependency )
else:
# Insert this repository_dependency.
all_repository_dependencies[ repository_dependencies_root_key ] = [ repository_dependency ]
@@ -1141,6 +1204,23 @@
required_repository_metadata = get_repository_metadata_by_repository_id_changset_revision( trans,
trans.security.encode_id( required_repository.id ),
required_changeset_revision )
+ if required_repository_metadata:
+ # The changeset_revision defined in a repository_dependencies.xml file is outdated, so we need to fix appropriate
+ # entries in our all_repository_dependencies dictionary.
+ updated_repository_dependency = [ tool_shed, name, owner, required_changeset_revision ]
+ for k, v in all_repository_dependencies.items():
+ if k in [ 'root_key', 'description' ]:
+ continue
+ for i, current_repository_dependency in enumerate( v ):
+ current_tool_shed, current_name, current_owner, current_changeset_revision = current_repository_dependency
+ if tool_shed == current_tool_shed and name == current_name and owner == current_owner and changeset_revision == current_changeset_revision:
+ if updated_repository_dependency in v:
+ # We've already stored the updated repository_dependency, so remove the outdated one.
+ v.remove( repository_dependency )
+ else:
+ # Store the updated repository_dependency.
+ v[ i ] = updated_repository_dependency
+ all_repository_dependencies[ k ] = v
if required_repository_metadata:
# The required_repository_metadata changeset_revision is installable.
required_metadata = required_repository_metadata.metadata
@@ -1151,7 +1231,8 @@
repository_metadata=required_repository_metadata,
toolshed_base_url=tool_shed,
repository_dependencies=repository_dependencies,
- all_repository_dependencies=all_repository_dependencies )
+ all_repository_dependencies=all_repository_dependencies,
+ handled=handled )
else:
# The repository is in a different tool shed, so build an url and send a request.
raise Exception( "Repository dependencies that refer to repositories in other tool sheds is not yet supported." )
@@ -1321,16 +1402,18 @@
error = False
message = ''
try:
- new_table_elems = app.tool_data_tables.add_new_entries_from_config_file( config_filename=filename,
- tool_data_path=app.config.tool_data_path,
- shed_tool_data_table_config=app.config.shed_tool_data_table_config,
- persist=persist )
+ new_table_elems, message = app.tool_data_tables.add_new_entries_from_config_file( config_filename=filename,
+ tool_data_path=app.config.tool_data_path,
+ shed_tool_data_table_config=app.config.shed_tool_data_table_config,
+ persist=persist )
+ if message:
+ error = True
except Exception, e:
message = str( e )
error = True
return error, message
def is_downloadable( metadata_dict ):
- return 'datatypes' in metadata_dict or 'tools' in metadata_dict or 'workflows' in metadata_dict
+ return 'datatypes' in metadata_dict or 'repository_dependencies' in metadata_dict or 'tools' in metadata_dict or 'workflows' in metadata_dict
def load_tool_from_config( app, full_path ):
try:
tool = app.toolbox.load_tool( full_path )
@@ -1502,14 +1585,15 @@
cloned_ok, error_message = clone_repository( repository_clone_url, work_dir, str( ctx.rev() ) )
if cloned_ok:
log.debug( "Generating metadata for changset revision: %s", str( ctx.rev() ) )
- current_metadata_dict, invalid_file_tups = generate_metadata_for_changeset_revision( app=trans.app,
- repository=repository,
- repository_clone_url=repository_clone_url,
- relative_install_dir=repo_dir,
- repository_files_dir=work_dir,
- resetting_all_metadata_on_repository=True,
- updating_installed_repository=False,
- persist=False )
+ current_metadata_dict, invalid_tups = generate_metadata_for_changeset_revision( app=trans.app,
+ repository=repository,
+ repository_clone_url=repository_clone_url,
+ relative_install_dir=repo_dir,
+ repository_files_dir=work_dir,
+ resetting_all_metadata_on_repository=True,
+ updating_installed_repository=False,
+ persist=False )
+ invalid_file_tups.extend( invalid_tups )
if current_metadata_dict:
if not metadata_changeset_revision and not metadata_dict:
# We're at the first change set in the change log.
@@ -1707,11 +1791,7 @@
if new_dependency_name and new_dependency_type and new_dependency_version:
# Update all attributes of the tool_dependency record in the database.
log.debug( "Updating tool dependency '%s' with type '%s' and version '%s' to have new type '%s' and version '%s'." % \
- ( str( tool_dependency.name ),
- str( tool_dependency.type ),
- str( tool_dependency.version ),
- str( new_dependency_type ),
- str( new_dependency_version ) ) )
+ ( str( tool_dependency.name ), str( tool_dependency.type ), str( tool_dependency.version ), str( new_dependency_type ), str( new_dependency_version ) ) )
tool_dependency.type = new_dependency_type
tool_dependency.version = new_dependency_version
tool_dependency.status = app.model.ToolDependency.installation_status.UNINSTALLED
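The compare_repository_dependencies() function added in this diff reduces to a simple equal/subset/neither check over ( tool_shed, name, owner, changeset_revision ) tuples. A minimal standalone sketch of that semantics using set operations and hypothetical sample data, not the shed_util_common implementation itself:

def _compare_dependency_lists( ancestor, current ):
    # Behaves like compare_repository_dependencies() when the lists contain no duplicate tuples.
    ancestor_set = set( tuple( tup ) for tup in ancestor )
    current_set = set( tuple( tup ) for tup in current )
    if ancestor_set == current_set:
        return 'equal'
    if ancestor_set.issubset( current_set ):
        return 'subset'
    return 'not equal and not subset'

ancestor_deps = [ [ "http://localhost:9009", "emboss_datatypes", "test", "ab03a2a5f407" ] ]
current_deps = ancestor_deps + [ [ "http://localhost:9009", "emboss_5", "test", "0123456789ab" ] ]
assert _compare_dependency_lists( ancestor_deps, ancestor_deps ) == 'equal'
assert _compare_dependency_lists( ancestor_deps, current_deps ) == 'subset'
assert _compare_dependency_lists( current_deps, ancestor_deps ) == 'not equal and not subset'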
diff -r 5c5249fac5c8893a8da13043b8cc1ba23d3b303e -r 6304eb6a91103121ed6d6bf960b6bc9984966259 lib/galaxy/webapps/community/controllers/common.py
--- a/lib/galaxy/webapps/community/controllers/common.py
+++ b/lib/galaxy/webapps/community/controllers/common.py
@@ -191,17 +191,13 @@
tool.id,
tool.version )
def get_absolute_path_to_file_in_repository( repo_files_dir, file_name ):
+ stripped_file_name = strip_path( file_name )
file_path = None
- found = False
for root, dirs, files in os.walk( repo_files_dir ):
if root.find( '.hg' ) < 0:
for name in files:
- if name == file_name:
- file_path = os.path.abspath( os.path.join( root, name ) )
- found = True
- break
- if found:
- break
+ if name == stripped_file_name:
+ return os.path.abspath( os.path.join( root, name ) )
return file_path
def get_category( trans, id ):
"""Get a category from the database"""
@@ -501,7 +497,6 @@
as_html=True,
displaying_invalid_tool=True )
message = concat_messages( message, message2 )
- status = 'error'
else:
tool, message, sample_files = handle_sample_files_and_load_tool_from_tmp_config( trans, repo, changeset_revision, tool_config_filename, work_dir )
remove_dir( work_dir )
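The simplified get_absolute_path_to_file_in_repository() above strips any directory portion from the requested name before walking the repository, so a nested reference resolves by basename. A small usage sketch under that assumption; the repository layout and file names here are made up:

import os, tempfile

repo_files_dir = tempfile.mkdtemp()
os.makedirs( os.path.join( repo_files_dir, 'tool-data' ) )
open( os.path.join( repo_files_dir, 'tool-data', 'blast.loc.sample' ), 'w' ).close()

# Assuming strip_path() reduces 'tool-data/blast.loc.sample' to 'blast.loc.sample', both of
# these calls return the absolute path to the file created above:
#   get_absolute_path_to_file_in_repository( repo_files_dir, 'blast.loc.sample' )
#   get_absolute_path_to_file_in_repository( repo_files_dir, 'tool-data/blast.loc.sample' )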
diff -r 5c5249fac5c8893a8da13043b8cc1ba23d3b303e -r 6304eb6a91103121ed6d6bf960b6bc9984966259 lib/galaxy/webapps/community/controllers/repository.py
--- a/lib/galaxy/webapps/community/controllers/repository.py
+++ b/lib/galaxy/webapps/community/controllers/repository.py
@@ -1019,6 +1019,8 @@
message = util.restore_text( params.get( 'message', '' ) )
status = params.get( 'status', 'done' )
repository, tool, message = load_tool_from_changeset_revision( trans, repository_id, changeset_revision, tool_config )
+ if message:
+ status = 'error'
tool_state = self.__new_state( trans )
is_malicious = changeset_is_malicious( trans, repository_id, repository.tip( trans.app ) )
metadata = self.get_metadata( trans, repository_id, changeset_revision )
@@ -1760,7 +1762,7 @@
repository_metadata_id = None
metadata = None
is_malicious = False
- repository_dependencies = []
+ repository_dependencies = None
if changeset_revision != INITIAL_CHANGELOG_HASH:
repository_metadata = get_repository_metadata_by_changeset_revision( trans, id, changeset_revision )
if repository_metadata:
@@ -1786,7 +1788,8 @@
repository_metadata,
str( url_for( '/', qualified=True ) ).rstrip( '/' ),
repository_dependencies=None,
- all_repository_dependencies=None )
+ all_repository_dependencies=None,
+ handled=None )
if is_malicious:
if trans.app.security_agent.can_push( trans.app, trans.user, repository ):
message += malicious_error_can_push
@@ -1898,7 +1901,8 @@
repository_metadata,
str( url_for( '/', qualified=True ) ).rstrip( '/' ),
repository_dependencies=None,
- all_repository_dependencies=None )
+ all_repository_dependencies=None,
+ handled=None )
else:
repository_metadata_id = None
metadata = None
@@ -2383,7 +2387,7 @@
email_alerts = from_json_string( repository.email_alerts )
else:
email_alerts = []
- repository_dependencies = []
+ repository_dependencies = None
user = trans.user
if user and params.get( 'receive_email_alerts_button', False ):
flush_needed = False
@@ -2419,7 +2423,8 @@
repository_metadata,
str( url_for( '/', qualified=True ) ).rstrip( '/' ),
repository_dependencies=None,
- all_repository_dependencies=None )
+ all_repository_dependencies=None,
+ handled=None )
else:
repository_metadata_id = None
metadata = None
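The new handled argument threaded through these calls acts as a guard list for the recursive dependency walk in shed_util_common, so a dependency that has already been processed (or that points back to the root repository) is not revisited. A minimal sketch of that guard pattern over an illustrative dependency graph, independent of the tool shed models:

def walk_dependencies( node, graph, root=None, handled=None, collected=None ):
    # Collect every dependency reachable from node, skipping entries already handled.
    if root is None:
        root = node
    if handled is None:
        handled = []
    if collected is None:
        collected = []
    for dependency in graph.get( node, [] ):
        if dependency == root:
            # Skip dependencies that point back to the root, as the real method does.
            handled.append( dependency )
        elif dependency not in handled:
            handled.append( dependency )
            collected.append( dependency )
            walk_dependencies( dependency, graph, root=root, handled=handled, collected=collected )
    return collected

graph = { 'a': [ 'b', 'c' ], 'b': [ 'c', 'a' ], 'c': [] }
assert walk_dependencies( 'a', graph ) == [ 'b', 'c' ]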
diff -r 5c5249fac5c8893a8da13043b8cc1ba23d3b303e -r 6304eb6a91103121ed6d6bf960b6bc9984966259 lib/galaxy/webapps/community/util/container_util.py
--- a/lib/galaxy/webapps/community/util/container_util.py
+++ b/lib/galaxy/webapps/community/util/container_util.py
@@ -11,6 +11,7 @@
self.id = id
self.key = key
self.label = label
+ self.description = None
self.datatypes = []
self.folders = []
self.invalid_tools = []
@@ -24,7 +25,8 @@
if folder == contained_folder:
return index, contained_folder
return 0, None
-
+ def remove_repository_dependency( self, repository_dependency ):
+ self.repository_dependencies.remove( repository_dependency )
class Datatype( object ):
"""Datatype object"""
def __init__( self, id=None, extension=None, type=None, mimetype=None, subclass=None ):
@@ -57,6 +59,9 @@
self.repository_name = repository_name
self.repository_owner = repository_owner
self.changeset_revision = changeset_revision
+ @property
+ def listify( self ):
+ return [ self.toolshed, self.repository_name, self.repository_owner, self.changeset_revision ]
class Tool( object ):
"""Tool object"""
@@ -183,11 +188,15 @@
repository_dependencies_root_folder = Folder( id=folder_id, key='root', label='root' )
folder_id += 1
# Create the Repository dependencies folder and add it to the root folder.
- key = generate_repository_dependencies_key_for_repository( toolshed_base_url, repository_name, repository_owner, changeset_revision )
- repository_dependencies_folder = Folder( id=folder_id, key=key, label=label )
+ repository_dependencies_folder_key = repository_dependencies[ 'root_key' ]
+ repository_dependencies_folder = Folder( id=folder_id, key=repository_dependencies_folder_key, label=label )
+ # The received repository_dependencies is a dictionary with a single 'description' key, and one or more repository_dependency keys.
+ # We want the description value associated with the repository_dependencies_folder.
+ repository_dependencies_folder.description = repository_dependencies.get( 'description', None )
repository_dependencies_root_folder.folders.append( repository_dependencies_folder )
- # Process the repository dependencies.
for key, val in repository_dependencies.items():
+ if key in [ 'root_key', 'description' ]:
+ continue
# Only create a new folder object if necessary.
folder = get_folder( repository_dependencies_root_folder, key )
if not folder:
@@ -197,19 +206,20 @@
folder = Folder( id=folder_id, key=key, label=label )
for repository_dependency_tup in val:
toolshed, name, owner, changeset_revision = repository_dependency_tup
- if not is_folder( repository_dependencies.keys(), toolshed, name, owner, changeset_revision ):
- # Create a new repository_dependency.
- repository_dependency_id += 1
- repository_dependency = RepositoryDependency( id=repository_dependency_id,
- toolshed=toolshed,
- repository_name=name,
- repository_owner=owner,
- changeset_revision=changeset_revision )
- # Insert the repository_dependency into the folder.
- folder.repository_dependencies.append( repository_dependency )
+ # Create a new repository_dependency.
+ repository_dependency_id += 1
+ repository_dependency = RepositoryDependency( id=repository_dependency_id,
+ toolshed=toolshed,
+ repository_name=name,
+ repository_owner=owner,
+ changeset_revision=changeset_revision )
+ # Insert the repository_dependency into the folder.
+ folder.repository_dependencies.append( repository_dependency )
if not get_folder( repository_dependencies_folder, key ):
# Insert the folder into the list.
repository_dependencies_folder.folders.append( folder )
+ # Remove repository_dependencies that are also folders.
+ remove_unwanted_repository_dependencies( repository_dependencies_folder )
else:
repository_dependencies_root_folder = None
return folder_id, repository_dependencies_root_folder
@@ -378,4 +388,10 @@
def key_is_current_repositorys_key( repository_name, repository_owner, changeset_revision, key ):
toolshed_base_url, key_name, key_owner, key_changeset_revision = get_components_from_key( key )
return repository_name == key_name and repository_owner == key_owner and changeset_revision == key_changeset_revision
+def remove_unwanted_repository_dependencies( folder ):
+ for repository_dependency in folder.repository_dependencies:
+ toolshed, name, owner, changeset_revision = repository_dependency.listify
+ key = generate_repository_dependencies_key_for_repository( toolshed, name, owner, changeset_revision )
+ if get_folder( folder, key ):
+ folder.remove_repository_dependency( repository_dependency )
\ No newline at end of file
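The remove_unwanted_repository_dependencies() step added at the end of this file drops any repository_dependency whose generated key already exists as a sub-folder, since the folder itself represents that repository. A compact sketch of the same pruning idea with simplified stand-ins for the Folder class and the key helper; building a new list also avoids removing items from the list while iterating over it:

class SimpleFolder( object ):
    def __init__( self, key ):
        self.key = key
        self.folders = []
        self.repository_dependencies = []

def make_key( toolshed, name, owner, changeset_revision ):
    # Stand-in for generate_repository_dependencies_key_for_repository()'s name-spaced key.
    return '%s/%s/%s/%s' % ( toolshed, name, owner, changeset_revision )

def prune_repository_dependencies( folder ):
    existing_folder_keys = [ sub_folder.key for sub_folder in folder.folders ]
    folder.repository_dependencies = [ rd for rd in folder.repository_dependencies
                                       if make_key( *rd ) not in existing_folder_keys ]

root = SimpleFolder( key='root' )
root.folders.append( SimpleFolder( key=make_key( 'http://localhost:9009', 'emboss_datatypes', 'test', 'ab03a2a5f407' ) ) )
root.repository_dependencies = [ ( 'http://localhost:9009', 'emboss_datatypes', 'test', 'ab03a2a5f407' ),
                                 ( 'http://localhost:9009', 'emboss_5', 'test', '0123456789ab' ) ]
prune_repository_dependencies( root )
assert len( root.repository_dependencies ) == 1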
diff -r 5c5249fac5c8893a8da13043b8cc1ba23d3b303e -r 6304eb6a91103121ed6d6bf960b6bc9984966259 templates/webapps/community/repository/common.mako
--- a/templates/webapps/community/repository/common.mako
+++ b/templates/webapps/community/repository/common.mako
@@ -204,7 +204,10 @@
if folder.datatypes:
col_span_str = 'colspan="4"'
elif folder.label == 'Repository dependencies':
- folder_label = "%s<i> - this repository requires installation of these additional repositories</i>" % folder_label
+ if folder.description:
+ folder_label = "%s<i> - %s</i>" % ( folder_label, folder.description )
+ else:
+ folder_label = "%s<i> - this repository requires installation of these additional repositories</i>" % folder_label
elif folder.invalid_tools:
folder_label = "%s<i> - click the tool config file name to see why the tool is invalid</i>" % folder_label
elif folder.tool_dependencies:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/5c5249fac5c8/
changeset: 5c5249fac5c8
user: greg
date: 2012-12-05 16:46:50
summary: Handle exceptions when parsing proprietary tool_data_table_conf.xml.sample files in the tool shed, and use the webapp's configured tool_data_path when configuring and loading the ToolDataTableManager.
affected #: 1 file
diff -r e40fdd5f6e8936f936f7118b0b9521f7379972ee -r 5c5249fac5c8893a8da13043b8cc1ba23d3b303e lib/galaxy/tools/data/__init__.py
--- a/lib/galaxy/tools/data/__init__.py
+++ b/lib/galaxy/tools/data/__init__.py
@@ -77,8 +77,15 @@
</table>
"""
- tree = util.parse_xml( config_filename )
- root = tree.getroot()
+ error_message = ''
+ table_elems = []
+ try:
+ tree = util.parse_xml( config_filename )
+ root = tree.getroot()
+ except Exception, e:
+ error_message = 'Error attempting to parse file %s: %s' % ( str( os.path.split( config_filename )[ 1 ] ), str( e ) )
+ log.debug( error_message )
+ return table_elems, error_message
# Make a copy of the current list of data_table_elem_names so we can persist later if changes to the config file are necessary.
original_data_table_elem_names = [ name for name in self.data_table_elem_names ]
if root.tag == 'tables':
@@ -86,7 +93,6 @@
tool_data_path=tool_data_path,
from_shed_config=True )
else:
- table_elems = []
type = root.get( 'type', 'tabular' )
assert type in tool_data_table_types, "Unknown data table type '%s'" % type
table_elems.append( root )
@@ -101,7 +107,7 @@
if persist and self.data_table_elem_names != original_data_table_elem_names:
# Persist Galaxy's version of the changed tool_data_table_conf.xml file.
self.to_xml_file( shed_tool_data_table_config )
- return table_elems
+ return table_elems, error_message
def to_xml_file( self, shed_tool_data_table_config ):
"""Write the current in-memory version of the shed_tool_data_table_conf.xml file to disk."""
full_path = os.path.abspath( shed_tool_data_table_config )
@@ -146,8 +152,8 @@
def __init__( self, config_element, tool_data_path ):
super( TabularToolDataTable, self ).__init__( config_element, tool_data_path )
- self.configure_and_load( config_element )
- def configure_and_load( self, config_element ):
+ self.configure_and_load( config_element, tool_data_path )
+ def configure_and_load( self, config_element, tool_data_path ):
"""
Configure and load table from an XML element.
"""
@@ -157,9 +163,16 @@
self.parse_column_spec( config_element )
# Read every file
all_rows = []
- found = False
for file_element in config_element.findall( 'file' ):
- filename = file_element.get( 'path' )
+ found = False
+ if tool_data_path:
+ # We're loading a tool in the tool shed, so we cannot use the Galaxy tool-data
+ # directory which is hard-coded into the tool_data_table_conf.xml entries.
+ filepath = file_element.get( 'path' )
+ filename = os.path.split( filepath )[ 1 ]
+ filename = os.path.join( tool_data_path, filename )
+ else:
+ filename = file_element.get( 'path' )
if os.path.exists( filename ):
found = True
all_rows.extend( self.parse_file_fields( open( filename ) ) )
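With add_new_entries_from_config_file() now returning a ( table_elems, error_message ) tuple instead of just the element list, callers are expected to unpack both values and treat a non-empty message as a parse failure, as the shed_util_common changes above do. A hedged sketch of that calling pattern; the helper name and the app object are placeholders:

import os

def load_sample_data_table( app, sample_file, tool_data_path, persist=False ):
    # An empty error_message means the .sample file parsed cleanly.
    new_table_elems, error_message = app.tool_data_tables.add_new_entries_from_config_file(
        config_filename=sample_file,
        tool_data_path=tool_data_path,
        shed_tool_data_table_config=app.config.shed_tool_data_table_config,
        persist=persist )
    if error_message:
        # Report the problem alongside the offending file name instead of raising.
        return [], [ ( os.path.split( sample_file )[ 1 ], error_message ) ]
    return new_table_elems, []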
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/bea6f76a652e/
changeset: bea6f76a652e
user: dannon
date: 2012-12-04 20:14:28
summary: Tabular Display: For extremely wide datasets, revert to the old behavior until we have a proper 'matrix' datatype w/ renderer. Cleanup/imports.
affected #: 1 file
diff -r 3a1f833d3463ff3b5fc82eeff5adc72f1a07c6a9 -r bea6f76a652e8f47e97c0995986bda8e939f714e lib/galaxy/datatypes/tabular.py
--- a/lib/galaxy/datatypes/tabular.py
+++ b/lib/galaxy/datatypes/tabular.py
@@ -4,15 +4,16 @@
"""
import pkg_resources
pkg_resources.require( "bx-python" )
-
+import gzip
import logging
-import data
+import os
+from cgi import escape
from galaxy import util
-from cgi import escape
+from galaxy.datatypes import data
from galaxy.datatypes import metadata
+from galaxy.datatypes.checkers import is_gzip
from galaxy.datatypes.metadata import MetadataElement
-import galaxy_utils.sequence.vcf
-from sniff import *
+from galaxy.datatypes.sniff import get_headers
from galaxy.util.json import to_json_string
log = logging.getLogger(__name__)
@@ -264,10 +265,21 @@
return to_json_string({'ck_data': ck_data, 'ck_index': ck_index+1})
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, chunk=None):
- #TODO Prevent failure when displaying extremely long > 50kb lines.
if chunk:
return self.get_chunk(trans, dataset, chunk)
- if to_ext or not preview:
+ elif dataset.metadata.columns > 50:
+ #Fancy tabular display is only suitable for datasets without an incredibly large number of columns.
+ #We should add a new datatype 'matrix', with its own draw method, suitable for this kind of data.
+ #For now, default to the old behavior, ugly as it is. Remove this after adding 'matrix'.
+ max_peek_size = 1000000 # 1 MB
+ if not preview or os.stat( dataset.file_name ).st_size < max_peek_size:
+ return open( dataset.file_name )
+ else:
+ trans.response.set_content_type( "text/html" )
+ return trans.stream_template_mako( "/dataset/large_file.mako",
+ truncated_data = open( dataset.file_name ).read(max_peek_size),
+ data = dataset)
+ elif to_ext or not preview:
return self._serve_raw(trans, dataset, to_ext)
else:
column_names = 'null'
@@ -589,7 +601,6 @@
- LANE, TILEm X, Y, INDEX, READ_NO, SEQ, QUAL, POSITION, *STRAND, FILT must be correct
- We will only check that up to the first 5 alignments are correctly formatted.
"""
- import gzip
try:
compress = is_gzip(filename)
if compress:
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/2635d1bcf9aa/
changeset: 2635d1bcf9aa
user: jgoecks
date: 2012-12-04 20:05:30
summary: Remove import *'s from visualization controller.
affected #: 1 file
diff -r 1af9c4734df58bc3ff7cdca54733144cc8e6b9e2 -r 2635d1bcf9aab951981ddced7315cc0d4bf657b0 lib/galaxy/webapps/galaxy/controllers/visualization.py
--- a/lib/galaxy/webapps/galaxy/controllers/visualization.py
+++ b/lib/galaxy/webapps/galaxy/controllers/visualization.py
@@ -1,8 +1,9 @@
from __future__ import absolute_import
-from galaxy import model
-from galaxy.model.item_attrs import *
-from galaxy.web.base.controller import *
+from sqlalchemy import desc
+from galaxy import model, web
+from galaxy.model.item_attrs import UsesAnnotations, UsesItemRatings
+from galaxy.web.base.controller import BaseUIController, SharableMixin, UsesVisualizationMixin
from galaxy.web.framework.helpers import time_ago, grids, iff
from galaxy.util.sanitize_html import sanitize_html
from galaxy.visualization.genomes import decode_dbkey
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/1af9c4734df5/
changeset: 1af9c4734df5
user: jgoecks
date: 2012-12-04 19:42:09
summary: Fix output definition for GTF filter by attribute values list.
affected #: 1 file
diff -r ae3469a913214c4d5d5e6a1c5b449df978056972 -r 1af9c4734df58bc3ff7cdca54733144cc8e6b9e2 tools/filters/gff/gtf_filter_by_attribute_values_list.xml
--- a/tools/filters/gff/gtf_filter_by_attribute_values_list.xml
+++ b/tools/filters/gff/gtf_filter_by_attribute_values_list.xml
@@ -14,7 +14,7 @@
<param format="tabular,txt" name="ids" type="data" label="And attribute values"/></inputs><outputs>
- <data format="input" name="output" metadata_source="input"/>
+ <data format="gtf" name="output"/></outputs><tests><!-- Test filtering with a simple list of values. -->
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/ebebacd0ccc4/
changeset: ebebacd0ccc4
user: jmchilton
date: 2012-11-10 23:08:05
summary: Allow a datatype's merge method (used for parallelism) to take in an additional argument - the output_dataset object - if needed. No existing merge methods should need to be modified; the only modification to Galaxy's effective runtime behavior will occur if a datatype is defined which takes in a third keyword argument called output_dataset. This change can enable the composite dataset parallelism pioneered by Jorrit Boekel, but one can imagine other potential uses (setting metadata derived from multiple files, for instance).
affected #: 1 file
diff -r 5fdb5348d968f5bb38aedba23e26bdb9032a0c0b -r ebebacd0ccc45e5c0f26e0e7ceb55d70a0270dec lib/galaxy/jobs/splitters/multi.py
--- a/lib/galaxy/jobs/splitters/multi.py
+++ b/lib/galaxy/jobs/splitters/multi.py
@@ -1,4 +1,5 @@
import os, logging, shutil
+import inspect
from galaxy import model, util
@@ -125,7 +126,8 @@
output_file_name = str(outputs[output][1])
base_output_name = os.path.basename(output_file_name)
if output in merge_outputs:
- output_type = outputs[output][0].datatype
+ output_dataset = outputs[output][0]
+ output_type = output_dataset.datatype
output_files = [os.path.join(dir,base_output_name) for dir in task_dirs]
# Just include those files f in the output list for which the
# file f exists; some files may not exist if a task fails.
@@ -135,7 +137,13 @@
if len(output_files) < len(task_dirs):
log.debug('merging only %i out of expected %i files for %s'
% (len(output_files), len(task_dirs), output_file_name))
- output_type.merge(output_files, output_file_name)
+ # First two args to merge are always output_files and the path of the dataset. More
+ # complicated merge methods may require more parameters. Set those up here.
+ extra_merge_arg_names = inspect.getargspec( output_type.merge ).args[2:]
+ extra_merge_args = {}
+ if "output_dataset" in extra_merge_arg_names:
+ extra_merge_args["output_dataset"] = output_dataset
+ output_type.merge(output_files, output_file_name, **extra_merge_args)
log.debug('merge finished: %s' % output_file_name)
else:
msg = 'nothing to merge for %s (expected %i files)' \
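The inspect.getargspec() dispatch above lets a datatype opt in to receiving the output dataset simply by declaring an output_dataset keyword argument on its merge method. A minimal sketch of such a datatype; the CompositeType class and its metadata handling are hypothetical, not an existing Galaxy datatype:

import shutil

class CompositeType( object ):
    def merge( self, split_files, output_file, output_dataset=None ):
        # The splitter passes output_dataset automatically because the name appears in this
        # signature; older merge methods without it keep working unchanged.
        with open( output_file, 'wb' ) as out:
            for split_file in split_files:
                with open( split_file, 'rb' ) as part:
                    shutil.copyfileobj( part, out )
        if output_dataset is not None:
            # Metadata derived from the merged parts could be set on output_dataset here.
            pass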
https://bitbucket.org/galaxy/galaxy-central/changeset/ae3469a91321/
changeset: ae3469a91321
user: dannon
date: 2012-12-04 15:45:52
summary: Merged in galaxyp/galaxy-central-optional-merge-args (pull request #83)
affected #: 1 file
diff -r 7bd130c33ebc218d97df83a63555abed222b301b -r ae3469a913214c4d5d5e6a1c5b449df978056972 lib/galaxy/jobs/splitters/multi.py
--- a/lib/galaxy/jobs/splitters/multi.py
+++ b/lib/galaxy/jobs/splitters/multi.py
@@ -1,4 +1,5 @@
import os, logging, shutil
+import inspect
from galaxy import model, util
@@ -125,7 +126,8 @@
output_file_name = str(outputs[output][1])
base_output_name = os.path.basename(output_file_name)
if output in merge_outputs:
- output_type = outputs[output][0].datatype
+ output_dataset = outputs[output][0]
+ output_type = output_dataset.datatype
output_files = [os.path.join(dir,base_output_name) for dir in task_dirs]
# Just include those files f in the output list for which the
# file f exists; some files may not exist if a task fails.
@@ -135,7 +137,13 @@
if len(output_files) < len(task_dirs):
log.debug('merging only %i out of expected %i files for %s'
% (len(output_files), len(task_dirs), output_file_name))
- output_type.merge(output_files, output_file_name)
+ # First two args to merge are always output_files and the path of the dataset. More
+ # complicated merge methods may require more parameters. Set those up here.
+ extra_merge_arg_names = inspect.getargspec( output_type.merge ).args[2:]
+ extra_merge_args = {}
+ if "output_dataset" in extra_merge_arg_names:
+ extra_merge_args["output_dataset"] = output_dataset
+ output_type.merge(output_files, output_file_name, **extra_merge_args)
log.debug('merge finished: %s' % output_file_name)
else:
msg = 'nothing to merge for %s (expected %i files)' \
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.