galaxy-commits
commit/galaxy-central: greg: Fix for displaying repository dependencies when installing a tool shed repository.
by Bitbucket 05 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/1b8fd73e2a54/
changeset: 1b8fd73e2a54
user: greg
date: 2012-12-05 16:55:29
summary: Fix for displaying repository dependencies when installing a tool shed repository.
affected #: 1 file
diff -r 6304eb6a91103121ed6d6bf960b6bc9984966259 -r 1b8fd73e2a5464fd2caab261dbf10017f119185d lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
--- a/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
+++ b/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
@@ -1242,8 +1242,9 @@
encoded_repo_info_dicts=encoded_repo_info_dicts,
includes_tools=includes_tools,
includes_tool_dependencies=includes_tool_dependencies,
+ install_tool_dependencies_check_box=install_tool_dependencies_check_box,
+ includes_repository_dependencies=includes_repository_dependencies,
install_repository_dependencies_check_box=install_repository_dependencies_check_box,
- install_tool_dependencies_check_box=install_tool_dependencies_check_box,
new_tool_panel_section=new_tool_panel_section,
containers_dict=containers_dict,
shed_tool_conf=shed_tool_conf,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: greg: Various fixes and enhancements to support new repository dependencies.
by Bitbucket 05 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/6304eb6a9110/
changeset: 6304eb6a9110
user: greg
date: 2012-12-05 16:47:48
summary: Various fixes and enhancements to support new repository dependencies.
affected #: 5 files
diff -r 5c5249fac5c8893a8da13043b8cc1ba23d3b303e -r 6304eb6a91103121ed6d6bf960b6bc9984966259 lib/galaxy/util/shed_util_common.py
--- a/lib/galaxy/util/shed_util_common.py
+++ b/lib/galaxy/util/shed_util_common.py
@@ -79,7 +79,11 @@
lock.acquire( True )
if tool_dependencies:
# Add the install_dir attribute to the tool_dependencies.
- tool_dependencies = add_installation_directories_to_tool_dependencies( trans, repository_name, repository_owner, changeset_revision, tool_dependencies )
+ tool_dependencies = add_installation_directories_to_tool_dependencies( trans,
+ repository_name,
+ repository_owner,
+ changeset_revision,
+ tool_dependencies )
try:
folder_id = 0
if readme_files_dict:
@@ -321,6 +325,7 @@
log.debug( error_message )
return False, error_message
def compare_changeset_revisions( ancestor_changeset_revision, ancestor_metadata_dict, current_changeset_revision, current_metadata_dict ):
+ """Compare the contents of two changeset revisions to determine if a new repository metadata revision should be created."""
# The metadata associated with ancestor_changeset_revision is ancestor_metadata_dict. This changeset_revision is an ancestor of
# current_changeset_revision which is associated with current_metadata_dict. A new repository_metadata record will be created only
# when this method returns the string 'not equal and not subset'.
@@ -328,23 +333,38 @@
ancestor_tools = ancestor_metadata_dict.get( 'tools', [] )
ancestor_guids = [ tool_dict[ 'guid' ] for tool_dict in ancestor_tools ]
ancestor_guids.sort()
+ ancestor_repository_dependencies_dict = ancestor_metadata_dict.get( 'repository_dependencies', {} )
+ ancestor_repository_dependencies = ancestor_repository_dependencies_dict.get( 'repository_dependencies', [] )
ancestor_tool_dependencies = ancestor_metadata_dict.get( 'tool_dependencies', [] )
ancestor_workflows = ancestor_metadata_dict.get( 'workflows', [] )
current_datatypes = current_metadata_dict.get( 'datatypes', [] )
current_tools = current_metadata_dict.get( 'tools', [] )
current_guids = [ tool_dict[ 'guid' ] for tool_dict in current_tools ]
current_guids.sort()
+ current_repository_dependencies_dict = current_metadata_dict.get( 'repository_dependencies', {} )
+ current_repository_dependencies = current_repository_dependencies_dict.get( 'repository_dependencies', [] )
current_tool_dependencies = current_metadata_dict.get( 'tool_dependencies', [] )
current_workflows = current_metadata_dict.get( 'workflows', [] )
# Handle case where no metadata exists for either changeset.
- if not ancestor_guids and not current_guids and not ancestor_workflows and not current_workflows and not ancestor_datatypes and not current_datatypes:
+ no_datatypes = not ancestor_datatypes and not current_datatypes
+ no_repository_dependencies = not ancestor_repository_dependencies and not current_repository_dependencies
+ # Note: we currently don't need to check tool_dependencies since we're checking for guids - tool_dependencies always require tools (currently).
+ no_tool_dependencies = not ancestor_tool_dependencies and not current_tool_dependencies
+ no_tools = not ancestor_guids and not current_guids
+ no_workflows = not ancestor_workflows and not current_workflows
+ if no_datatypes and no_repository_dependencies and no_tool_dependencies and no_tools and no_workflows:
return 'no metadata'
+ repository_dependency_comparison = compare_repository_dependencies( ancestor_repository_dependencies, current_repository_dependencies )
workflow_comparison = compare_workflows( ancestor_workflows, current_workflows )
datatype_comparison = compare_datatypes( ancestor_datatypes, current_datatypes )
# Handle case where all metadata is the same.
- if ancestor_guids == current_guids and workflow_comparison == 'equal' and datatype_comparison == 'equal':
+ if ancestor_guids == current_guids and repository_dependency_comparison == 'equal' and workflow_comparison == 'equal' and datatype_comparison == 'equal':
return 'equal'
- if workflow_comparison in [ 'equal', 'subset' ] and datatype_comparison in [ 'equal', 'subset' ]:
+ # Handle case where ancestor metadata is a subset of current metadata.
+ repository_dependency_is_subset = repository_dependency_comparison in [ 'equal', 'subset' ]
+ workflow_dependency_is_subset = workflow_comparison in [ 'equal', 'subset' ]
+ datatype_is_subset = datatype_comparison in [ 'equal', 'subset' ]
+ if repository_dependency_is_subset and workflow_dependency_is_subset and datatype_is_subset:
is_subset = True
for guid in ancestor_guids:
if guid not in current_guids:
@@ -354,10 +374,8 @@
return 'subset'
return 'not equal and not subset'
def compare_datatypes( ancestor_datatypes, current_datatypes ):
- # Determine if ancestor_datatypes is the same as current_datatypes
- # or if ancestor_datatypes is a subset of current_datatypes. Each
- # datatype dict looks something like:
- # {"dtype": "galaxy.datatypes.images:Image", "extension": "pdf", "mimetype": "application/pdf"}
+ """Determine if ancestor_datatypes is the same as or a subset of current_datatypes."""
+ # Each datatype dict looks something like: {"dtype": "galaxy.datatypes.images:Image", "extension": "pdf", "mimetype": "application/pdf"}
if len( ancestor_datatypes ) <= len( current_datatypes ):
for ancestor_datatype in ancestor_datatypes:
# Currently the only way to differentiate datatypes is by name.
@@ -378,9 +396,31 @@
else:
return 'subset'
return 'not equal and not subset'
+def compare_repository_dependencies( ancestor_repository_dependencies, current_repository_dependencies ):
+ """Determine if ancestor_repository_dependencies is the same as or a subset of current_repository_dependencies."""
+ # The list of repository_dependencies looks something like: [["http://localhost:9009", "emboss_datatypes", "test", "ab03a2a5f407"]].
+ # Create a string from each tuple in the list for easier comparison.
+ if len( ancestor_repository_dependencies ) <= len( current_repository_dependencies ):
+ for ancestor_tup in ancestor_repository_dependencies:
+ ancestor_tool_shed, ancestor_repository_name, ancestor_repository_owner, ancestor_changeset_revision = ancestor_tup
+ found_in_current = False
+ for current_tup in current_repository_dependencies:
+ current_tool_shed, current_repository_name, current_repository_owner, current_changeset_revision = current_tup
+ if current_tool_shed == ancestor_tool_shed and \
+ current_repository_name == ancestor_repository_name and \
+ current_repository_owner == ancestor_repository_owner and \
+ current_changeset_revision == ancestor_changeset_revision:
+ found_in_current = True
+ break
+ if not found_in_current:
+ return 'not equal and not subset'
+ if len( ancestor_repository_dependencies ) == len( current_repository_dependencies ):
+ return 'equal'
+ else:
+ return 'subset'
+ return 'not equal and not subset'
def compare_workflows( ancestor_workflows, current_workflows ):
- # Determine if ancestor_workflows is the same as current_workflows
- # or if ancestor_workflows is a subset of current_workflows.
+ """Determine if ancestor_workflows is the same as current_workflows or if ancestor_workflows is a subset of current_workflows."""
if len( ancestor_workflows ) <= len( current_workflows ):
for ancestor_workflow_tup in ancestor_workflows:
# ancestor_workflows is a list of tuples where each contained tuple is
@@ -417,6 +457,7 @@
message = ''
return message
def copy_disk_sample_files_to_dir( trans, repo_files_dir, dest_path ):
+ """Copy all files currently on disk that end with the .sample extension to the directory to which dest_path refers."""
sample_files = []
for root, dirs, files in os.walk( repo_files_dir ):
if root.find( '.hg' ) < 0:
@@ -664,10 +705,12 @@
# If the list of sample files includes a tool_data_table_conf.xml.sample file, laad it's table elements into memory.
relative_path, filename = os.path.split( sample_file )
if filename == 'tool_data_table_conf.xml.sample':
- new_table_elems = app.tool_data_tables.add_new_entries_from_config_file( config_filename=sample_file,
- tool_data_path=original_tool_data_path,
- shed_tool_data_table_config=app.config.shed_tool_data_table_config,
- persist=persist )
+ new_table_elems, error_message = app.tool_data_tables.add_new_entries_from_config_file( config_filename=sample_file,
+ tool_data_path=original_tool_data_path,
+ shed_tool_data_table_config=app.config.shed_tool_data_table_config,
+ persist=persist )
+ if error_message:
+ invalid_file_tups.append( ( filename, error_message ) )
for root, dirs, files in os.walk( files_dir ):
if root.find( '.hg' ) < 0 and root.find( 'hgrc' ) < 0:
if '.hg' in dirs:
@@ -675,13 +718,8 @@
for name in files:
# See if we have a repository dependencies defined.
if name == 'repository_dependencies.xml':
- relative_path_to_repository_dependencies = get_relative_path_to_repository_file( root,
- name,
- relative_install_dir,
- work_dir,
- shed_config_dict,
- resetting_all_metadata_on_repository )
- metadata_dict = generate_repository_dependency_metadata( relative_path_to_repository_dependencies, metadata_dict )
+ path_to_repository_dependencies_config = os.path.join( root, name )
+ metadata_dict = generate_repository_dependency_metadata( path_to_repository_dependencies_config, metadata_dict )
# See if we have one or more READ_ME files.
elif name.lower() in readme_file_names:
relative_path_to_readme = get_relative_path_to_repository_file( root,
@@ -793,7 +831,9 @@
if repository_dependencies_tup not in repository_dependencies_tups:
repository_dependencies_tups.append( repository_dependencies_tup )
if repository_dependencies_tups:
- metadata_dict[ 'repository_dependencies' ] = repository_dependencies_tups
+ repository_dependencies_dict = dict( description=root.get( 'description' ),
+ repository_dependencies=repository_dependencies_tups )
+ metadata_dict[ 'repository_dependencies' ] = repository_dependencies_dict
return metadata_dict
def generate_tool_dependency_metadata( app, repository, tool_dependencies_config, metadata_dict, original_repository_metadata=None ):
"""
@@ -1087,25 +1127,47 @@
trans.model.Repository.table.c.user_id == user.id ) ) \
.first()
def get_repository_dependencies_for_changeset_revision( trans, repo, repository, repository_metadata, toolshed_base_url, repository_dependencies=None,
- all_repository_dependencies=None ):
+ all_repository_dependencies=None, handled=None ):
"""
Return a dictionary of all repositories upon which the contents of the received repository_metadata record depend. The dictionary keys
are name-spaced values consisting of toolshed_base_url/repository_name/repository_owner/changeset_revision and the values are lists of
repository_dependency tuples consisting of ( toolshed_base_url, repository_name, repository_owner, changeset_revision ). This method
ensures that all required repositories to the nth degree are returned.
"""
+ if handled is None:
+ handled = []
if all_repository_dependencies is None:
all_repository_dependencies = {}
if repository_dependencies is None:
repository_dependencies = []
metadata = repository_metadata.metadata
if metadata and 'repository_dependencies' in metadata:
+ repository_dependencies_dict = metadata[ 'repository_dependencies' ]
+ # The repository_dependencies entry in the metadata is a dictionary that may have a value for a 'description' key. We want to
+ # store the value of this key only once, the first time through this recursive method.
repository_dependencies_root_key = generate_repository_dependencies_key_for_repository( toolshed_base_url=toolshed_base_url,
repository_name=repository.name,
repository_owner=repository.user.username,
changeset_revision=repository_metadata.changeset_revision )
- for repository_dependency in metadata[ 'repository_dependencies' ]:
- if repository_dependency not in repository_dependencies:
+ if not all_repository_dependencies:
+ # Initialize the all_repository_dependencies dictionary.
+ all_repository_dependencies[ 'root_key' ] = repository_dependencies_root_key
+ all_repository_dependencies[ repository_dependencies_root_key ] = []
+ if 'description' not in all_repository_dependencies:
+ description = repository_dependencies_dict.get( 'description', None )
+ all_repository_dependencies[ 'description' ] = description
+
+ # The next key of interest in repository_dependencies_dict is 'repository_dependencies', which is a list of tuples.
+ repository_dependencies_tups = repository_dependencies_dict[ 'repository_dependencies' ]
+ for repository_dependency in repository_dependencies_tups:
+ # Skip repository dependencies that point to the root repository.
+ check_key = generate_repository_dependencies_key_for_repository( toolshed_base_url=repository_dependency[ 0 ],
+ repository_name=repository_dependency[ 1 ],
+ repository_owner=repository_dependency[ 2 ],
+ changeset_revision=repository_dependency[ 3 ] )
+ if check_key == repository_dependencies_root_key:
+ handled.append( repository_dependency )
+ elif repository_dependency not in handled and repository_dependency not in repository_dependencies:
repository_dependencies.append( repository_dependency )
else:
repository_dependencies_root_key = None
@@ -1121,6 +1183,7 @@
if repository_dependency not in all_repository_dependencies_val:
all_repository_dependencies_val.append( repository_dependency )
all_repository_dependencies[ repository_dependencies_root_key ] = all_repository_dependencies_val
+ handled.append( repository_dependency )
else:
# Insert this repository_dependency.
all_repository_dependencies[ repository_dependencies_root_key ] = [ repository_dependency ]
@@ -1141,6 +1204,23 @@
required_repository_metadata = get_repository_metadata_by_repository_id_changset_revision( trans,
trans.security.encode_id( required_repository.id ),
required_changeset_revision )
+ if required_repository_metadata:
+ # The changeset_revision defined in a repository_dependencies.xml file is outdated, so we need to fix appropriate
+ # entries in our all_repository_dependencies dictionary.
+ updated_repository_dependency = [ tool_shed, name, owner, required_changeset_revision ]
+ for k, v in all_repository_dependencies.items():
+ if k in [ 'root_key', 'description' ]:
+ continue
+ for i, current_repository_dependency in enumerate( v ):
+ current_tool_shed, current_name, current_owner, current_changeset_revision = current_repository_dependency
+ if tool_shed == current_tool_shed and name == current_name and owner == current_owner and changeset_revision == current_changeset_revision:
+ if updated_repository_dependency in v:
+ # We've already stored the updated repository_dependency, so remove the outdated one.
+ v = v.remove( repository_dependency )
+ else:
+ # Store the updated repository_dependency.
+ v[ i ] = updated_repository_dependency
+ all_repository_dependencies[ k ] = v
if required_repository_metadata:
# The required_repository_metadata changeset_revision is installable.
required_metadata = required_repository_metadata.metadata
@@ -1151,7 +1231,8 @@
repository_metadata=required_repository_metadata,
toolshed_base_url=tool_shed,
repository_dependencies=repository_dependencies,
- all_repository_dependencies=all_repository_dependencies )
+ all_repository_dependencies=all_repository_dependencies,
+ handled=handled )
else:
# The repository is in a different tool shed, so build an url and send a request.
raise Exception( "Repository dependencies that refer to repositories in other tool sheds is not yet supported." )
@@ -1321,16 +1402,18 @@
error = False
message = ''
try:
- new_table_elems = app.tool_data_tables.add_new_entries_from_config_file( config_filename=filename,
- tool_data_path=app.config.tool_data_path,
- shed_tool_data_table_config=app.config.shed_tool_data_table_config,
- persist=persist )
+ new_table_elems, message = app.tool_data_tables.add_new_entries_from_config_file( config_filename=filename,
+ tool_data_path=app.config.tool_data_path,
+ shed_tool_data_table_config=app.config.shed_tool_data_table_config,
+ persist=persist )
+ if message:
+ error = True
except Exception, e:
message = str( e )
error = True
return error, message
def is_downloadable( metadata_dict ):
- return 'datatypes' in metadata_dict or 'tools' in metadata_dict or 'workflows' in metadata_dict
+ return 'datatypes' in metadata_dict or 'repository_dependencies' in metadata_dict or 'tools' in metadata_dict or 'workflows' in metadata_dict
def load_tool_from_config( app, full_path ):
try:
tool = app.toolbox.load_tool( full_path )
@@ -1502,14 +1585,15 @@
cloned_ok, error_message = clone_repository( repository_clone_url, work_dir, str( ctx.rev() ) )
if cloned_ok:
log.debug( "Generating metadata for changset revision: %s", str( ctx.rev() ) )
- current_metadata_dict, invalid_file_tups = generate_metadata_for_changeset_revision( app=trans.app,
- repository=repository,
- repository_clone_url=repository_clone_url,
- relative_install_dir=repo_dir,
- repository_files_dir=work_dir,
- resetting_all_metadata_on_repository=True,
- updating_installed_repository=False,
- persist=False )
+ current_metadata_dict, invalid_tups = generate_metadata_for_changeset_revision( app=trans.app,
+ repository=repository,
+ repository_clone_url=repository_clone_url,
+ relative_install_dir=repo_dir,
+ repository_files_dir=work_dir,
+ resetting_all_metadata_on_repository=True,
+ updating_installed_repository=False,
+ persist=False )
+ invalid_file_tups.extend( invalid_tups )
if current_metadata_dict:
if not metadata_changeset_revision and not metadata_dict:
# We're at the first change set in the change log.
@@ -1707,11 +1791,7 @@
if new_dependency_name and new_dependency_type and new_dependency_version:
# Update all attributes of the tool_dependency record in the database.
log.debug( "Updating tool dependency '%s' with type '%s' and version '%s' to have new type '%s' and version '%s'." % \
- ( str( tool_dependency.name ),
- str( tool_dependency.type ),
- str( tool_dependency.version ),
- str( new_dependency_type ),
- str( new_dependency_version ) ) )
+ ( str( tool_dependency.name ), str( tool_dependency.type ), str( tool_dependency.version ), str( new_dependency_type ), str( new_dependency_version ) ) )
tool_dependency.type = new_dependency_type
tool_dependency.version = new_dependency_version
tool_dependency.status = app.model.ToolDependency.installation_status.UNINSTALLED
diff -r 5c5249fac5c8893a8da13043b8cc1ba23d3b303e -r 6304eb6a91103121ed6d6bf960b6bc9984966259 lib/galaxy/webapps/community/controllers/common.py
--- a/lib/galaxy/webapps/community/controllers/common.py
+++ b/lib/galaxy/webapps/community/controllers/common.py
@@ -191,17 +191,13 @@
tool.id,
tool.version )
def get_absolute_path_to_file_in_repository( repo_files_dir, file_name ):
+ stripped_file_name = strip_path( file_name )
file_path = None
- found = False
for root, dirs, files in os.walk( repo_files_dir ):
if root.find( '.hg' ) < 0:
for name in files:
- if name == file_name:
- file_path = os.path.abspath( os.path.join( root, name ) )
- found = True
- break
- if found:
- break
+ if name == stripped_file_name:
+ return os.path.abspath( os.path.join( root, name ) )
return file_path
def get_category( trans, id ):
"""Get a category from the database"""
@@ -501,7 +497,6 @@
as_html=True,
displaying_invalid_tool=True )
message = concat_messages( message, message2 )
- status = 'error'
else:
tool, message, sample_files = handle_sample_files_and_load_tool_from_tmp_config( trans, repo, changeset_revision, tool_config_filename, work_dir )
remove_dir( work_dir )
diff -r 5c5249fac5c8893a8da13043b8cc1ba23d3b303e -r 6304eb6a91103121ed6d6bf960b6bc9984966259 lib/galaxy/webapps/community/controllers/repository.py
--- a/lib/galaxy/webapps/community/controllers/repository.py
+++ b/lib/galaxy/webapps/community/controllers/repository.py
@@ -1019,6 +1019,8 @@
message = util.restore_text( params.get( 'message', '' ) )
status = params.get( 'status', 'done' )
repository, tool, message = load_tool_from_changeset_revision( trans, repository_id, changeset_revision, tool_config )
+ if message:
+ status = 'error'
tool_state = self.__new_state( trans )
is_malicious = changeset_is_malicious( trans, repository_id, repository.tip( trans.app ) )
metadata = self.get_metadata( trans, repository_id, changeset_revision )
@@ -1760,7 +1762,7 @@
repository_metadata_id = None
metadata = None
is_malicious = False
- repository_dependencies = []
+ repository_dependencies = None
if changeset_revision != INITIAL_CHANGELOG_HASH:
repository_metadata = get_repository_metadata_by_changeset_revision( trans, id, changeset_revision )
if repository_metadata:
@@ -1786,7 +1788,8 @@
repository_metadata,
str( url_for( '/', qualified=True ) ).rstrip( '/' ),
repository_dependencies=None,
- all_repository_dependencies=None )
+ all_repository_dependencies=None,
+ handled=None )
if is_malicious:
if trans.app.security_agent.can_push( trans.app, trans.user, repository ):
message += malicious_error_can_push
@@ -1898,7 +1901,8 @@
repository_metadata,
str( url_for( '/', qualified=True ) ).rstrip( '/' ),
repository_dependencies=None,
- all_repository_dependencies=None )
+ all_repository_dependencies=None,
+ handled=None )
else:
repository_metadata_id = None
metadata = None
@@ -2383,7 +2387,7 @@
email_alerts = from_json_string( repository.email_alerts )
else:
email_alerts = []
- repository_dependencies = []
+ repository_dependencies = None
user = trans.user
if user and params.get( 'receive_email_alerts_button', False ):
flush_needed = False
@@ -2419,7 +2423,8 @@
repository_metadata,
str( url_for( '/', qualified=True ) ).rstrip( '/' ),
repository_dependencies=None,
- all_repository_dependencies=None )
+ all_repository_dependencies=None,
+ handled=None )
else:
repository_metadata_id = None
metadata = None
diff -r 5c5249fac5c8893a8da13043b8cc1ba23d3b303e -r 6304eb6a91103121ed6d6bf960b6bc9984966259 lib/galaxy/webapps/community/util/container_util.py
--- a/lib/galaxy/webapps/community/util/container_util.py
+++ b/lib/galaxy/webapps/community/util/container_util.py
@@ -11,6 +11,7 @@
self.id = id
self.key = key
self.label = label
+ self.description = None
self.datatypes = []
self.folders = []
self.invalid_tools = []
@@ -24,7 +25,8 @@
if folder == contained_folder:
return index, contained_folder
return 0, None
-
+ def remove_repository_dependency( self, repository_dependency ):
+ self.repository_dependencies.remove( repository_dependency )
class Datatype( object ):
"""Datatype object"""
def __init__( self, id=None, extension=None, type=None, mimetype=None, subclass=None ):
@@ -57,6 +59,9 @@
self.repository_name = repository_name
self.repository_owner = repository_owner
self.changeset_revision = changeset_revision
+ @property
+ def listify( self ):
+ return [ self.toolshed, self.repository_name, self.repository_owner, self.changeset_revision ]
class Tool( object ):
"""Tool object"""
@@ -183,11 +188,15 @@
repository_dependencies_root_folder = Folder( id=folder_id, key='root', label='root' )
folder_id += 1
# Create the Repository dependencies folder and add it to the root folder.
- key = generate_repository_dependencies_key_for_repository( toolshed_base_url, repository_name, repository_owner, changeset_revision )
- repository_dependencies_folder = Folder( id=folder_id, key=key, label=label )
+ repository_dependencies_folder_key = repository_dependencies[ 'root_key' ]
+ repository_dependencies_folder = Folder( id=folder_id, key=repository_dependencies_folder_key, label=label )
+ # The received repository_dependencies is a dictionary with a single 'description' key, and one or more repository_dependency keys.
+ # We want the description value associated with the repository_dependencies_folder.
+ repository_dependencies_folder.description = repository_dependencies.get( 'description', None )
repository_dependencies_root_folder.folders.append( repository_dependencies_folder )
- # Process the repository dependencies.
for key, val in repository_dependencies.items():
+ if key in [ 'root_key', 'description' ]:
+ continue
# Only create a new folder object if necessary.
folder = get_folder( repository_dependencies_root_folder, key )
if not folder:
@@ -197,19 +206,20 @@
folder = Folder( id=folder_id, key=key, label=label )
for repository_dependency_tup in val:
toolshed, name, owner, changeset_revision = repository_dependency_tup
- if not is_folder( repository_dependencies.keys(), toolshed, name, owner, changeset_revision ):
- # Create a new repository_dependency.
- repository_dependency_id += 1
- repository_dependency = RepositoryDependency( id=repository_dependency_id,
- toolshed=toolshed,
- repository_name=name,
- repository_owner=owner,
- changeset_revision=changeset_revision )
- # Insert the repository_dependency into the folder.
- folder.repository_dependencies.append( repository_dependency )
+ # Create a new repository_dependency.
+ repository_dependency_id += 1
+ repository_dependency = RepositoryDependency( id=repository_dependency_id,
+ toolshed=toolshed,
+ repository_name=name,
+ repository_owner=owner,
+ changeset_revision=changeset_revision )
+ # Insert the repository_dependency into the folder.
+ folder.repository_dependencies.append( repository_dependency )
if not get_folder( repository_dependencies_folder, key ):
# Insert the folder into the list.
repository_dependencies_folder.folders.append( folder )
+ # Remove repository_dependencies that are also folders.
+ remove_unwanted_repository_dependencies( repository_dependencies_folder )
else:
repository_dependencies_root_folder = None
return folder_id, repository_dependencies_root_folder
@@ -378,4 +388,10 @@
def key_is_current_repositorys_key( repository_name, repository_owner, changeset_revision, key ):
toolshed_base_url, key_name, key_owner, key_changeset_revision = get_components_from_key( key )
return repository_name == key_name and repository_owner == key_owner and changeset_revision == key_changeset_revision
+def remove_unwanted_repository_dependencies( folder ):
+ for repository_dependency in folder.repository_dependencies:
+ toolshed, name, owner, changeset_revision = repository_dependency.listify
+ key = generate_repository_dependencies_key_for_repository( toolshed, name, owner, changeset_revision )
+ if get_folder( folder, key ):
+ folder.remove_repository_dependency( repository_dependency )
\ No newline at end of file
diff -r 5c5249fac5c8893a8da13043b8cc1ba23d3b303e -r 6304eb6a91103121ed6d6bf960b6bc9984966259 templates/webapps/community/repository/common.mako
--- a/templates/webapps/community/repository/common.mako
+++ b/templates/webapps/community/repository/common.mako
@@ -204,7 +204,10 @@
if folder.datatypes:
col_span_str = 'colspan="4"'
elif folder.label == 'Repository dependencies':
- folder_label = "%s<i> - this repository requires installation of these additional repositories</i>" % folder_label
+ if folder.description:
+ folder_label = "%s<i> - %s</i>" % ( folder_label, folder.description )
+ else:
+ folder_label = "%s<i> - this repository requires installation of these additional repositories</i>" % folder_label
elif folder.invalid_tools:
folder_label = "%s<i> - click the tool config file name to see why the tool is invalid</i>" % folder_label
elif folder.tool_dependencies:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
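For readers skimming the diff above: each repository dependency is recorded as a ( tool_shed, repository_name, repository_owner, changeset_revision ) tuple, and compare_repository_dependencies() classifies two revisions' dependency lists as 'equal', 'subset', or 'not equal and not subset' by tuple containment. The standalone sketch below is not Galaxy code; the function name and the second example entry are made up, and it assumes dependency entries are unique, which lets Python sets stand in for the explicit loops in the commit:

# Minimal sketch of the containment test performed by
# compare_repository_dependencies(); illustrative only.
def classify_dependency_lists( ancestor, current ):
    """Return 'equal', 'subset', or 'not equal and not subset'."""
    ancestor_set = set( tuple( dep ) for dep in ancestor )
    current_set = set( tuple( dep ) for dep in current )
    if ancestor_set == current_set:
        return 'equal'
    if ancestor_set.issubset( current_set ):
        return 'subset'
    return 'not equal and not subset'

# Entries mirror the format shown in the commit, e.g.
# [ "http://localhost:9009", "emboss_datatypes", "test", "ab03a2a5f407" ];
# the second entry here is a hypothetical example.
ancestor = [ [ "http://localhost:9009", "emboss_datatypes", "test", "ab03a2a5f407" ] ]
current = ancestor + [ [ "http://localhost:9009", "emboss_5", "test", "90af5ec9a4cd" ] ]
print( classify_dependency_lists( ancestor, current ) )  # -> 'subset'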
commit/galaxy-central: greg: Handle exceptions when parsing proprietary tool_data_table_conf.xml.sample files in the tool shed, and use the webapp's configured tool_data_path when configuring and loading the ToolDataTableManager.
by Bitbucket 05 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/5c5249fac5c8/
changeset: 5c5249fac5c8
user: greg
date: 2012-12-05 16:46:50
summary: Handle exceptions when parsing proprietary tool_data_table_conf.xml.sample files in the tool shed, and use the webapp's configured tool_data_path when configuring and loading the ToolDataTableManager.
affected #: 1 file
diff -r e40fdd5f6e8936f936f7118b0b9521f7379972ee -r 5c5249fac5c8893a8da13043b8cc1ba23d3b303e lib/galaxy/tools/data/__init__.py
--- a/lib/galaxy/tools/data/__init__.py
+++ b/lib/galaxy/tools/data/__init__.py
@@ -77,8 +77,15 @@
</table>
"""
- tree = util.parse_xml( config_filename )
- root = tree.getroot()
+ error_message = ''
+ table_elems = []
+ try:
+ tree = util.parse_xml( config_filename )
+ root = tree.getroot()
+ except Exception, e:
+ error_message = 'Error attempting to parse file %s: %s' % ( str( os.path.split( config_filename )[ 1 ] ), str( e ) )
+ log.debug( error_message )
+ return table_elems, error_message
# Make a copy of the current list of data_table_elem_names so we can persist later if changes to the config file are necessary.
original_data_table_elem_names = [ name for name in self.data_table_elem_names ]
if root.tag == 'tables':
@@ -86,7 +93,6 @@
tool_data_path=tool_data_path,
from_shed_config=True )
else:
- table_elems = []
type = root.get( 'type', 'tabular' )
assert type in tool_data_table_types, "Unknown data table type '%s'" % type
table_elems.append( root )
@@ -101,7 +107,7 @@
if persist and self.data_table_elem_names != original_data_table_elem_names:
# Persist Galaxy's version of the changed tool_data_table_conf.xml file.
self.to_xml_file( shed_tool_data_table_config )
- return table_elems
+ return table_elems, error_message
def to_xml_file( self, shed_tool_data_table_config ):
"""Write the current in-memory version of the shed_tool_data_table_conf.xml file to disk."""
full_path = os.path.abspath( shed_tool_data_table_config )
@@ -146,8 +152,8 @@
def __init__( self, config_element, tool_data_path ):
super( TabularToolDataTable, self ).__init__( config_element, tool_data_path )
- self.configure_and_load( config_element )
- def configure_and_load( self, config_element ):
+ self.configure_and_load( config_element, tool_data_path )
+ def configure_and_load( self, config_element, tool_data_path ):
"""
Configure and load table from an XML element.
"""
@@ -157,9 +163,16 @@
self.parse_column_spec( config_element )
# Read every file
all_rows = []
- found = False
for file_element in config_element.findall( 'file' ):
- filename = file_element.get( 'path' )
+ found = False
+ if tool_data_path:
+ # We're loading a tool in the tool shed, so we cannot use the Galaxy tool-data
+ # directory which is hard-coded into the tool_data_table_conf.xml entries.
+ filepath = file_element.get( 'path' )
+ filename = os.path.split( filepath )[ 1 ]
+ filename = os.path.join( tool_data_path, filename )
+ else:
+ filename = file_element.get( 'path' )
if os.path.exists( filename ):
found = True
all_rows.extend( self.parse_file_fields( open( filename ) ) )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
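The changeset above converts a hard failure while parsing a proprietary tool_data_table_conf.xml.sample file into a returned message: add_new_entries_from_config_file() now hands back ( table_elems, error_message ), and callers record a non-empty message instead of crashing. A self-contained sketch of that parse-and-report pattern, using the standard-library ElementTree parser in place of Galaxy's util.parse_xml (the function name and logger setup are illustrative only):

import logging
import os
from xml.etree import ElementTree

log = logging.getLogger( __name__ )

def parse_table_config( config_filename ):
    """Return ( table_elems, error_message ); error_message is '' on success."""
    error_message = ''
    table_elems = []
    try:
        tree = ElementTree.parse( config_filename )
        root = tree.getroot()
    except Exception, e:
        # Report the problem to the caller rather than letting it propagate.
        error_message = 'Error attempting to parse file %s: %s' % ( os.path.split( config_filename )[ 1 ], str( e ) )
        log.debug( error_message )
        return table_elems, error_message
    table_elems.append( root )
    return table_elems, error_message

# Callers unpack the tuple and treat a non-empty message as an error,
# as the admin_toolshed and shed_util_common changes above do:
#     table_elems, message = parse_table_config( 'tool_data_table_conf.xml.sample' )
#     if message: ...record the failure...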
commit/galaxy-central: carlfeberhard: base/_panels.mako: IE console protection; history panel: move hda updater into history-model (from HDACollection), handle new hdas more gracefully, handle hidden hdas that move into the ready states.
by Bitbucket 04 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/e40fdd5f6e89/
changeset: e40fdd5f6e89
user: carlfeberhard
date: 2012-12-04 20:43:49
summary: base/_panels.mako: IE console protection; history panel: move hda updater into history-model (from HDACollection), handle new hdas more gracefully, handle hidden hdas that move into the ready states.
affected #: 8 files
diff -r b175274d13e0be88c4504d51aa0f49d0802cc42b -r e40fdd5f6e8936f936f7118b0b9521f7379972ee static/scripts/mvc/dataset/hda-base.js
--- a/static/scripts/mvc/dataset/hda-base.js
+++ b/static/scripts/mvc/dataset/hda-base.js
@@ -72,7 +72,13 @@
if( urlKey === 'meta_download' ){
urls[ urlKey ] = hdaView._renderMetaDownloadUrls( urlTemplateOrObj, modelJson );
} else {
- urls[ urlKey ] = _.template( urlTemplateOrObj, modelJson );
+ try {
+ urls[ urlKey ] = _.template( urlTemplateOrObj, modelJson );
+ } catch( Error ){
+ throw( hdaView + '._renderUrls error: ' + Error +
+ '\n rendering:' + urlTemplateOrObj +
+ '\n with ' + JSON.stringify( modelJson ) );
+ }
}
}
});
@@ -109,7 +115,6 @@
itemWrapper = $( '<div/>' ).attr( 'id', 'historyItem-' + id ),
initialRender = ( this.$el.children().size() === 0 );
- //console.debug( this + '.render, initial?:', initialRender );
this.$el.attr( 'id', 'historyItemContainer-' + id );
itemWrapper
@@ -362,7 +367,7 @@
break;
default:
//??: no body?
- body.append( $( '<div>Error: unknown dataset state "' + state + '".</div>' ) );
+ body.append( $( '<div>Error: unknown dataset state "' + this.model.get( 'state' ) + '".</div>' ) );
}
body.append( '<div style="clear: both"></div>' );
@@ -520,6 +525,11 @@
}
},
+
+ remove : function(){
+
+ },
+
// ......................................................................... MISC
toString : function(){
var modelString = ( this.model )?( this.model + '' ):( '(no model)' );
diff -r b175274d13e0be88c4504d51aa0f49d0802cc42b -r e40fdd5f6e8936f936f7118b0b9521f7379972ee static/scripts/mvc/dataset/hda-edit.js
--- a/static/scripts/mvc/dataset/hda-edit.js
+++ b/static/scripts/mvc/dataset/hda-edit.js
@@ -193,7 +193,6 @@
if( !( this.model.hasData() )
|| !( visualizations && visualizations.length )
|| !( visualization_url ) ){
- //console.warn( 'NOT rendering visualization icon' )
this.visualizationsButton = null;
return null;
}
diff -r b175274d13e0be88c4504d51aa0f49d0802cc42b -r e40fdd5f6e8936f936f7118b0b9521f7379972ee static/scripts/mvc/dataset/hda-model.js
--- a/static/scripts/mvc/dataset/hda-model.js
+++ b/static/scripts/mvc/dataset/hda-model.js
@@ -32,13 +32,14 @@
// ---whereas these are Dataset related/inherited
id : null,
- name : '',
+ name : '(unnamed dataset)',
// one of HistoryDatasetAssociation.STATES
- state : '',
+ state : 'ok',
// sniffed datatype (sam, tabular, bed, etc.)
data_type : null,
// size in bytes
file_size : 0,
+ file_ext : '',
// array of associated file types (eg. [ 'bam_index', ... ])
meta_files : [],
@@ -48,10 +49,10 @@
deleted : false,
purged : false,
- // aka. !hidden
+ // aka. !hidden (start hidden)
visible : false,
// based on trans.user (is_admin or security_agent.can_access_dataset( <user_roles>, hda.dataset ))
- accessible : false
+ accessible : true
},
/** fetch location of this history in the api */
@@ -59,6 +60,7 @@
url : function(){
//TODO: get this via url router
return 'api/histories/' + this.get( 'history_id' ) + '/contents/' + this.get( 'id' );
+ //TODO: this breaks on save()
},
/** Set up the model, determine if accessible, bind listeners
@@ -222,6 +224,34 @@
return this.map( function( item ){ return item.id; });
},
+ /** If the given hid is in the collection, return it's index. If not, return the insertion point it would need.
+ * NOTE: assumes hids are unique and valid
+ * @param {Int} hid the hid to find or create. If hid is 0, null, undefined: return the last hid + 1
+ * @returns the collection index of the existing hda or an insertion point if it doesn't exist
+ */
+ hidToCollectionIndex : function( hid ){
+ // if the hid is 0, null, undefined: assume a request for a new hid (return the last index)
+ if( !hid ){
+ return this.models.length;
+ }
+
+ var endingIndex = this.models.length - 1;
+ //TODO: prob. more efficient to cycle backwards through these (assuming ordered by hid)
+ for( var i=endingIndex; i>=0; i-- ){
+ var hdaHid = this.at( i ).get( 'hid' );
+ //this.log( i, 'hdaHid:', hdaHid );
+ if( hdaHid == hid ){
+ //this.log( '\t match:', hdaHid, hid, ' returning:', i );
+ return i;
+ }
+ if( hdaHid < hid ){
+ //this.log( '\t past it, returning:', ( i + 1 ) );
+ return i + 1;
+ }
+ }
+ return null;
+ },
+
/** Get every 'shown' hda in this collection based on show_deleted/hidden
* @param {Boolean} show_deleted are we showing deleted hdas?
* @param {Boolean} show_hidden are we showing hidden hdas?
@@ -263,24 +293,31 @@
},
/** Update (fetch) the data of the hdas with the given ids.
+ * @param {String} historyId the id of the history containing this collection
* @param {String[]} ids an array of hda ids to update
+ * @returns {HistoryDatasetAssociation[]} hda models that were updated
* @see HistoryDatasetAssociation#fetch
*/
update : function( ids ){
this.log( this + 'update:', ids );
- if( !( ids && ids.length ) ){ return; }
+ if( !( ids && ids.length ) ){ return []; }
- var collection = this;
+ var collection = this,
+ updatedHdas = null;
_.each( ids, function( id, index ){
- var historyItem = collection.get( id );
- historyItem.fetch();
+ var hda = collection.get( id );
+ if( hda ){
+ hda.fetch();
+ updatedHdas.push( hda );
+ }
});
+ return updatedHdas;
},
/** String representation. */
toString : function(){
- return ( 'HDACollection(' + this.ids().join(',') + ')' );
+ return ( 'HDACollection()' );
}
});
diff -r b175274d13e0be88c4504d51aa0f49d0802cc42b -r e40fdd5f6e8936f936f7118b0b9521f7379972ee static/scripts/mvc/history/history-model.js
--- a/static/scripts/mvc/history/history-model.js
+++ b/static/scripts/mvc/history/history-model.js
@@ -88,8 +88,6 @@
history.fetch()
).then( function( userResponse, historyResponse ){
- //console.warn( 'fetched user: ', userResponse[0] );
- //console.warn( 'fetched history: ', historyResponse[0] );
history.attributes.user = userResponse[0]; //? meh.
history.trigger( 'loaded:user', userResponse[0] );
@@ -102,7 +100,6 @@
// reset the collection to the hdas returned
})).success( function( hdas ){
- //console.warn( 'fetched hdas', hdas );
history.hdas.reset( hdas );
history.checkForUpdates();
@@ -167,7 +164,7 @@
// send the changed ids (if any) to dataset collection to have them fetch their own model changes
if( changedIds.length ){
- history.hdas.update( changedIds );
+ history.updateHdas( changedIds );
}
// set up to keep pulling if this history in run/queue state
@@ -176,7 +173,7 @@
|| ( history.get( 'state' ) === HistoryDatasetAssociation.STATES.QUEUED ) ){
setTimeout( function(){
history.stateUpdater();
- }, 4000 );
+ }, History.UPDATE_DELAY );
// otherwise, we're now in a 'ready' state (no hdas running)
} else {
@@ -187,14 +184,70 @@
// if not interruption by iframe reload
//TODO: remove when iframes are removed
if( !( ( xhr.readyState === 0 ) && ( xhr.status === 0 ) ) ){
- //if( console && console.warn ){
- // console.warn( 'Error getting history updates from the server:', xhr, status, error );
- //}
alert( _l( 'Error getting history updates from the server.' ) + '\n' + error );
}
});
},
+ /** Update the models in the hdas collection that match the ids given by getting their data
+ * via the api/AJAX. If a model exists in the collection, set will be used with the new data.
+ * If it's not in the collection, addHdas will be used to create it.
+ * @param {String[]} hdaIds an array of the encoded ids of the hdas to get from the server.
+ */
+ updateHdas : function( hdaIds ){
+ //TODO:?? move to collection? still need proper url
+ var history = this;
+ jQuery.ajax({
+ url : this.url() + '/contents?' + jQuery.param({ ids : hdaIds.join(',') }),
+
+ error : function( xhr, status, error ){
+ var msg = 'ERROR updating hdas from api history contents:';
+ history.log( msg, hdaIds, xhr, status, error );
+ alert( msg + hdaIds.join(',') );
+ },
+
+ /** when the proper models for the requested ids are returned,
+ * either update existing or create new entries in the hdas collection
+ * @inner
+ */
+ success : function( hdaDataList, status, xhr ){
+ history.log( history + '.updateHdas, success:', hdaDataList, status, xhr );
+ //TODO: compile new models to be added in one go
+ var hdasToAdd = [];
+
+ _.each( hdaDataList, function( hdaData, index ){
+ var existingModel = history.hdas.get( hdaData.id );
+ // if this model exists already, update it
+ if( existingModel ){
+ history.log( 'found existing model in list for id ' + hdaData.id + ', updating...:' );
+ existingModel.set( hdaData );
+
+ // if this model is new and isn't in the hda collection, cache it to be created
+ } else {
+ history.log( 'NO existing model for id ' + hdaData.id + ', creating...:' );
+ modelsToAdd.push( hdaData );
+ }
+ });
+ if( hdasToAdd.length ){
+ history.addHdas( hdasToAdd );
+ }
+ }
+ });
+ },
+
+ /** Add multiple hda models to the hdas collection from an array of hda data.
+ */
+ addHdas : function( hdaDataList ){
+ //TODO: this is all probably easier if hdas is a relation
+ var history = this;
+ //TODO:?? what about hidden? deleted?
+ _.each( hdaDataList, function( hdaData, index ){
+ var indexFromHid = history.hdas.hidToCollectionIndex( hdaData.hid );
+ hdaData.history_id = history.get( 'id' );
+ history.hdas.add( new HistoryDatasetAssociation( hdaData ), { at: indexFromHid });
+ });
+ },
+
toString : function(){
var nameString = ( this.get( 'name' ) )?
( ',' + this.get( 'name' ) ) : ( '' );
@@ -202,6 +255,13 @@
}
});
+//------------------------------------------------------------------------------ CLASS VARS
+/** When the history has running hdas,
+ * this is the amount of time between update checks from the server
+ */
+History.UPDATE_DELAY = 4000;
+
+
//==============================================================================
/** @class A collection of histories (per user).
* (stub) currently unused.
diff -r b175274d13e0be88c4504d51aa0f49d0802cc42b -r e40fdd5f6e8936f936f7118b0b9521f7379972ee static/scripts/mvc/history/history-panel.js
--- a/static/scripts/mvc/history/history-panel.js
+++ b/static/scripts/mvc/history/history-panel.js
@@ -160,6 +160,20 @@
this.model.bind( 'change:nice_size', this.updateHistoryDiskSize, this );
this.model.hdas.bind( 'add', this.add, this );
this.model.hdas.bind( 'reset', this.addAll, this );
+
+ // if an a hidden hda is created (gen. by a workflow), moves thru the updater to the ready state,
+ // then: remove it from the collection if the panel is set to NOT show hidden datasets
+ this.model.hdas.bind( 'change:state',
+ function( hda, newState, changedList ){
+ //TODO: magic string here - somehow use HDA.states
+ if( ( hda.inReadyState() )
+ && ( !hda.get( 'visible' ) )
+ && ( !this.storage.get( 'show_hidden' ) ) ){
+ this.removeHda( hda );
+ }
+ },
+ this );
+
//this.bind( 'all', function(){
// this.log( arguments );
//}, this );
@@ -178,6 +192,8 @@
* @see PersistantStorage
*/
_setUpWebStorage : function( initiallyExpanded, show_deleted, show_hidden ){
+ //this.log( '_setUpWebStorage, initiallyExpanded:', initiallyExpanded,
+ // 'show_deleted:', show_deleted, 'show_hidden', show_hidden );
// data that needs to be persistant over page refreshes
// (note the key function which uses the history id as well)
@@ -197,7 +213,6 @@
// get the show_deleted/hidden settings giving priority to values passed in,
// using web storage otherwise
- //this.log( 'show_deleted:', show_deleted, 'show_hidden', show_hidden );
// if the page has specifically requested show_deleted/hidden, these will be either true or false
// (as opposed to undefined, null) - and we give priority to that setting
if( ( show_deleted === true ) || ( show_deleted === false ) ){
@@ -218,18 +233,31 @@
* @param {HistoryDatasetAssociation} hda hda to add to the collection
*/
add : function( hda ){
- //console.debug( 'add.' + this, hda );
- //TODO
+ //this.log( 'add.' + this, hda );
+ //KISS: just re-render the entire thing when adding
+ this.render();
},
/** Event hander to respond when hdas are reset
*/
addAll : function(){
- //console.debug( 'addAll.' + this );
+ //this.log( 'addAll.' + this );
// re render when all hdas are reset
this.render();
},
+ /** Remove a view from the panel and the assoc. model from the collection
+ * @param {HistoryDataAssociation} the hda to remove
+ */
+ removeHda : function( hdaModel, callback ){
+ var hdaView = this.hdaViews[ hdaModel.get( 'id' ) ];
+ hdaView.$el.fadeOut( 'fast', function(){
+ hdaView.$el.remove();
+ if( callback ){ callback(); }
+ });
+ this.model.hdas.remove( hdaModel );
+ },
+
// ......................................................................... RENDERING
/** Render urls, historyPanel body, and hdas (if any are shown)
* @see Backbone.View#render
@@ -243,8 +271,6 @@
modelJson = this.model.toJSON(),
initialRender = ( this.$el.children().size() === 0 );
- //console.debug( this + '.render, initialRender:', initialRender );
-
// render the urls and add them to the model json
modelJson.urls = this._renderUrls( modelJson );
@@ -466,7 +492,7 @@
}
return false;
},
-
+
// ......................................................................... MISC
/** Return a string rep of the history
*/
diff -r b175274d13e0be88c4504d51aa0f49d0802cc42b -r e40fdd5f6e8936f936f7118b0b9521f7379972ee templates/base.mako
--- a/templates/base.mako
+++ b/templates/base.mako
@@ -23,29 +23,39 @@
## Default javascripts
<%def name="javascripts()">
- ## <!--[if lt IE 7]>
- ## <script type='text/javascript' src="/static/scripts/libs/IE/IE7.js"></script>
- ## <![endif]-->
-
- ${h.js(
- "libs/jquery/jquery",
- "libs/json2",
- "libs/bootstrap",
- "galaxy.base",
- "libs/underscore",
- "libs/backbone/backbone",
- "libs/backbone/backbone-relational",
- "libs/handlebars.runtime",
- "mvc/ui"
- )}
-
- <script type="text/javascript">
- // Set up needed paths.
- var galaxy_paths = new GalaxyPaths({
- root_path: '${h.url_for( "/" )}',
- image_path: '${h.url_for( "/static/images" )}'
- });
- </script>
+ ## <!--[if lt IE 7]>
+ ## <script type='text/javascript' src="/static/scripts/libs/IE/IE7.js"></script>
+ ## <![endif]-->
+
+ ${h.js(
+ "libs/jquery/jquery",
+ "libs/json2",
+ "libs/bootstrap",
+ "galaxy.base",
+ "libs/underscore",
+ "libs/backbone/backbone",
+ "libs/backbone/backbone-relational",
+ "libs/handlebars.runtime",
+ "mvc/ui"
+ )}
+
+ <script type="text/javascript">
+ // console protection
+ window.console = window.console || {
+ log : function(){},
+ debug : function(){},
+ info : function(){},
+ warn : function(){},
+ error : function(){},
+ assert : function(){},
+ };
+
+ // Set up needed paths.
+ var galaxy_paths = new GalaxyPaths({
+ root_path: '${h.url_for( "/" )}',
+ image_path: '${h.url_for( "/static/images" )}'
+ });
+ </script></%def>
## Additional metas can be defined by templates inheriting from this one.
diff -r b175274d13e0be88c4504d51aa0f49d0802cc42b -r e40fdd5f6e8936f936f7118b0b9521f7379972ee templates/base/base_panels.mako
--- a/templates/base/base_panels.mako
+++ b/templates/base/base_panels.mako
@@ -60,6 +60,16 @@
'galaxy.base'
)}
<script type="text/javascript">
+ // console protection
+ window.console = window.console || {
+ log : function(){},
+ debug : function(){},
+ info : function(){},
+ warn : function(){},
+ error : function(){},
+ assert : function(){},
+ };
+
// Set up needed paths.
var galaxy_paths = new GalaxyPaths({
root_path: '${h.url_for( "/" )}',
diff -r b175274d13e0be88c4504d51aa0f49d0802cc42b -r e40fdd5f6e8936f936f7118b0b9521f7379972ee templates/root/alternate_history.mako
--- a/templates/root/alternate_history.mako
+++ b/templates/root/alternate_history.mako
@@ -297,14 +297,13 @@
// add needed controller urls to GalaxyPaths
galaxy_paths.set( 'hda', ${get_hda_url_templates()} );
galaxy_paths.set( 'history', ${get_history_url_templates()} );
-//console.debug( 'galaxy_paths:', galaxy_paths );
$(function(){
galaxyPageSetUp();
Galaxy.historyFrame = window;
// ostensibly, this is the App
- //if( console && console.debug ){
+ //if( window.console && console.debug ){
// //if( console.clear ){ console.clear(); }
// console.pretty = function( o ){ $( '<pre/>' ).text( JSON.stringify( o, null, ' ' ) ).appendTo( 'body' ); }
// top.storage = jQuery.jStorage
@@ -315,9 +314,6 @@
var user = ${ get_current_user() },
history = ${ get_history( history.id ) },
hdas = ${ get_hdas( history.id, datasets ) };
- //console.debug( 'user:', user );
- //console.debug( 'history:', history );
- //console.debug( 'hdas:', hdas );
var currUser = new User( user );
if( !Galaxy.currUser ){ Galaxy.currUser = currUser; }
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
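Among the history-model changes above, hidToCollectionIndex() answers "where does an hda with this hid belong in a collection ordered by hid?": the index of an existing entry, or the insertion point for a new one, with a falsy hid meaning "append at the end". A rough Python equivalent of that lookup, included only to make the idea concrete (hypothetical names, using the standard bisect module, and ignoring the null return the JavaScript version uses for some edge cases):

import bisect

def hid_to_collection_index( hids, hid ):
    """hids is a sorted list of existing hids. Return the index of hid if
    present, otherwise the index at which a new hda with that hid belongs."""
    if not hid:
        # No hid supplied: treat as a brand new item and append.
        return len( hids )
    return bisect.bisect_left( hids, hid )

print( hid_to_collection_index( [ 1, 2, 4, 7 ], 4 ) )     # 2 -- existing position
print( hid_to_collection_index( [ 1, 2, 4, 7 ], 5 ) )     # 3 -- insertion point
print( hid_to_collection_index( [ 1, 2, 4, 7 ], None ) )  # 4 -- append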
commit/galaxy-central: jgoecks: Clean up imports in datasets controller.
by Bitbucket 04 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/b175274d13e0/
changeset: b175274d13e0
user: jgoecks
date: 2012-12-04 20:27:50
summary: Clean up imports in datasets controller.
affected #: 1 file
diff -r 87c8970719b135a7b2c9804484dc0d96df171083 -r b175274d13e0be88c4504d51aa0f49d0802cc42b lib/galaxy/webapps/galaxy/api/datasets.py
--- a/lib/galaxy/webapps/galaxy/api/datasets.py
+++ b/lib/galaxy/webapps/galaxy/api/datasets.py
@@ -2,15 +2,8 @@
API operations on the contents of a dataset.
"""
import logging, os, string, shutil, urllib, re, socket
-from cgi import escape, FieldStorage
from galaxy import util, datatypes, jobs, web, util
-from galaxy.web.base.controller import *
-from galaxy.util.sanitize_html import sanitize_html
-from galaxy.model.orm import *
-from galaxy.visualization.data_providers.genome import *
-from galaxy.visualization.data_providers.basic import ColumnDataProvider
-from galaxy.datatypes.tabular import Vcf
-from galaxy.model import NoConverterException, ConverterDependencyException
+from galaxy.web.base.controller import BaseAPIController, UsesVisualizationMixin, get_highest_priority_msg, messages
from galaxy.web.framework.helpers import is_true
log = logging.getLogger( __name__ )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/b724301b1244/
changeset: b724301b1244
user: jgoecks
date: 2012-12-04 20:15:08
summary: Remove import *'s from page controller.
affected #: 1 file
diff -r 3a1f833d3463ff3b5fc82eeff5adc72f1a07c6a9 -r b724301b1244c4c48e1e98336680b5eafce1c2c5 lib/galaxy/webapps/galaxy/controllers/page.py
--- a/lib/galaxy/webapps/galaxy/controllers/page.py
+++ b/lib/galaxy/webapps/galaxy/controllers/page.py
@@ -1,6 +1,8 @@
-from galaxy import model
-from galaxy.model.item_attrs import *
-from galaxy.web.base.controller import *
+from sqlalchemy import desc
+from galaxy import model, web
+from galaxy.web import error, url_for
+from galaxy.model.item_attrs import UsesAnnotations, UsesItemRatings
+from galaxy.web.base.controller import BaseUIController, SharableMixin, UsesHistoryMixin, UsesStoredWorkflowMixin, UsesVisualizationMixin
from galaxy.web.framework.helpers import time_ago, grids
from galaxy.util.sanitize_html import sanitize_html, _BaseHTMLProcessor
from galaxy.util.odict import odict
https://bitbucket.org/galaxy/galaxy-central/changeset/87c8970719b1/
changeset: 87c8970719b1
user: jgoecks
date: 2012-12-04 20:15:52
summary: merge
affected #: 1 file
diff -r b724301b1244c4c48e1e98336680b5eafce1c2c5 -r 87c8970719b135a7b2c9804484dc0d96df171083 lib/galaxy/datatypes/tabular.py
--- a/lib/galaxy/datatypes/tabular.py
+++ b/lib/galaxy/datatypes/tabular.py
@@ -4,15 +4,16 @@
"""
import pkg_resources
pkg_resources.require( "bx-python" )
-
+import gzip
import logging
-import data
+import os
+from cgi import escape
from galaxy import util
-from cgi import escape
+from galaxy.datatypes import data
from galaxy.datatypes import metadata
+from galaxy.datatypes.checkers import is_gzip
from galaxy.datatypes.metadata import MetadataElement
-import galaxy_utils.sequence.vcf
-from sniff import *
+from galaxy.datatypes.sniff import get_headers
from galaxy.util.json import to_json_string
log = logging.getLogger(__name__)
@@ -264,10 +265,21 @@
return to_json_string({'ck_data': ck_data, 'ck_index': ck_index+1})
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, chunk=None):
- #TODO Prevent failure when displaying extremely long > 50kb lines.
if chunk:
return self.get_chunk(trans, dataset, chunk)
- if to_ext or not preview:
+ elif dataset.metadata.columns > 50:
+ #Fancy tabular display is only suitable for datasets without an incredibly large number of columns.
+ #We should add a new datatype 'matrix', with it's own draw method, suitable for this kind of data.
+ #For now, default to the old behavior, ugly as it is. Remove this after adding 'matrix'.
+ max_peek_size = 1000000 # 1 MB
+ if not preview or os.stat( dataset.file_name ).st_size < max_peek_size:
+ return open( dataset.file_name )
+ else:
+ trans.response.set_content_type( "text/html" )
+ return trans.stream_template_mako( "/dataset/large_file.mako",
+ truncated_data = open( dataset.file_name ).read(max_peek_size),
+ data = dataset)
+ elif to_ext or not preview:
return self._serve_raw(trans, dataset, to_ext)
else:
column_names = 'null'
@@ -589,7 +601,6 @@
- LANE, TILEm X, Y, INDEX, READ_NO, SEQ, QUAL, POSITION, *STRAND, FILT must be correct
- We will only check that up to the first 5 alignments are correctly formatted.
"""
- import gzip
try:
compress = is_gzip(filename)
if compress:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: dannon: Tabular Display: For extremely wide datasets, revert to the old behavior until we have a proper 'matrix' datatype w/ renderer. Cleanup/imports.
by Bitbucket 04 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/bea6f76a652e/
changeset: bea6f76a652e
user: dannon
date: 2012-12-04 20:14:28
summary: Tabular Display: For extremely wide datasets, revert to the old behavior until we have a proper 'matrix' datatype w/ renderer. Cleanup/imports.
affected #: 1 file
diff -r 3a1f833d3463ff3b5fc82eeff5adc72f1a07c6a9 -r bea6f76a652e8f47e97c0995986bda8e939f714e lib/galaxy/datatypes/tabular.py
--- a/lib/galaxy/datatypes/tabular.py
+++ b/lib/galaxy/datatypes/tabular.py
@@ -4,15 +4,16 @@
"""
import pkg_resources
pkg_resources.require( "bx-python" )
-
+import gzip
import logging
-import data
+import os
+from cgi import escape
from galaxy import util
-from cgi import escape
+from galaxy.datatypes import data
from galaxy.datatypes import metadata
+from galaxy.datatypes.checkers import is_gzip
from galaxy.datatypes.metadata import MetadataElement
-import galaxy_utils.sequence.vcf
-from sniff import *
+from galaxy.datatypes.sniff import get_headers
from galaxy.util.json import to_json_string
log = logging.getLogger(__name__)
@@ -264,10 +265,21 @@
return to_json_string({'ck_data': ck_data, 'ck_index': ck_index+1})
def display_data(self, trans, dataset, preview=False, filename=None, to_ext=None, chunk=None):
- #TODO Prevent failure when displaying extremely long > 50kb lines.
if chunk:
return self.get_chunk(trans, dataset, chunk)
- if to_ext or not preview:
+ elif dataset.metadata.columns > 50:
+ #Fancy tabular display is only suitable for datasets without an incredibly large number of columns.
+ #We should add a new datatype 'matrix', with it's own draw method, suitable for this kind of data.
+ #For now, default to the old behavior, ugly as it is. Remove this after adding 'matrix'.
+ max_peek_size = 1000000 # 1 MB
+ if not preview or os.stat( dataset.file_name ).st_size < max_peek_size:
+ return open( dataset.file_name )
+ else:
+ trans.response.set_content_type( "text/html" )
+ return trans.stream_template_mako( "/dataset/large_file.mako",
+ truncated_data = open( dataset.file_name ).read(max_peek_size),
+ data = dataset)
+ elif to_ext or not preview:
return self._serve_raw(trans, dataset, to_ext)
else:
column_names = 'null'
@@ -589,7 +601,6 @@
- LANE, TILEm X, Y, INDEX, READ_NO, SEQ, QUAL, POSITION, *STRAND, FILT must be correct
- We will only check that up to the first 5 alignments are correctly formatted.
"""
- import gzip
try:
compress = is_gzip(filename)
if compress:
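Restated in plain terms, display_data now routes a request as follows. The sketch below is a simplified restatement of the logic in the diff, using illustrative names rather than Galaxy's actual API.

import os

MAX_PEEK_SIZE = 1000000  # 1 MB, as in the diff

def route_tabular_display( columns, file_name, preview=False, to_ext=None, chunk=None ):
    # Returns a label describing how the dataset would be served.
    if chunk:
        return 'chunk'                                      # AJAX chunked view
    elif columns > 50:
        # Too wide for the fancy renderer: fall back to the old behavior.
        if not preview or os.stat( file_name ).st_size < MAX_PEEK_SIZE:
            return 'raw file contents'
        else:
            return 'truncated HTML preview (large_file.mako)'
    elif to_ext or not preview:
        return 'raw download'
    else:
        return 'fancy tabular renderer'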
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: greg: Make the HgWebConfigManager thread-safe.
by Bitbucket 04 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/3a1f833d3463/
changeset: 3a1f833d3463
user: greg
date: 2012-12-04 20:08:21
summary: Make the HgWebConfigManager thread-safe.
affected #: 1 file
diff -r 2635d1bcf9aab951981ddced7315cc0d4bf657b0 -r 3a1f833d3463ff3b5fc82eeff5adc72f1a07c6a9 lib/galaxy/webapps/community/util/hgweb_config.py
--- a/lib/galaxy/webapps/community/util/hgweb_config.py
+++ b/lib/galaxy/webapps/community/util/hgweb_config.py
@@ -1,4 +1,4 @@
-import sys, os, ConfigParser, logging, shutil
+import sys, os, ConfigParser, logging, shutil, threading
from time import strftime
from datetime import date
@@ -15,32 +15,51 @@
self.in_memory_config = None
def add_entry( self, lhs, rhs ):
"""Add an entry in the hgweb.config file for a new repository."""
- # Since we're changing the config, make sure the latest is loaded into memory.
- self.read_config( force_read=True )
- # An entry looks something like: repos/test/mira_assembler = database/community_files/000/repo_123.
- if rhs.startswith( './' ):
- rhs = rhs.replace( './', '', 1 )
- self.make_backup()
- # Add the new entry into memory.
- self.in_memory_config.set( 'paths', lhs, rhs )
- # Persist our in-memory configuration.
- self.write_config()
+ lock = threading.Lock()
+ lock.acquire( True )
+ try:
+ # Since we're changing the config, make sure the latest is loaded into memory.
+ self.read_config( force_read=True )
+ # An entry looks something like: repos/test/mira_assembler = database/community_files/000/repo_123.
+ if rhs.startswith( './' ):
+ rhs = rhs.replace( './', '', 1 )
+ self.make_backup()
+ # Add the new entry into memory.
+ self.in_memory_config.set( 'paths', lhs, rhs )
+ # Persist our in-memory configuration.
+ self.write_config()
+ except Exception, e:
+ log.debug( "Exception in HgWebConfigManager.add_entry(): %s" % str( e ) )
+ finally:
+ lock.release()
def change_entry( self, old_lhs, new_lhs, new_rhs ):
"""Change an entry in the hgweb.config file for a repository - this only happens when the owner changes the name of the repository."""
- self.make_backup()
- # Remove the old entry.
- self.in_memory_config.remove_option( 'paths', old_lhs )
- # Add the new entry.
- self.in_memory_config.set( 'paths', new_lhs, new_rhs )
- # Persist our in-memory configuration.
- self.write_config()
+ lock = threading.Lock()
+ lock.acquire( True )
+ try:
+ self.make_backup()
+ # Remove the old entry.
+ self.in_memory_config.remove_option( 'paths', old_lhs )
+ # Add the new entry.
+ self.in_memory_config.set( 'paths', new_lhs, new_rhs )
+ # Persist our in-memory configuration.
+ self.write_config()
+ except Exception, e:
+ log.debug( "Exception in HgWebConfigManager.change_entry(): %s" % str( e ) )
+ finally:
+ lock.release()
def get_entry( self, lhs ):
"""Return an entry in the hgweb.config file for a repository"""
self.read_config()
try:
entry = self.in_memory_config.get( 'paths', lhs )
except ConfigParser.NoOptionError:
- raise Exception( "Entry for repository %s missing in file %s." % ( lhs, self.hgweb_config ) )
+ try:
+ # We have a multi-threaded front-end, so one of the threads may not have the latest version of the hgweb.config file.
+ self.read_config( force_read=True )
+ entry = self.in_memory_config.get( 'paths', lhs )
+ except ConfigParser.NoOptionError:
+ raise Exception( "Entry for repository %s missing in file %s." % ( lhs, self.hgweb_config ) )
return entry
@property
def hgweb_config( self ):
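As context for the change above: the pattern is to hold a lock across the whole read-modify-write of hgweb.config. Below is a minimal standalone sketch of that pattern, assuming a single lock shared by all callers and a config file that already has a [paths] section; the function and variable names are illustrative, not Galaxy's API.

import threading
import ConfigParser  # Python 2, matching the codebase at the time

_config_lock = threading.Lock()  # assumption: one process-wide lock

def add_path_entry( config_path, lhs, rhs ):
    # Serialize the read-modify-write so concurrent callers cannot interleave.
    _config_lock.acquire( True )
    try:
        parser = ConfigParser.ConfigParser()
        parser.read( config_path )            # always load the latest file
        if rhs.startswith( './' ):
            rhs = rhs.replace( './', '', 1 )
        parser.set( 'paths', lhs, rhs )       # mutate in memory
        config_file = open( config_path, 'wb' )
        try:
            parser.write( config_file )       # persist the in-memory config
        finally:
            config_file.close()
    finally:
        _config_lock.release()

The get_entry() fallback in the diff handles the multi-process case separately: a missing option triggers one forced re-read of the file before the exception is raised.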
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jgoecks: Remove import *'s from visualization controller.
by Bitbucket 04 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/2635d1bcf9aa/
changeset: 2635d1bcf9aa
user: jgoecks
date: 2012-12-04 20:05:30
summary: Remove import *'s from visualization controller.
affected #: 1 file
diff -r 1af9c4734df58bc3ff7cdca54733144cc8e6b9e2 -r 2635d1bcf9aab951981ddced7315cc0d4bf657b0 lib/galaxy/webapps/galaxy/controllers/visualization.py
--- a/lib/galaxy/webapps/galaxy/controllers/visualization.py
+++ b/lib/galaxy/webapps/galaxy/controllers/visualization.py
@@ -1,8 +1,9 @@
from __future__ import absolute_import
-from galaxy import model
-from galaxy.model.item_attrs import *
-from galaxy.web.base.controller import *
+from sqlalchemy import desc
+from galaxy import model, web
+from galaxy.model.item_attrs import UsesAnnotations, UsesItemRatings
+from galaxy.web.base.controller import BaseUIController, SharableMixin, UsesVisualizationMixin
from galaxy.web.framework.helpers import time_ago, grids, iff
from galaxy.util.sanitize_html import sanitize_html
from galaxy.visualization.genomes import decode_dbkey
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jgoecks: Fix output definition for GTF filter by attribute values list/
by Bitbucket 04 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/1af9c4734df5/
changeset: 1af9c4734df5
user: jgoecks
date: 2012-12-04 19:42:09
summary: Fix output definition for GTF filter by attribute values list/
affected #: 1 file
diff -r ae3469a913214c4d5d5e6a1c5b449df978056972 -r 1af9c4734df58bc3ff7cdca54733144cc8e6b9e2 tools/filters/gff/gtf_filter_by_attribute_values_list.xml
--- a/tools/filters/gff/gtf_filter_by_attribute_values_list.xml
+++ b/tools/filters/gff/gtf_filter_by_attribute_values_list.xml
@@ -14,7 +14,7 @@
<param format="tabular,txt" name="ids" type="data" label="And attribute values"/></inputs><outputs>
- <data format="input" name="output" metadata_source="input"/>
+ <data format="gtf" name="output"/></outputs><tests><!-- Test filtering with a simple list of values. -->
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/ebebacd0ccc4/
changeset: ebebacd0ccc4
user: jmchilton
date: 2012-11-10 23:08:05
summary: Allow a datatype's merge method (used for parallelism) to take in an additional argument - the output_dataset object - if needed. No existing merge methods should need to be modified; the only modification to Galaxy's effective runtime behavior occurs if a datatype is defined whose merge method takes a third keyword argument called output_dataset. This change can enable the composite dataset parallelism pioneered by Jorrit Boekel, but one can imagine other potential uses (setting metadata derived from multiple files, for instance).
affected #: 1 file
diff -r 5fdb5348d968f5bb38aedba23e26bdb9032a0c0b -r ebebacd0ccc45e5c0f26e0e7ceb55d70a0270dec lib/galaxy/jobs/splitters/multi.py
--- a/lib/galaxy/jobs/splitters/multi.py
+++ b/lib/galaxy/jobs/splitters/multi.py
@@ -1,4 +1,5 @@
import os, logging, shutil
+import inspect
from galaxy import model, util
@@ -125,7 +126,8 @@
output_file_name = str(outputs[output][1])
base_output_name = os.path.basename(output_file_name)
if output in merge_outputs:
- output_type = outputs[output][0].datatype
+ output_dataset = outputs[output][0]
+ output_type = output_dataset.datatype
output_files = [os.path.join(dir,base_output_name) for dir in task_dirs]
# Just include those files f in the output list for which the
# file f exists; some files may not exist if a task fails.
@@ -135,7 +137,13 @@
if len(output_files) < len(task_dirs):
log.debug('merging only %i out of expected %i files for %s'
% (len(output_files), len(task_dirs), output_file_name))
- output_type.merge(output_files, output_file_name)
+ # First two args to merge always output_files and path of dataset. More
+ # complicated merge methods may require more parameters. Set those up here.
+ extra_merge_arg_names = inspect.getargspec( output_type.merge ).args[2:]
+ extra_merge_args = {}
+ if "output_dataset" in extra_merge_arg_names:
+ extra_merge_args["output_dataset"] = output_dataset
+ output_type.merge(output_files, output_file_name, **extra_merge_args)
log.debug('merge finished: %s' % output_file_name)
else:
msg = 'nothing to merge for %s (expected %i files)' \
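To make the inspect-based dispatch above concrete, here is a hedged sketch of a datatype-like class whose merge method opts in to the extra output_dataset keyword; the class and attribute names are invented for the example and are not Galaxy's.

import inspect
import shutil

class SketchCompositeText( object ):
    def merge( self, split_files, output_file_name, output_dataset=None ):
        # Concatenate per-task outputs into the final dataset file.
        out = open( output_file_name, 'wb' )
        try:
            for split_file in split_files:
                shutil.copyfileobj( open( split_file, 'rb' ), out )
        finally:
            out.close()
        if output_dataset is not None:
            # Having the dataset object available lets a merge derive metadata
            # from all of the partial files (hypothetical attribute below).
            output_dataset.merged_from = len( split_files )

# Splitter-side check, mirroring the diff: does merge() accept output_dataset?
extra_arg_names = inspect.getargspec( SketchCompositeText().merge ).args[2:]
assert 'output_dataset' in extra_arg_names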
https://bitbucket.org/galaxy/galaxy-central/changeset/ae3469a91321/
changeset: ae3469a91321
user: dannon
date: 2012-12-04 15:45:52
summary: Merged in galaxyp/galaxy-central-optional-merge-args (pull request #83)
affected #: 1 file
diff -r 7bd130c33ebc218d97df83a63555abed222b301b -r ae3469a913214c4d5d5e6a1c5b449df978056972 lib/galaxy/jobs/splitters/multi.py
--- a/lib/galaxy/jobs/splitters/multi.py
+++ b/lib/galaxy/jobs/splitters/multi.py
@@ -1,4 +1,5 @@
import os, logging, shutil
+import inspect
from galaxy import model, util
@@ -125,7 +126,8 @@
output_file_name = str(outputs[output][1])
base_output_name = os.path.basename(output_file_name)
if output in merge_outputs:
- output_type = outputs[output][0].datatype
+ output_dataset = outputs[output][0]
+ output_type = output_dataset.datatype
output_files = [os.path.join(dir,base_output_name) for dir in task_dirs]
# Just include those files f in the output list for which the
# file f exists; some files may not exist if a task fails.
@@ -135,7 +137,13 @@
if len(output_files) < len(task_dirs):
log.debug('merging only %i out of expected %i files for %s'
% (len(output_files), len(task_dirs), output_file_name))
- output_type.merge(output_files, output_file_name)
+ # First two args to merge always output_files and path of dataset. More
+ # complicated merge methods may require more parameters. Set those up here.
+ extra_merge_arg_names = inspect.getargspec( output_type.merge ).args[2:]
+ extra_merge_args = {}
+ if "output_dataset" in extra_merge_arg_names:
+ extra_merge_args["output_dataset"] = output_dataset
+ output_type.merge(output_files, output_file_name, **extra_merge_args)
log.debug('merge finished: %s' % output_file_name)
else:
msg = 'nothing to merge for %s (expected %i files)' \
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jgoecks: Update Cuff* labels to reflect GFF3 compatibility.
by Bitbucket 04 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/7bd130c33ebc/
changeset: 7bd130c33ebc
user: jgoecks
date: 2012-12-04 15:09:36
summary: Update Cuff* labels to reflect GFF3 compatibility.
affected #: 3 files
diff -r 0c1f8a06eb563f312d5e90818b34358430c6edf9 -r 7bd130c33ebc218d97df83a63555abed222b301b tools/ngs_rna/cuffcompare_wrapper.xml
--- a/tools/ngs_rna/cuffcompare_wrapper.xml
+++ b/tools/ngs_rna/cuffcompare_wrapper.xml
@@ -48,7 +48,7 @@
<option value="Yes">Yes</option></param><when value="Yes">
- <param format="gff3,gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Make sure your annotation file is in GTF format and that Galaxy knows that your file is GTF--not GFF."/>
+ <param format="gff3,gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Requires an annotation file in GFF3 or GTF format."/><param name="ignore_nonoverlapping_reference" type="boolean" label="Ignore reference transcripts that are not overlapped by any transcript in input files"/></when><when value="No">
diff -r 0c1f8a06eb563f312d5e90818b34358430c6edf9 -r 7bd130c33ebc218d97df83a63555abed222b301b tools/ngs_rna/cuffdiff_wrapper.xml
--- a/tools/ngs_rna/cuffdiff_wrapper.xml
+++ b/tools/ngs_rna/cuffdiff_wrapper.xml
@@ -72,7 +72,7 @@
</command><inputs>
- <param format="gtf,gff3" name="gtf_input" type="data" label="Transcripts" help="A transcript GFF3/GTF file produced by cufflinks, cuffcompare, or other source."/>
+ <param format="gtf,gff3" name="gtf_input" type="data" label="Transcripts" help="A transcript GFF3 or GTF file produced by cufflinks, cuffcompare, or other source."/><conditional name="group_analysis"><param name="do_groups" type="select" label="Perform replicate analysis" help="Perform cuffdiff with replicates in each group."><option value="No">No</option>
diff -r 0c1f8a06eb563f312d5e90818b34358430c6edf9 -r 7bd130c33ebc218d97df83a63555abed222b301b tools/ngs_rna/cuffmerge_wrapper.xml
--- a/tools/ngs_rna/cuffmerge_wrapper.xml
+++ b/tools/ngs_rna/cuffmerge_wrapper.xml
@@ -47,7 +47,7 @@
<option value="Yes">Yes</option></param><when value="Yes">
- <param format="gff3,gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Make sure your annotation file is in GTF format and that Galaxy knows that your file is GTF--not GFF."/>
+ <param format="gff3,gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Requires an annotation file in GFF3 or GTF format."/></when><when value="No"></when>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/088960e4d379/
changeset: 088960e4d379
user: Kyle Ellrott
date: 2012-11-30 19:58:39
summary: Patch to make the extended_metadata loader work better, and fix the library loader to catch non-ASCII errors
affected #: 2 files
diff -r 88aba66bb81351cbd625ca7fb9ed39874016b36a -r 088960e4d3793d473ac0b657215a3336c9f1d263 lib/galaxy/webapps/galaxy/api/library_contents.py
--- a/lib/galaxy/webapps/galaxy/api/library_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/library_contents.py
@@ -181,7 +181,7 @@
else:
#BUG: Everything is cast to string, which can lead to false positives
#for cross type comparisions, ie "True" == True
- yield prefix, str(meta)
+ yield prefix, ("%s" % (meta)).encode("utf8", errors='replace')
@web.expose_api
def update( self, trans, id, library_id, payload, **kwd ):
diff -r 88aba66bb81351cbd625ca7fb9ed39874016b36a -r 088960e4d3793d473ac0b657215a3336c9f1d263 scripts/api/load_data_with_metadata.py
--- a/scripts/api/load_data_with_metadata.py
+++ b/scripts/api/load_data_with_metadata.py
@@ -6,7 +6,7 @@
system in the library
Sample call:
-python load_data_with_metadata.py <api_key><api_url> /tmp/g_inbox/ /tmp/g_inbox/done/ "API Imports"
+python load_data_with_metadata.py <api_key><api_url> /data/folder "API Imports"
NOTE: The upload method used requires the data library filesystem upload allow_library_path_paste
"""
@@ -18,7 +18,7 @@
sys.path.insert( 0, os.path.dirname( __file__ ) )
from common import submit, display
-def main(api_key, api_url, in_folder, out_folder, data_library):
+def main(api_key, api_url, in_folder, data_library):
# Find/Create data library with the above name. Assume we're putting datasets in the root folder '/'
libs = display(api_key, api_url + 'libraries', return_formatted=False)
library_id = None
@@ -36,11 +36,11 @@
if not library_id or not library_folder_id:
print "Failure to configure library destination."
sys.exit(1)
- # Watch in_folder, upload anything that shows up there to data library and get ldda,
- # invoke workflow, move file to out_folder.
+
for fname in os.listdir(in_folder):
fullpath = os.path.join(in_folder, fname)
if os.path.isfile(fullpath) and os.path.exists(fullpath + ".json"):
+ print "Loading", fullpath
data = {}
data['folder_id'] = library_folder_id
data['file_type'] = 'auto'
@@ -49,6 +49,8 @@
data['filesystem_paths'] = fullpath
data['create_type'] = 'file'
+ data['link_data_only'] = 'link_to_files'
+
handle = open( fullpath + ".json" )
smeta = handle.read()
handle.close()
@@ -61,10 +63,9 @@
api_key = sys.argv[1]
api_url = sys.argv[2]
in_folder = sys.argv[3]
- out_folder = sys.argv[4]
- data_library = sys.argv[5]
+ data_library = sys.argv[4]
except IndexError:
- print 'usage: %s key url in_folder out_folder data_library' % os.path.basename( sys.argv[0] )
+ print 'usage: %s key url in_folder data_library' % os.path.basename( sys.argv[0] )
sys.exit( 1 )
- main(api_key, api_url, in_folder, out_folder, data_library )
+ main(api_key, api_url, in_folder, data_library )
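The encode() change in library_contents.py above matters because, under Python 2, str() implicitly encodes unicode as ASCII and raises UnicodeEncodeError on non-ASCII metadata values, whereas an explicit UTF-8 encode with replacement never fails. A small standalone illustration (not Galaxy code):

# Python 2 behaviour the patch works around.
meta = u'caf\xe9'                      # unicode metadata with a non-ASCII char
try:
    str( meta )                        # implicit ASCII encode -> error
except UnicodeEncodeError:
    pass
safe = ( "%s" % meta ).encode( "utf8", 'replace' )
assert safe == 'caf\xc3\xa9'           # valid UTF-8 byte string, never raises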
https://bitbucket.org/galaxy/galaxy-central/changeset/0c1f8a06eb56/
changeset: 0c1f8a06eb56
user: dannon
date: 2012-12-04 15:07:31
summary: Merged in kellrott/galaxy-central (pull request #90)
affected #: 2 files
diff -r 89deda81b0c949de68941f37e77c4c55097199f0 -r 0c1f8a06eb563f312d5e90818b34358430c6edf9 lib/galaxy/webapps/galaxy/api/library_contents.py
--- a/lib/galaxy/webapps/galaxy/api/library_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/library_contents.py
@@ -181,7 +181,7 @@
else:
#BUG: Everything is cast to string, which can lead to false positives
#for cross type comparisions, ie "True" == True
- yield prefix, str(meta)
+ yield prefix, ("%s" % (meta)).encode("utf8", errors='replace')
@web.expose_api
def update( self, trans, id, library_id, payload, **kwd ):
diff -r 89deda81b0c949de68941f37e77c4c55097199f0 -r 0c1f8a06eb563f312d5e90818b34358430c6edf9 scripts/api/load_data_with_metadata.py
--- a/scripts/api/load_data_with_metadata.py
+++ b/scripts/api/load_data_with_metadata.py
@@ -6,7 +6,7 @@
system in the library
Sample call:
-python load_data_with_metadata.py <api_key><api_url> /tmp/g_inbox/ /tmp/g_inbox/done/ "API Imports"
+python load_data_with_metadata.py <api_key><api_url> /data/folder "API Imports"
NOTE: The upload method used requires the data library filesystem upload allow_library_path_paste
"""
@@ -18,7 +18,7 @@
sys.path.insert( 0, os.path.dirname( __file__ ) )
from common import submit, display
-def main(api_key, api_url, in_folder, out_folder, data_library):
+def main(api_key, api_url, in_folder, data_library):
# Find/Create data library with the above name. Assume we're putting datasets in the root folder '/'
libs = display(api_key, api_url + 'libraries', return_formatted=False)
library_id = None
@@ -36,11 +36,11 @@
if not library_id or not library_folder_id:
print "Failure to configure library destination."
sys.exit(1)
- # Watch in_folder, upload anything that shows up there to data library and get ldda,
- # invoke workflow, move file to out_folder.
+
for fname in os.listdir(in_folder):
fullpath = os.path.join(in_folder, fname)
if os.path.isfile(fullpath) and os.path.exists(fullpath + ".json"):
+ print "Loading", fullpath
data = {}
data['folder_id'] = library_folder_id
data['file_type'] = 'auto'
@@ -49,6 +49,8 @@
data['filesystem_paths'] = fullpath
data['create_type'] = 'file'
+ data['link_data_only'] = 'link_to_files'
+
handle = open( fullpath + ".json" )
smeta = handle.read()
handle.close()
@@ -61,10 +63,9 @@
api_key = sys.argv[1]
api_url = sys.argv[2]
in_folder = sys.argv[3]
- out_folder = sys.argv[4]
- data_library = sys.argv[5]
+ data_library = sys.argv[4]
except IndexError:
- print 'usage: %s key url in_folder out_folder data_library' % os.path.basename( sys.argv[0] )
+ print 'usage: %s key url in_folder data_library' % os.path.basename( sys.argv[0] )
sys.exit( 1 )
- main(api_key, api_url, in_folder, out_folder, data_library )
+ main(api_key, api_url, in_folder, data_library )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/cb81a84db021/
changeset: cb81a84db021
user: Kyle Ellrott
date: 2012-11-30 23:01:28
summary: Fixing missing JobWrapper import
affected #: 1 file
diff -r 03ec137ca8a3148aa771769e80963b65194b7895 -r cb81a84db021353757f1cdc1e40fb19e38662ec8 lib/galaxy/jobs/manager.py
--- a/lib/galaxy/jobs/manager.py
+++ b/lib/galaxy/jobs/manager.py
@@ -12,7 +12,7 @@
from sqlalchemy.sql.expression import and_, or_
from galaxy import model
-from galaxy.jobs import handler, Sleeper, NoopQueue
+from galaxy.jobs import handler, Sleeper, NoopQueue, JobWrapper
from galaxy.util.json import from_json_string
log = logging.getLogger( __name__ )
https://bitbucket.org/galaxy/galaxy-central/changeset/89deda81b0c9/
changeset: 89deda81b0c9
user: dannon
date: 2012-12-04 14:49:22
summary: Merged in kellrott/galaxy-central (pull request #91)
affected #: 1 file
diff -r 9aec3401a724eeb8e47a13de6de13c1eb122026c -r 89deda81b0c949de68941f37e77c4c55097199f0 lib/galaxy/jobs/manager.py
--- a/lib/galaxy/jobs/manager.py
+++ b/lib/galaxy/jobs/manager.py
@@ -12,7 +12,7 @@
from sqlalchemy.sql.expression import and_, or_
from galaxy import model
-from galaxy.jobs import handler, Sleeper, NoopQueue
+from galaxy.jobs import handler, Sleeper, NoopQueue, JobWrapper
from galaxy.util.json import from_json_string
log = logging.getLogger( __name__ )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: jgoecks: Enable and document GFF annotation support for Cuffcompare/merge/diff.
by Bitbucket 04 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/9aec3401a724/
changeset: 9aec3401a724
user: jgoecks
date: 2012-12-04 03:02:45
summary: Enable and document GFF annotation support for Cuffcompare/merge/diff.
affected #: 3 files
diff -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad -r 9aec3401a724eeb8e47a13de6de13c1eb122026c tools/ngs_rna/cuffcompare_wrapper.xml
--- a/tools/ngs_rna/cuffcompare_wrapper.xml
+++ b/tools/ngs_rna/cuffcompare_wrapper.xml
@@ -48,7 +48,7 @@
<option value="Yes">Yes</option></param><when value="Yes">
- <param format="gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Make sure your annotation file is in GTF format and that Galaxy knows that your file is GTF--not GFF."/>
+ <param format="gff3,gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Make sure your annotation file is in GTF format and that Galaxy knows that your file is GTF--not GFF."/><param name="ignore_nonoverlapping_reference" type="boolean" label="Ignore reference transcripts that are not overlapped by any transcript in input files"/></when><when value="No">
diff -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad -r 9aec3401a724eeb8e47a13de6de13c1eb122026c tools/ngs_rna/cuffdiff_wrapper.xml
--- a/tools/ngs_rna/cuffdiff_wrapper.xml
+++ b/tools/ngs_rna/cuffdiff_wrapper.xml
@@ -72,7 +72,7 @@
</command><inputs>
- <param format="gtf,gff3" name="gtf_input" type="data" label="Transcripts" help="A transcript GTF file produced by cufflinks, cuffcompare, or other source."/>
+ <param format="gtf,gff3" name="gtf_input" type="data" label="Transcripts" help="A transcript GFF3/GTF file produced by cufflinks, cuffcompare, or other source."/><conditional name="group_analysis"><param name="do_groups" type="select" label="Perform replicate analysis" help="Perform cuffdiff with replicates in each group."><option value="No">No</option>
diff -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad -r 9aec3401a724eeb8e47a13de6de13c1eb122026c tools/ngs_rna/cuffmerge_wrapper.xml
--- a/tools/ngs_rna/cuffmerge_wrapper.xml
+++ b/tools/ngs_rna/cuffmerge_wrapper.xml
@@ -47,7 +47,7 @@
<option value="Yes">Yes</option></param><when value="Yes">
- <param format="gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Make sure your annotation file is in GTF format and that Galaxy knows that your file is GTF--not GFF."/>
+ <param format="gff3,gtf" name="reference_annotation" type="data" label="Reference Annotation" help="Make sure your annotation file is in GTF format and that Galaxy knows that your file is GTF--not GFF."/></when><when value="No"></when>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
6 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/c2c7e22ee51c/
changeset: c2c7e22ee51c
user: jmchilton
date: 2012-11-14 22:49:53
summary: Fixes for multiple input data parameters: they would fail if only a single input was selected, and also in production settings where multiple Galaxy processes are used (the input parameters must take a different path through the code in that case). This changeset addresses both of those issues. The introduction of the DatasetListWrapper class to address this may seem like overkill right now, but I think over the coming months it will prove useful. Once there are multiple ways of selecting many files (be it this, by tag, or the composite dataset work I am doing), it will prove valuable to have uniform ways of accessing those files in Cheetah templates; this class will help pave the way for that.
affected #: 2 files
diff -r cc7df5ca1d47dbbd98614c21589435f84c67f9f5 -r c2c7e22ee51c001e4a19ce397c90ccb61e4d4ca2 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -2256,12 +2256,11 @@
current = values["__current_case__"]
wrap_values( input.cases[current].inputs, values )
elif isinstance( input, DataToolParameter ) and input.multiple:
- values = input_values[ input.name ]
input_values[ input.name ] = \
- [DatasetFilenameWrapper( value,
- datatypes_registry = self.app.datatypes_registry,
- tool = self,
- name = input.name ) for value in values]
+ DatasetListWrapper( input_values[ input.name ],
+ datatypes_registry = self.app.datatypes_registry,
+ tool = self,
+ name = input.name )
elif isinstance( input, DataToolParameter ):
## FIXME: We're populating param_dict with conversions when
## wrapping values, this should happen as a separate
@@ -2333,10 +2332,13 @@
# but this should be considered DEPRECATED, instead use:
# $dataset.get_child( 'name' ).filename
for name, data in input_datasets.items():
- param_dict[name] = DatasetFilenameWrapper( data,
- datatypes_registry = self.app.datatypes_registry,
- tool = self,
- name = name )
+ param_dict_value = param_dict.get(name, None)
+ if not isinstance(param_dict_value, (DatasetFilenameWrapper, DatasetListWrapper)):
+ param_dict[name] = DatasetFilenameWrapper( data,
+ datatypes_registry = self.app.datatypes_registry,
+ tool = self,
+ name = name )
+
if data:
for child in data.children:
param_dict[ "_CHILD___%s___%s" % ( name, child.designation ) ] = DatasetFilenameWrapper( child )
@@ -3102,7 +3104,16 @@
return getattr( self.dataset, key )
def __nonzero__( self ):
return bool( self.dataset )
-
+
+class DatasetListWrapper( list ):
+ """
+ """
+ def __init__( self, datasets, **kwargs ):
+ if not isinstance(datasets, list):
+ datasets = [datasets]
+ list.__init__( self, [DatasetFilenameWrapper(dataset, **kwargs) for dataset in datasets] )
+
+
def json_fix( val ):
if isinstance( val, list ):
return [ json_fix( v ) for v in val ]
diff -r cc7df5ca1d47dbbd98614c21589435f84c67f9f5 -r c2c7e22ee51c001e4a19ce397c90ccb61e4d4ca2 lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -145,6 +145,12 @@
values = input_values[ input.name ]
current = values[ "__current_case__" ]
wrap_values( input.cases[current].inputs, values, skip_missing_values = skip_missing_values )
+ elif isinstance( input, DataToolParameter ) and input.multiple:
+ input_values[ input.name ] = \
+ galaxy.tools.DatasetListWrapper( input_values[ input.name ],
+ datatypes_registry = trans.app.datatypes_registry,
+ tool = tool,
+ name = input.name )
elif isinstance( input, DataToolParameter ):
input_values[ input.name ] = \
galaxy.tools.DatasetFilenameWrapper( input_values[ input.name ],
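The point of DatasetListWrapper, introduced above, is that a multiple-input parameter always reaches Cheetah templates as a list of filename wrappers, whether one or several datasets were selected. A hedged sketch of that normalization with stand-in classes (not Galaxy's wrappers):

class FakeFilenameWrapper( object ):
    def __init__( self, dataset, **kwargs ):
        self.dataset = dataset
    def __str__( self ):
        return self.dataset[ 'file_name' ]     # templates render this as a path

class FakeListWrapper( list ):
    # Normalize one-or-many datasets into a list of filename wrappers.
    def __init__( self, datasets, **kwargs ):
        if not isinstance( datasets, list ):
            datasets = [ datasets ]
        list.__init__( self, [ FakeFilenameWrapper( d, **kwargs ) for d in datasets ] )

# A command template can now join the inputs however many were selected:
inputs = FakeListWrapper( [ { 'file_name': 'a.bed' }, { 'file_name': 'b.bed' } ] )
assert ' '.join( str( d ) for d in inputs ) == 'a.bed b.bed'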
https://bitbucket.org/galaxy/galaxy-central/changeset/e8c84dd71578/
changeset: e8c84dd71578
user: jmchilton
date: 2012-11-14 23:10:31
summary: Allow output data attributes `format` and `metadata_source` to work with multiple input data parameters - the first item selected will be used as the source. This decision was discussed with Dannon in IRC and he thought it was an acceptable approach.
affected #: 1 file
diff -r c2c7e22ee51c001e4a19ce397c90ccb61e4d4ca2 -r e8c84dd715782e7c1d709d8068e6033b835f7f39 lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -62,7 +62,11 @@
# If there are multiple inputs with the same name, they
# are stored as name1, name2, ...
for i, v in enumerate( value ):
- input_datasets[ prefix + input.name + str( i + 1 ) ] = process_dataset( v )
+ processed_dataset = process_dataset( v )
+ if i == 0:
+ # Allow copying metadata to output, first item will be source.
+ input_datasets[ prefix + input.name ] = processed_dataset
+ input_datasets[ prefix + input.name + str( i + 1 ) ] = processed_dataset
conversions = []
for conversion_name, conversion_extensions, conversion_datatypes in input.conversions:
new_data = process_dataset( input_datasets[ prefix + input.name + str( i + 1 ) ], conversion_datatypes )
https://bitbucket.org/galaxy/galaxy-central/changeset/df2eb3960fd9/
changeset: df2eb3960fd9
user: jmchilton
date: 2012-11-15 05:17:55
summary: Fix another error encountered only in multiple process mode, this one related to job rerunning.
affected #: 1 file
diff -r e8c84dd715782e7c1d709d8068e6033b835f7f39 -r df2eb3960fd92e7877bdd31cf5368cb062a9471c lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1551,6 +1551,8 @@
raise ValueError( "History does not include a dataset of the required format / build" )
if value in [None, "None"]:
return None
+ if isinstance( value, str ) and value.find( "," ) > 0:
+ value = [ int( value_part ) for value_part in value.split( "," ) ]
if isinstance( value, list ):
rval = [ trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( v ) for v in value ]
elif isinstance( value, trans.app.model.HistoryDatasetAssociation ):
https://bitbucket.org/galaxy/galaxy-central/changeset/71dbc3556ee6/
changeset: 71dbc3556ee6
user: jmchilton
date: 2012-11-28 18:24:20
summary: Fix for "View Details" display of jobs run with multiple input data parameters.
affected #: 1 file
diff -r df2eb3960fd92e7877bdd31cf5368cb062a9471c -r 71dbc3556ee69959d35bae887e7b783561d779f6 lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1600,8 +1600,10 @@
return value.file_name
def value_to_display_text( self, value, app ):
+ if not isinstance(value, list):
+ value = [ value ]
if value:
- return "%s: %s" % ( value.hid, value.name )
+ return ", ".join( [ "%s: %s" % ( item.hid, item.name ) for item in value ] )
else:
return "No dataset"
https://bitbucket.org/galaxy/galaxy-central/changeset/caa480e454b3/
changeset: caa480e454b3
user: jmchilton
date: 2012-12-03 20:10:30
summary: Fix for this issue: http://dev.list.galaxyproject.org/workflow-input-param-issue-td4657311.html. The reporter of the bug, Marc Logghe, verified that this fixed the immediate problem caused by my previous pull request. The underlying issues with workflows when parameter names are the same still seem to be a problem, but that is outside the scope of this pull request.
affected #: 2 files
diff -r 71dbc3556ee69959d35bae887e7b783561d779f6 -r caa480e454b377a94a5b1ebc545c253b0594643e templates/workflow/display.mako
--- a/templates/workflow/display.mako
+++ b/templates/workflow/display.mako
@@ -45,9 +45,12 @@
%if isinstance( param, DataToolParameter ):
%if ( prefix + param.name ) in step.input_connections_by_name:
<%
- conn = step.input_connections_by_name[ prefix + param.name ]
+ conns = step.input_connections_by_name[ prefix + param.name ]
+ if not isinstance(conns, list):
+ conns = [conns]
+ vals = ["Output dataset '%s' from step %d" % (conn.output_name, int(conn.output_step.order_index)+1) for conn in conns]
%>
- Output dataset '${conn.output_name}' from step ${int(conn.output_step.order_index)+1}
+ ${",".join(vals)}
%else:
<i>select at runtime</i>
%endif
diff -r 71dbc3556ee69959d35bae887e7b783561d779f6 -r caa480e454b377a94a5b1ebc545c253b0594643e templates/workflow/run.mako
--- a/templates/workflow/run.mako
+++ b/templates/workflow/run.mako
@@ -243,9 +243,12 @@
%if isinstance( param, DataToolParameter ):
%if ( prefix + param.name ) in step.input_connections_by_name:
<%
- conn = step.input_connections_by_name[ prefix + param.name ]
+ conns = step.input_connections_by_name[ prefix + param.name ]
+ if not isinstance(conns, list):
+ conns = [conns]
+ vals = ["Output dataset '%s' from step %d" % (conn.output_name, int(conn.output_step.order_index)+1) for conn in conns]
%>
- Output dataset '${conn.output_name}' from step ${int(conn.output_step.order_index)+1}
+ ${",".join(vals)}
%else:
## FIXME: Initialize in the controller
<%
https://bitbucket.org/galaxy/galaxy-central/changeset/1ac27213bafb/
changeset: 1ac27213bafb
user: dannon
date: 2012-12-03 23:19:52
summary: Merged in jmchilton/galaxy-central-multi-input-tool-fixes-2 (pull request #85)
affected #: 5 files
diff -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -2256,12 +2256,11 @@
current = values["__current_case__"]
wrap_values( input.cases[current].inputs, values )
elif isinstance( input, DataToolParameter ) and input.multiple:
- values = input_values[ input.name ]
input_values[ input.name ] = \
- [DatasetFilenameWrapper( value,
- datatypes_registry = self.app.datatypes_registry,
- tool = self,
- name = input.name ) for value in values]
+ DatasetListWrapper( input_values[ input.name ],
+ datatypes_registry = self.app.datatypes_registry,
+ tool = self,
+ name = input.name )
elif isinstance( input, DataToolParameter ):
## FIXME: We're populating param_dict with conversions when
## wrapping values, this should happen as a separate
@@ -2333,10 +2332,13 @@
# but this should be considered DEPRECATED, instead use:
# $dataset.get_child( 'name' ).filename
for name, data in input_datasets.items():
- param_dict[name] = DatasetFilenameWrapper( data,
- datatypes_registry = self.app.datatypes_registry,
- tool = self,
- name = name )
+ param_dict_value = param_dict.get(name, None)
+ if not isinstance(param_dict_value, (DatasetFilenameWrapper, DatasetListWrapper)):
+ param_dict[name] = DatasetFilenameWrapper( data,
+ datatypes_registry = self.app.datatypes_registry,
+ tool = self,
+ name = name )
+
if data:
for child in data.children:
param_dict[ "_CHILD___%s___%s" % ( name, child.designation ) ] = DatasetFilenameWrapper( child )
@@ -3102,7 +3104,16 @@
return getattr( self.dataset, key )
def __nonzero__( self ):
return bool( self.dataset )
-
+
+class DatasetListWrapper( list ):
+ """
+ """
+ def __init__( self, datasets, **kwargs ):
+ if not isinstance(datasets, list):
+ datasets = [datasets]
+ list.__init__( self, [DatasetFilenameWrapper(dataset, **kwargs) for dataset in datasets] )
+
+
def json_fix( val ):
if isinstance( val, list ):
return [ json_fix( v ) for v in val ]
diff -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad lib/galaxy/tools/actions/__init__.py
--- a/lib/galaxy/tools/actions/__init__.py
+++ b/lib/galaxy/tools/actions/__init__.py
@@ -62,7 +62,11 @@
# If there are multiple inputs with the same name, they
# are stored as name1, name2, ...
for i, v in enumerate( value ):
- input_datasets[ prefix + input.name + str( i + 1 ) ] = process_dataset( v )
+ processed_dataset = process_dataset( v )
+ if i == 0:
+ # Allow copying metadata to output, first item will be source.
+ input_datasets[ prefix + input.name ] = processed_dataset
+ input_datasets[ prefix + input.name + str( i + 1 ) ] = processed_dataset
conversions = []
for conversion_name, conversion_extensions, conversion_datatypes in input.conversions:
new_data = process_dataset( input_datasets[ prefix + input.name + str( i + 1 ) ], conversion_datatypes )
@@ -145,6 +149,12 @@
values = input_values[ input.name ]
current = values[ "__current_case__" ]
wrap_values( input.cases[current].inputs, values, skip_missing_values = skip_missing_values )
+ elif isinstance( input, DataToolParameter ) and input.multiple:
+ input_values[ input.name ] = \
+ galaxy.tools.DatasetListWrapper( input_values[ input.name ],
+ datatypes_registry = trans.app.datatypes_registry,
+ tool = tool,
+ name = input.name )
elif isinstance( input, DataToolParameter ):
input_values[ input.name ] = \
galaxy.tools.DatasetFilenameWrapper( input_values[ input.name ],
diff -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -1551,6 +1551,8 @@
raise ValueError( "History does not include a dataset of the required format / build" )
if value in [None, "None"]:
return None
+ if isinstance( value, str ) and value.find( "," ) > 0:
+ value = [ int( value_part ) for value_part in value.split( "," ) ]
if isinstance( value, list ):
rval = [ trans.sa_session.query( trans.app.model.HistoryDatasetAssociation ).get( v ) for v in value ]
elif isinstance( value, trans.app.model.HistoryDatasetAssociation ):
@@ -1598,8 +1600,10 @@
return value.file_name
def value_to_display_text( self, value, app ):
+ if not isinstance(value, list):
+ value = [ value ]
if value:
- return "%s: %s" % ( value.hid, value.name )
+ return ", ".join( [ "%s: %s" % ( item.hid, item.name ) for item in value ] )
else:
return "No dataset"
diff -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad templates/workflow/display.mako
--- a/templates/workflow/display.mako
+++ b/templates/workflow/display.mako
@@ -45,9 +45,12 @@
%if isinstance( param, DataToolParameter ):
%if ( prefix + param.name ) in step.input_connections_by_name:
<%
- conn = step.input_connections_by_name[ prefix + param.name ]
+ conns = step.input_connections_by_name[ prefix + param.name ]
+ if not isinstance(conns, list):
+ conns = [conns]
+ vals = ["Output dataset '%s' from step %d" % (conn.output_name, int(conn.output_step.order_index)+1) for conn in conns]
%>
- Output dataset '${conn.output_name}' from step ${int(conn.output_step.order_index)+1}
+ ${",".join(vals)}
%else:
<i>select at runtime</i>
%endif
diff -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b -r 1ac27213bafb6d3fef210c17728eeef0338bd8ad templates/workflow/run.mako
--- a/templates/workflow/run.mako
+++ b/templates/workflow/run.mako
@@ -243,9 +243,12 @@
%if isinstance( param, DataToolParameter ):
%if ( prefix + param.name ) in step.input_connections_by_name:
<%
- conn = step.input_connections_by_name[ prefix + param.name ]
+ conns = step.input_connections_by_name[ prefix + param.name ]
+ if not isinstance(conns, list):
+ conns = [conns]
+ vals = ["Output dataset '%s' from step %d" % (conn.output_name, int(conn.output_step.order_index)+1) for conn in conns]
%>
- Output dataset '${conn.output_name}' from step ${int(conn.output_step.order_index)+1}
+ ${",".join(vals)}
%else:
## FIXME: Initialize in the controller
<%
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: inithello: Added tool shed functional test script for repository dependencies.
by Bitbucket 03 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/8a3f874b8e0a/
changeset: 8a3f874b8e0a
user: inithello
date: 2012-12-03 20:09:23
summary: Added tool shed functional test script for repository dependencies.
affected #: 6 files
diff -r 8510ddb2a507285890f95af5d2a8d88606d99d47 -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b test/tool_shed/base/twilltestcase.py
--- a/test/tool_shed/base/twilltestcase.py
+++ b/test/tool_shed/base/twilltestcase.py
@@ -1,6 +1,7 @@
from base.twilltestcase import *
from galaxy.webapps.community.util.hgweb_config import *
from test_db_util import *
+import string
from galaxy import eggs
eggs.require('mercurial')
diff -r 8510ddb2a507285890f95af5d2a8d88606d99d47 -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b test/tool_shed/functional/test_0000_basic_repository_features.py
--- a/test/tool_shed/functional/test_0000_basic_repository_features.py
+++ b/test/tool_shed/functional/test_0000_basic_repository_features.py
@@ -6,10 +6,10 @@
admin_email = 'test(a)bx.psu.edu'
admin_username = 'admin-user'
-regular_user = None
-regular_user_private_role = None
-regular_email = 'test-1(a)bx.psu.edu'
-regular_username = 'user1'
+test_user_1 = None
+test_user_1_private_role = None
+test_user_1_email = 'test-1(a)bx.psu.edu'
+test_user_1_name = 'user1'
repository_name = 'filtering'
repository_description = "Galaxy's filtering tool"
@@ -19,10 +19,10 @@
def test_0000_initiate_users( self ):
"""Create necessary user accounts and login as an admin user."""
- self.login( email=regular_email, username=regular_username )
- regular_user = get_user( regular_email )
- assert regular_user is not None, 'Problem retrieving user with email %s from the database' % regular_email
- regular_user_private_role = get_private_role( regular_user )
+ self.login( email=test_user_1_email, username=test_user_1_name )
+ test_user_1 = get_user( test_user_1_email )
+ assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % test_user_1_email
+ test_user_1_private_role = get_private_role( test_user_1 )
self.logout()
self.login( email=admin_email, username=admin_username )
admin_user = get_user( admin_email )
@@ -55,8 +55,8 @@
def test_0025_grant_write_access( self ):
'''Grant write access to another user'''
repository = get_repository_by_name_and_owner( repository_name, admin_username )
- self.grant_write_access( repository, usernames=[ regular_username ] )
- self.revoke_write_access( repository, regular_username )
+ self.grant_write_access( repository, usernames=[ test_user_1_name ] )
+ self.revoke_write_access( repository, test_user_1_name )
def test_0030_upload_filtering_1_1_0( self ):
"""Upload filtering_1.1.0.tar to the repository"""
repository = get_repository_by_name_and_owner( repository_name, admin_username )
diff -r 8510ddb2a507285890f95af5d2a8d88606d99d47 -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b test/tool_shed/functional/test_0010_repository_with_tool_dependencies.py
--- a/test/tool_shed/functional/test_0010_repository_with_tool_dependencies.py
+++ b/test/tool_shed/functional/test_0010_repository_with_tool_dependencies.py
@@ -6,10 +6,10 @@
admin_email = 'test(a)bx.psu.edu'
admin_username = 'admin-user'
-regular_user = None
-regular_user_private_role = None
-regular_email = 'test-1(a)bx.psu.edu'
-regular_username = 'user1'
+test_user_1 = None
+test_user_1_private_role = None
+test_user_1_email = 'test-1(a)bx.psu.edu'
+test_user_1_name = 'user1'
repository_name = 'freebayes'
repository_description = "Galaxy's freebayes tool"
@@ -18,17 +18,21 @@
class TestFreebayesRepository( ShedTwillTestCase ):
'''Testing freebayes with tool data table entries, .loc files, and tool dependencies.'''
def test_0000_create_or_login_admin_user( self ):
+ """Create necessary user accounts and login as an admin user."""
self.logout()
self.login( email=admin_email, username=admin_username )
admin_user = get_user( admin_email )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email
admin_user_private_role = get_private_role( admin_user )
+ def test_0005_create_categories( self ):
+ """Create categories"""
+ self.create_category( 'SNP Analysis', 'Tools for single nucleotide polymorphism data such as WGA' )
def test_0005_create_freebayes_repository_and_upload_tool_xml( self ):
'''Upload freebayes.xml without tool_data_table_conf.xml.sample. This should result in an error and invalid tool.'''
self.create_repository( repository_name,
repository_description,
repository_long_description=repository_long_description,
- categories=[ 'Text Manipulation' ],
+ categories=[ 'SNP Analysis' ],
strings_displayed=[] )
repository = get_repository_by_name_and_owner( repository_name, admin_username )
self.upload_file( repository,
@@ -36,7 +40,7 @@
valid_tools_only=False,
strings_displayed=[ 'Metadata was defined', 'This file requires an entry', 'tool_data_table_conf' ],
commit_message='Uploaded the tool xml.' )
- self.display_manage_repository_page( repository, strings_not_displayed=[ 'Valid tools' ] )
+ self.display_manage_repository_page( repository, strings_displayed=[ 'Invalid tools' ], strings_not_displayed=[ 'Valid tools' ] )
tip = self.get_repository_tip( repository )
self.check_repository_invalid_tools_for_changeset_revision( repository,
tip,
@@ -50,6 +54,8 @@
commit_message='Uploaded the tool data table sample file.' )
def test_0015_upload_missing_sample_loc_file( self ):
'''Upload the missing sam_fa_indices.loc.sample file to the repository.'''
+ # Freebayes does not generate an error when the loc file is missing.
+ # TODO: Generate a test case for that situation.
repository = get_repository_by_name_and_owner( repository_name, admin_username )
self.upload_file( repository,
'freebayes/sam_fa_indices.loc.sample',
diff -r 8510ddb2a507285890f95af5d2a8d88606d99d47 -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b test/tool_shed/functional/test_0020_emboss_repository_dependencies.py
--- /dev/null
+++ b/test/tool_shed/functional/test_0020_emboss_repository_dependencies.py
@@ -0,0 +1,95 @@
+from tool_shed.base.twilltestcase import *
+from tool_shed.base.test_db_util import *
+
+admin_user = None
+admin_user_private_role = None
+admin_email = 'test(a)bx.psu.edu'
+admin_username = 'admin-user'
+
+test_user_1 = None
+test_user_1_private_role = None
+test_user_1_email = 'test-1(a)bx.psu.edu'
+test_user_1_name = 'user1'
+
+datatypes_repository_name = 'emboss_datatypes'
+datatypes_repository_description = "Galaxy applicable data formats used by Emboss tools."
+datatypes_repository_long_description = "Galaxy applicable data formats used by Emboss tools. This repository contains no tools."
+
+emboss_repository_name = 'emboss_5'
+emboss_repository_description = 'Galaxy wrappers for Emboss version 5.0.0 tools'
+emboss_repository_long_description = 'Galaxy wrappers for Emboss version 5.0.0 tools'
+
+new_repository_dependencies_xml = '''<?xml version="1.0"?>
+<repositories>
+ <repository toolshed="${toolshed_url}" name="${repository_name}" owner="${owner}" changeset_revision="${changeset_revision}" />
+</repositories>
+'''
+
+class TestEmbossRepositoryDependencies( ShedTwillTestCase ):
+ '''Testing emboss 5 with repository dependencies.'''
+ def test_0000_initiate_users( self ):
+ """Create necessary user accounts and login as an admin user."""
+ self.logout()
+ self.login( email=admin_email, username=admin_username )
+ admin_user = get_user( admin_email )
+ assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email
+ admin_user_private_role = get_private_role( admin_user )
+ self.logout()
+ self.login( email=test_user_1_email, username=test_user_1_name )
+ test_user_1 = get_user( test_user_1_email )
+ assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % regular_email
+ test_user_1_private_role = get_private_role( test_user_1 )
+ def test_0005_create_categories( self ):
+ """Create categories"""
+ self.logout()
+ self.login( email=admin_email, username=admin_username )
+ self.create_category( 'Sequence Analysis', 'Tools for performing Protein and DNA/RNA analysis' )
+ def test_0010_create_emboss_datatypes_repository_and_upload_tarball( self ):
+ '''Create the emboss_datatypes repository and upload the tarball.'''
+ self.logout()
+ self.login( email=test_user_1_email, username=test_user_1_name )
+ self.create_repository( datatypes_repository_name,
+ datatypes_repository_description,
+ repository_long_description=datatypes_repository_long_description,
+ categories=[ 'Sequence Analysis' ],
+ strings_displayed=[] )
+ repository = get_repository_by_name_and_owner( datatypes_repository_name, test_user_1_name )
+ self.upload_file( repository,
+ 'emboss_5/datatypes_conf.xml',
+ commit_message='Uploaded datatypes_conf.xml.' )
+ def test_0015_verify_datatypes_in_datatypes_repository( self ):
+ '''Verify that the emboss_datatypes repository contains datatype entries.'''
+ repository = get_repository_by_name_and_owner( datatypes_repository_name, test_user_1_name )
+ self.display_manage_repository_page( repository, strings_displayed=[ 'Datatypes', 'equicktandem', 'hennig86', 'vectorstrip' ] )
+ def test_0020_generate_repository_dependencies_xml( self ):
+ '''Generate the repository_dependencies.xml file for the emboss_5 repository.'''
+ datatypes_repository = get_repository_by_name_and_owner( datatypes_repository_name, test_user_1_name )
+ changeset_revision = self.get_repository_tip( datatypes_repository )
+ template_parser = string.Template( new_repository_dependencies_xml )
+ repository_dependency_xml = template_parser.safe_substitute( toolshed_url=self.url,
+ owner=test_user_1_name,
+ repository_name=datatypes_repository.name,
+ changeset_revision=changeset_revision )
+ # Save the generated xml to test-data/emboss_5/repository_dependencies.xml.
+ file( self.get_filename( 'emboss_5/repository_dependencies.xml' ), 'w' ).write( repository_dependency_xml )
+ def test_0025_create_emboss_5_repository_and_upload_files( self ):
+ '''Create the emboss_5 repository and upload a tool tarball, then generate and upload repository_dependencies.xml.'''
+ self.create_repository( emboss_repository_name,
+ emboss_repository_description,
+ repository_long_description=emboss_repository_long_description,
+ categories=[ 'Text Manipulation' ],
+ strings_displayed=[] )
+ repository = get_repository_by_name_and_owner( emboss_repository_name, test_user_1_name )
+ self.upload_file( repository, 'emboss_5/emboss_5.tar', commit_message='Uploaded emboss_5.tar' )
+ self.upload_file( repository, 'emboss_5/repository_dependencies.xml', commit_message='Uploaded repository_dependencies.xml' )
+ def test_0030_verify_emboss_5_repository_dependency_on_emboss_datatypes( self ):
+ '''Verify that the emboss_5 repository now depends on the emboss_datatypes repository with correct name, owner, and changeset revision.'''
+ repository = get_repository_by_name_and_owner( emboss_repository_name, test_user_1_name )
+ datatypes_repository = get_repository_by_name_and_owner( datatypes_repository_name, test_user_1_name )
+ changeset_revision = self.get_repository_tip( datatypes_repository )
+ strings_displayed = [ datatypes_repository_name, test_user_1_name, changeset_revision, 'Repository dependencies' ]
+ self.display_manage_repository_page( repository, strings_displayed=strings_displayed )
+ def test_0035_cleanup( self ):
+ '''Clean up generated test data.'''
+ if os.path.exists( self.get_filename( 'emboss_5/repository_dependencies.xml' ) ):
+ os.remove( self.get_filename( 'emboss_5/repository_dependencies.xml' ) )
diff -r 8510ddb2a507285890f95af5d2a8d88606d99d47 -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b test/tool_shed/test_data/emboss_5/datatypes_conf.xml
--- /dev/null
+++ b/test/tool_shed/test_data/emboss_5/datatypes_conf.xml
@@ -0,0 +1,101 @@
+<?xml version="1.0"?>
+<datatypes>
+ <registration>
+ <datatype extension="acedb" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="asn1" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="btwisted" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="cai" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="charge" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="checktrans" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="chips" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="clustal" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="codata" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="codcmp" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="coderet" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="compseq" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="cpgplot" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="cpgreport" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="cusp" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="cut" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="dan" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="dbmotif" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="diffseq" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="digest" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="dreg" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="einverted" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="embl" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="epestfind" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="equicktandem" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="est2genome" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="etandem" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="excel" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="feattable" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="fitch" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="freak" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="fuzznuc" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="fuzzpro" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="fuzztran" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="garnier" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="gcg" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="geecee" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="genbank" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="helixturnhelix" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="hennig86" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="hmoment" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="ig" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="isochore" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="jackknifer" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="jackknifernon" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="markx0" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="markx1" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="markx10" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="markx2" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="markx3" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="match" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="mega" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="meganon" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="motif" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="msf" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="nametable" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="ncbi" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="needle" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="newcpgreport" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="newcpgseek" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="nexus" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="nexusnon" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="noreturn" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="pair" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="palindrome" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="pepcoil" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="pepinfo" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="pepstats" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="phylip" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="phylipnon" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="pir" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="polydot" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="preg" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="prettyseq" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="primersearch" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="regions" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="score" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="selex" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="seqtable" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="showfeat" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="showorf" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="simple" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="sixpack" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="srs" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="srspair" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="staden" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="strider" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="supermatcher" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="swiss" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="syco" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="table" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="tagseq" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="textsearch" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="vectorstrip" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="wobble" type="galaxy.datatypes.data:Text" subclass="True"/>
+ <datatype extension="wordcount" type="galaxy.datatypes.data:Text" subclass="True"/>
+ </registration>
+</datatypes>
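Each entry above registers an EMBOSS output format as a subclass of galaxy.datatypes.data:Text keyed by its file extension. A conceptual sketch of what such subclass registration amounts to, assuming a plain dictionary registry (an illustration only, not Galaxy's actual datatype registry code):

    # conceptual sketch: approximate what subclass="True" registration of a Text-derived datatype does
    class Text( object ):
        file_ext = 'txt'

    registry = {}

    def register_subclass( extension, base=Text ):
        # dynamically derive a datatype class from the declared base and key it by extension
        datatype_class = type( extension.capitalize(), ( base, ), { 'file_ext': extension } )
        registry[ extension ] = datatype_class()

    for ext in ( 'acedb', 'asn1', 'clustal' ):
        register_subclass( ext )

    print( registry[ 'acedb' ].file_ext )  # 'acedb', an instance of a Text subclass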
diff -r 8510ddb2a507285890f95af5d2a8d88606d99d47 -r 8a3f874b8e0a20afb362b8c4e92989a6cf76509b test/tool_shed/test_data/emboss_5/emboss_5.tar
Binary file test/tool_shed/test_data/emboss_5/emboss_5.tar has changed
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: james_taylor: History: Restore underline to history item titles
by Bitbucket 03 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/8510ddb2a507/
changeset: 8510ddb2a507
user: james_taylor
date: 2012-12-03 19:22:18
summary: History: Restore underline to history item titles
affected #: 1 file
diff -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b -r 8510ddb2a507285890f95af5d2a8d88606d99d47 templates/root/alternate_history.mako
--- a/templates/root/alternate_history.mako
+++ b/templates/root/alternate_history.mako
@@ -454,7 +454,7 @@
}
.historyItemTitle {
- text-decoration: none;
+ text-decoration: underline;
cursor: pointer;
-webkit-user-select: none;
-moz-user-select: none;
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: inithello: Tool shed functional tests for repository with tool dependencies.
by Bitbucket 03 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/83e46d828ddc/
changeset: 83e46d828ddc
user: inithello
date: 2012-12-03 17:02:22
summary: Tool shed functional tests for repository with tool dependencies.
affected #: 6 files
diff -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b test/tool_shed/functional/test_0010_repository_with_tool_dependencies.py
--- a/test/tool_shed/functional/test_0010_repository_with_tool_dependencies.py
+++ b/test/tool_shed/functional/test_0010_repository_with_tool_dependencies.py
@@ -15,5 +15,62 @@
repository_description = "Galaxy's freebayes tool"
repository_long_description = "Long description of Galaxy's freebayes tool"
-class TestRepositoryWithToolDependencies( ShedTwillTestCase ):
- pass
\ No newline at end of file
+class TestFreebayesRepository( ShedTwillTestCase ):
+ '''Testing freebayes with tool data table entries, .loc files, and tool dependencies.'''
+ def test_0000_create_or_login_admin_user( self ):
+ self.logout()
+ self.login( email=admin_email, username=admin_username )
+ admin_user = get_user( admin_email )
+ assert admin_user is not None, 'Problem retrieving user with email %s from the database' % admin_email
+ admin_user_private_role = get_private_role( admin_user )
+ def test_0005_create_freebayes_repository_and_upload_tool_xml( self ):
+ '''Upload freebayes.xml without tool_data_table_conf.xml.sample. This should result in an error and invalid tool.'''
+ self.create_repository( repository_name,
+ repository_description,
+ repository_long_description=repository_long_description,
+ categories=[ 'Text Manipulation' ],
+ strings_displayed=[] )
+ repository = get_repository_by_name_and_owner( repository_name, admin_username )
+ self.upload_file( repository,
+ 'freebayes/freebayes.xml',
+ valid_tools_only=False,
+ strings_displayed=[ 'Metadata was defined', 'This file requires an entry', 'tool_data_table_conf' ],
+ commit_message='Uploaded the tool xml.' )
+ self.display_manage_repository_page( repository, strings_not_displayed=[ 'Valid tools' ] )
+ tip = self.get_repository_tip( repository )
+ self.check_repository_invalid_tools_for_changeset_revision( repository,
+ tip,
+ strings_displayed=[ 'requires an entry', 'tool_data_table_conf.xml' ] )
+ def test_0010_upload_missing_tool_data_table_conf_file( self ):
+ '''Upload the missing tool_data_table_conf.xml.sample file to the repository.'''
+ repository = get_repository_by_name_and_owner( repository_name, admin_username )
+ self.upload_file( repository,
+ 'freebayes/tool_data_table_conf.xml.sample',
+ strings_displayed=[],
+ commit_message='Uploaded the tool data table sample file.' )
+ def test_0015_upload_missing_sample_loc_file( self ):
+ '''Upload the missing sam_fa_indices.loc.sample file to the repository.'''
+ repository = get_repository_by_name_and_owner( repository_name, admin_username )
+ self.upload_file( repository,
+ 'freebayes/sam_fa_indices.loc.sample',
+ strings_displayed=[],
+ commit_message='Uploaded tool data table .loc file.' )
+ def test_0020_upload_invalid_tool_dependency_xml( self ):
+ '''Upload tool_dependencies.xml defining version 0.9.5 of the freebayes package.'''
+ repository = get_repository_by_name_and_owner( repository_name, admin_username )
+ self.upload_file( repository,
+ os.path.join( 'freebayes', 'invalid_deps', 'tool_dependencies.xml' ),
+ strings_displayed=[ 'Name, version and type from a tool requirement tag does not match' ],
+ commit_message='Uploaded invalid tool dependency XML.' )
+ def test_0025_upload_valid_tool_dependency_xml( self ):
+ '''Upload tool_dependencies.xml defining version 0.9.4_9696d0ce8a962f7bb61c4791be5ce44312b81cf8 of the freebayes package.'''
+ repository = get_repository_by_name_and_owner( repository_name, admin_username )
+ self.upload_file( repository,
+ os.path.join( 'freebayes', 'tool_dependencies.xml' ),
+ commit_message='Uploaded valid tool dependency XML.' )
+ def test_0030_verify_tool_dependencies( self ):
+ '''Verify that the uploaded tool_dependencies.xml specifies the correct package versions.'''
+ repository = get_repository_by_name_and_owner( repository_name, admin_username )
+ self.display_manage_repository_page( repository,
+ strings_displayed=[ 'freebayes', '0.9.4_9696d0ce8a9', 'samtools', '0.1.18', 'Valid tools' ],
+ strings_not_displayed=[ 'Invalid tools' ] )
diff -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b test/tool_shed/test_data/freebayes/freebayes.xml
--- /dev/null
+++ b/test/tool_shed/test_data/freebayes/freebayes.xml
@@ -0,0 +1,669 @@
+<?xml version="1.0"?>
+<tool id="freebayes" name="FreeBayes" version="0.0.2">
+ <requirements>
+ <requirement type="package" version="0.9.4_9696d0ce8a962f7bb61c4791be5ce44312b81cf8">freebayes</requirement>
+ <requirement type="package" version="0.1.18">samtools</requirement>
+ </requirements>
+ <description> - Bayesian genetic variant detector</description>
+ <command>
+ ##set up input files
+ #set $reference_fasta_filename = "localref.fa"
+ #if str( $reference_source.reference_source_selector ) == "history":
+ ln -s "${reference_source.ref_file}" "${reference_fasta_filename}" &&
+ samtools faidx "${reference_fasta_filename}" 2>&1 || echo "Error running samtools faidx for FreeBayes" >&2 &&
+ #else:
+ #set $reference_fasta_filename = str( $reference_source.ref_file.fields.path )
+ #end if
+ #for $bam_count, $input_bam in enumerate( $reference_source.input_bams ):
+ ln -s "${input_bam.input_bam}" "localbam_${bam_count}.bam" &&
+ ln -s "${input_bam.input_bam.metadata.bam_index}" "localbam_${bam_count}.bam.bai" &&
+ #end for
+ ##finished setting up inputs
+
+ ##start FreeBayes commandline
+ freebayes
+ #for $bam_count, $input_bam in enumerate( $reference_source.input_bams ):
+ --bam "localbam_${bam_count}.bam"
+ #end for
+ --fasta-reference "${reference_fasta_filename}"
+
+ ##outputs
+ --vcf "${output_vcf}"
+
+ ##advanced options
+ #if str( $options_type.options_type_selector ) == "advanced":
+ ##additional outputs
+ #if $options_type.output_trace_option:
+ --trace "${output_trace}"
+ #end if
+ #if $options_type.output_failed_alleles_option:
+ --failed-alleles "${output_failed_alleles_bed}"
+ #end if
+
+ ##additional inputs
+ #if str( $options_type.target_limit_type.target_limit_type_selector ) == "limit_by_target_file":
+ --targets "${options_type.target_limit_type.input_target_bed}"
+ #elif str( $options_type.target_limit_type.target_limit_type_selector ) == "limit_by_region":
+ --region "${options_type.target_limit_type.region_chromosome}:${options_type.target_limit_type.region_start}..${options_type.target_limit_type.region_end}"
+ #end if
+ #if $options_type.input_sample_file:
+ --samples "${options_type.input_sample_file}"
+ #end if
+ #if $options_type.input_populations_file:
+ --populations "${options_type.input_populations_file}"
+ #end if
+ #if $options_type.input_cnv_map_bed:
+ --cnv-map "${options_type.input_cnv_map_bed}"
+ #end if
+ #if str( $options_type.input_variant_type.input_variant_type_selector ) == "provide_vcf":
+ --variant-input "${options_type.input_variant_type.input_variant_vcf}"
+ ${options_type.input_variant_type.only_use_input_alleles}
+ #end if
+
+ ##reporting
+ #if str( $options_type.section_reporting_type.section_reporting_type_selector ) == "set":
+ --pvar "${options_type.section_reporting_type.pvar}"
+ ${options_type.section_reporting_type.show_reference_repeats}
+ #end if
+
+ ##population model
+ #if str( $options_type.section_population_model_type.section_population_model_type_selector ) == "set":
+ --theta "${options_type.section_population_model_type.theta}"
+ --ploidy "${options_type.section_population_model_type.ploidy}"
+ ${options_type.section_population_model_type.pooled}
+ #end if
+
+ ##reference allele
+ #if str( $options_type.use_reference_allele_type.use_reference_allele_type_selector ) == "include_reference_allele":
+ --use-reference-allele
+ ${options_type.use_reference_allele_type.diploid_reference}
+ --reference-quality "${options_type.use_reference_allele_type.reference_quality_mq},${options_type.use_reference_allele_type.reference_quality_bq}"
+ #end if
+
+ ##allele scope
+ #if str( $options_type.section_allele_scope_type.section_allele_scope_type_selector ) == "set":
+ ${options_type.section_allele_scope_type.no_snps}
+ ${options_type.section_allele_scope_type.no_indels}
+ ${options_type.section_allele_scope_type.no_mnps}
+ ${options_type.section_allele_scope_type.no_complex}
+ --use-best-n-alleles "${options_type.section_allele_scope_type.use_best_n_alleles}"
+ #if $options_type.section_allele_scope_type.max_complex_gap:
+ --max-complex-gap "${options_type.section_allele_scope_type.max_complex_gap}"
+ #end if
+ #end if
+
+ ##indel realignment
+ ${options_type.left_align_indels}
+
+ ##input filters
+ #if str( $options_type.section_input_filters_type.section_input_filters_type_selector ) == "set":
+ ${options_type.section_input_filters_type.use_duplicate_reads}
+ #if str( $options_type.section_input_filters_type.no_filter_type.no_filter_type_selector ) == "apply_filters":
+ --min-mapping-quality "${options_type.section_input_filters_type.no_filter_type.min_mapping_quality}"
+ --min-base-quality "${options_type.section_input_filters_type.no_filter_type.min_base_quality}"
+ --min-supporting-quality "${options_type.section_input_filters_type.no_filter_type.min_supporting_quality_mq},${options_type.section_input_filters_type.no_filter_type.min_supporting_quality_bq}"
+ #else:
+ --no-filters
+ #end if
+ --mismatch-base-quality-threshold "${options_type.section_input_filters_type.mismatch_base_quality_threshold}"
+ #if $options_type.section_input_filters_type.read_mismatch_limit:
+ --read-mismatch-limit "${options_type.section_input_filters_type.read_mismatch_limit}"
+ #end if
+ --read-max-mismatch-fraction "${options_type.section_input_filters_type.read_max_mismatch_fraction}"
+ #if $options_type.section_input_filters_type.read_snp_limit:
+ --read-snp-limit "${options_type.section_input_filters_type.read_snp_limit}"
+ #end if
+ #if $options_type.section_input_filters_type.read_indel_limit:
+ --read-indel-limit "${options_type.section_input_filters_type.read_indel_limit}"
+ #end if
+ --indel-exclusion-window "${options_type.section_input_filters_type.indel_exclusion_window}"
+ --min-alternate-fraction "${options_type.section_input_filters_type.min_alternate_fraction}"
+ --min-alternate-count "${options_type.section_input_filters_type.min_alternate_count}"
+ --min-alternate-qsum "${options_type.section_input_filters_type.min_alternate_qsum}"
+ --min-alternate-total "${options_type.section_input_filters_type.min_alternate_total}"
+ --min-coverage "${options_type.section_input_filters_type.min_coverage}"
+ #end if
+
+ ##bayesian priors
+ #if str( $options_type.section_bayesian_priors_type.section_bayesian_priors_type_selector ) == "set":
+ ${options_type.section_bayesian_priors_type.no_ewens_priors}
+ ${options_type.section_bayesian_priors_type.no_population_priors}
+ ${options_type.section_bayesian_priors_type.hwe_priors}
+ #end if
+
+ ##observation prior expectations
+ #if str( $options_type.section_observation_prior_expectations_type.section_observation_prior_expectations_type_selector ) == "set":
+ ${options_type.section_observation_prior_expectations_type.binomial_obs_priors}
+ ${options_type.section_observation_prior_expectations_type.allele_balance_priors}
+ #end if
+
+ ##algorithmic features
+ #if str( $options_type.section_algorithmic_features_type.section_algorithmic_features_type_selector ) == "set":
+ --site-selection-max-iterations "${options_type.section_algorithmic_features_type.site_selection_max_iterations}"
+ --genotyping-max-iterations "${options_type.section_algorithmic_features_type.genotyping_max_iterations}"
+ --genotyping-max-banddepth "${options_type.section_algorithmic_features_type.genotyping_max_banddepth}"
+ --posterior-integration-limits "${options_type.section_algorithmic_features_type.posterior_integration_limits_n},${options_type.section_algorithmic_features_type.posterior_integration_limits_m}"
+ ${options_type.section_algorithmic_features_type.no_permute}
+ ${options_type.section_algorithmic_features_type.exclude_unobserved_genotypes}
+ #if $options_type.section_algorithmic_features_type.genotype_variant_threshold:
+ --genotype-variant-threshold "${options_type.section_algorithmic_features_type.genotype_variant_threshold}"
+ #end if
+ ${options_type.section_algorithmic_features_type.use_mapping_quality}
+ --read-dependence-factor "${options_type.section_algorithmic_features_type.read_dependence_factor}"
+ ${options_type.section_algorithmic_features_type.no_marginals}
+ #end if
+
+ #end if
+ </command>
+ <inputs>
+ <conditional name="reference_source">
+ <param name="reference_source_selector" type="select" label="Choose the source for the reference list">
+ <option value="cached">Locally cached</option>
+ <option value="history">History</option>
+ </param>
+ <when value="cached">
+ <repeat name="input_bams" title="Sample BAM file" min="1">
+ <param name="input_bam" type="data" format="bam" label="BAM file">
+ <validator type="unspecified_build" />
+ </param>
+ </repeat>
+ <param name="ref_file" type="select" label="Using reference genome">
+ <options from_data_table="sam_fa_indexes">
+ <!-- <filter type="sam_fa_indexes" key="dbkey" ref="input_bam" column="value"/> does not yet work in a repeat...-->
+ </options>
+ <validator type="no_options" message="A built-in reference genome is not available for the build associated with the selected input file"/>
+ </param>
+ </when>
+ <when value="history"><!-- FIX ME!!!! -->
+ <repeat name="input_bams" title="Sample BAM file" min="1">
+ <param name="input_bam" type="data" format="bam" label="BAM file" />
+ </repeat>
+ <param name="ref_file" type="data" format="fasta" label="Using reference file" />
+ </when>
+ </conditional>
+
+ <conditional name="options_type">
+ <param name="options_type_selector" type="select" label="Basic or Advanced options">
+ <option value="basic" selected="True">Basic</option>
+ <option value="advanced">Advanced</option>
+ </param>
+ <when value="basic">
+ <!-- Do nothing here -->
+ </when>
+ <when value="advanced">
+
+ <!-- output -->
+ <param name="output_failed_alleles_option" type="boolean" truevalue="--failed-alleles" falsevalue="" checked="False" label="Write out failed alleles file" />
+ <param name="output_trace_option" type="boolean" truevalue="--trace" falsevalue="" checked="False" label="Write out algorithm trace file" />
+
+
+ <!-- input -->
+ <conditional name="target_limit_type">
+ <param name="target_limit_type_selector" type="select" label="Limit analysis to listed targets">
+ <option value="do_not_limit" selected="True">Do not limit</option>
+ <option value="limit_by_target_file">Limit by target file</option>
+ <option value="limit_by_region">Limit to region</option>
+ </param>
+ <when value="do_not_limit">
+ <!-- Do nothing here -->
+ </when>
+ <when value="limit_by_target_file">
+ <param name="input_target_bed" type="data" format="bed" label="Limit analysis to targets listed in the BED-format FILE." />
+ </when>
+ <when value="limit_by_region">
+ <param name="region_chromosome" type="text" label="Region Chromosome" value="" /><!--only once? -->
+ <param name="region_start" type="integer" label="Region Start" value="" />
+ <param name="region_end" type="integer" label="Region End" value="" />
+ </when>
+ </conditional>
+ <param name="input_sample_file" type="data" format="txt" label="Limit analysis to samples listed (one per line) in the FILE" optional="True" />
+ <param name="input_populations_file" type="data" format="txt" label="Populations File" optional="True" />
+ <param name="input_cnv_map_bed" type="data" format="bed" label="Read a copy number map from the BED file FILE" optional="True" />
+ <conditional name="input_variant_type">
+ <param name="input_variant_type_selector" type="select" label="Provide variants file">
+ <option value="do_not_provide" selected="True">Do not provide</option>
+ <option value="provide_vcf">Provide VCF file</option>
+ </param>
+ <when value="do_not_provide">
+ <!-- Do nothing here -->
+ </when>
+ <when value="provide_vcf">
+ <param name="input_variant_vcf" type="data" format="vcf" label="Use variants reported in VCF file as input to the algorithm" />
+ <param name="only_use_input_alleles" type="boolean" truevalue="--only-use-input-alleles" falsevalue="" checked="False" label="Only provide variant calls and genotype likelihoods for sites in VCF" />
+ </when>
+ </conditional>
+
+
+ <!-- reporting -->
+ <conditional name="section_reporting_type">
+ <param name="section_reporting_type_selector" type="select" label="Set Reporting options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+ <param name="pvar" type="float" label="Report sites if the probability that there is a polymorphism at the site is greater" value="0.0001" />
+ <param name="show_reference_repeats" type="boolean" truevalue="--show-reference-repeats" falsevalue="" checked="False" label="Calculate and show information about reference repeats" />
+ </when>
+ </conditional>
+
+
+ <!-- population model -->
+ <conditional name="section_population_model_type">
+ <param name="section_population_model_type_selector" type="select" label="Set population model options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+ <param name="theta" type="float" label="expected mutation rate or pairwise nucleotide diversity among the population" value="0.001" help="This serves as the single parameter to the Ewens Sampling Formula prior model"/>
+ <param name="ploidy" type="integer" label="default ploidy for the analysis" value="2" />
+ <param name="pooled" type="boolean" truevalue="--pooled" falsevalue="" checked="False" label="Assume that samples result from pooled sequencing" help="When using this flag, set --ploidy to the number of alleles in each sample." />
+ </when>
+ </conditional>
+
+ <!-- reference allele -->
+ <conditional name="use_reference_allele_type">
+ <param name="use_reference_allele_type_selector" type="select" label="Include the reference allele in the analysis">
+ <option value="do_not_include_reference_allele" selected="True">Do not include</option>
+ <option value="include_reference_allele">Include</option>
+ </param>
+ <when value="do_not_include_reference_allele">
+ <!-- Do nothing here -->
+ </when>
+ <when value="include_reference_allele">
+ <param name="diploid_reference" type="boolean" truevalue="--diploid-reference" falsevalue="" checked="False" label="Treat reference as diploid" />
+ <param name="reference_quality_mq" type="integer" label="Assign mapping quality" value="100" />
+ <param name="reference_quality_bq" type="integer" label="Assign base quality" value="60" />
+ </when>
+ </conditional>
+
+ <!-- allele scope -->
+ <conditional name="section_allele_scope_type">
+ <param name="section_allele_scope_type_selector" type="select" label="Set allele scope options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+ <param name="no_snps" type="boolean" truevalue="--no-snps" falsevalue="" checked="False" label="Ignore SNP alleles" />
+ <param name="no_indels" type="boolean" truevalue="--no-indels" falsevalue="" checked="False" label="Ignore insertion and deletion alleles" />
+                <param name="no_mnps" type="boolean" truevalue="--no-mnps" falsevalue="" checked="False" label="Ignore multi-nucleotide polymorphisms, MNPs" />
+ <param name="no_complex" type="boolean" truevalue="--no-complex" falsevalue="" checked="False" label="Ignore complex events (composites of other classes)" />
+ <param name="use_best_n_alleles" type="integer" label="Evaluate only the best N SNP alleles" value="0" min="0" help="Ranked by sum of supporting quality scores; Set to 0 to use all" />
+ <param name="max_complex_gap" type="integer" label="Allow complex alleles with contiguous embedded matches of up to this length" value="" optional="True"/>
+ </when>
+ </conditional>
+
+ <!-- indel realignment -->
+ <param name="left_align_indels" type="boolean" truevalue="--left-align-indels" falsevalue="" checked="False" label="Left-realign and merge gaps embedded in reads" />
+
+ <!-- input filters -->
+ <conditional name="section_input_filters_type">
+ <param name="section_input_filters_type_selector" type="select" label="Set input filters options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+ <param name="use_duplicate_reads" type="boolean" truevalue="--use-duplicate-reads" falsevalue="" checked="False" label="Include duplicate-marked alignments in the analysis" />
+ <conditional name="no_filter_type">
+ <param name="no_filter_type_selector" type="select" label="Apply filters">
+ <option value="apply_filters" selected="True">Apply</option>
+ <option value="no_filters">Do not apply</option>
+ </param>
+ <when value="no_filters">
+ <!-- Do nothing here --><!-- no-filters -->
+ </when>
+ <when value="apply_filters">
+ <param name="min_mapping_quality" type="integer" label="Exclude alignments from analysis if they have a mapping quality less than" value="30" />
+                        <param name="min_base_quality" type="integer" label="Exclude alleles from analysis if their supporting base quality is less than" value="20" />
+ <param name="min_supporting_quality_mq" type="integer" label="In order to consider an alternate allele, at least one supporting alignment must have mapping quality" value="0" />
+ <param name="min_supporting_quality_bq" type="integer" label="In order to consider an alternate allele, at least one supporting alignment must have base quality" value="0" />
+ </when>
+ </conditional>
+ <param name="mismatch_base_quality_threshold" type="integer" label="Count mismatches toward read-mismatch-limit if the base quality of the mismatch is >=" value="10" />
+ <param name="read_mismatch_limit" type="integer" label="Exclude reads with more than N mismatches where each mismatch has base quality >= mismatch-base-quality-threshold" value="" optional="True" />
+ <param name="read_max_mismatch_fraction" type="float" label="Exclude reads with more than N [0,1] fraction of mismatches where each mismatch has base quality >= mismatch-base-quality-threshold" value="1.0" />
+ <param name="read_snp_limit" type="integer" label="Exclude reads with more than N base mismatches, ignoring gaps with quality >= mismatch-base-quality-threshold" value="" optional="True" />
+ <param name="read_indel_limit" type="integer" label="Exclude reads with more than N separate gaps" value="" optional="True" />
+ <param name="indel_exclusion_window" type="integer" label="Ignore portions of alignments this many bases from a putative insertion or deletion allele" value="0" />
+                <param name="min_alternate_fraction" type="float" label="Require at least this fraction of observations supporting an alternate allele within a single individual in order to evaluate the position" value="0" />
+ <param name="min_alternate_count" type="integer" label="Require at least this count of observations supporting an alternate allele within a single individual in order to evaluate the position" value="1" />
+ <param name="min_alternate_qsum" type="integer" label="Require at least this sum of quality of observations supporting an alternate allele within a single individual in order to evaluate the position" value="0" />
+ <param name="min_alternate_total" type="integer" label="Require at least this count of observations supporting an alternate allele within the total population in order to use the allele in analysis" value="1" />
+ <param name="min_coverage" type="integer" label="Require at least this coverage to process a site" value="0" />
+ </when>
+ </conditional>
+
+
+ <!-- bayesian priors -->
+ <conditional name="section_bayesian_priors_type">
+ <param name="section_bayesian_priors_type_selector" type="select" label="Set bayesian priors options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+ <param name="no_ewens_priors" type="boolean" truevalue="--no-ewens-priors" falsevalue="" checked="False" label="Turns off the Ewens' Sampling Formula component of the priors" />
+ <param name="no_population_priors" type="boolean" truevalue="--no-population-priors" falsevalue="" checked="False" label="No population priors" help="Equivalent to --pooled --no-ewens-priors" />
+ <param name="hwe_priors" type="boolean" truevalue="--hwe-priors" falsevalue="" checked="False" label="Use the probability of the combination arising under HWE given the allele frequency as estimated by observation frequency" />
+ </when>
+ </conditional>
+
+ <!-- observation prior expectations -->
+ <conditional name="section_observation_prior_expectations_type">
+ <param name="section_observation_prior_expectations_type_selector" type="select" label="Set observation prior expectations options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+                <param name="binomial_obs_priors" type="boolean" truevalue="--binomial-obs-priors" falsevalue="" checked="False" label="Incorporate expectations about observations into the priors. Uses read placement probability, strand balance probability, and read position (5'-3') probability" />
+ <param name="allele_balance_priors" type="boolean" truevalue="--allele-balance-priors" falsevalue="" checked="False" label="Use aggregate probability of observation balance between alleles as a component of the priors. Best for observations with minimal inherent reference bias" />
+ </when>
+ </conditional>
+
+
+ <!-- algorithmic features -->
+ <conditional name="section_algorithmic_features_type">
+ <param name="section_algorithmic_features_type_selector" type="select" label="Set algorithmic features options">
+ <option value="do_not_set" selected="True">Do not set</option>
+ <option value="set">Set</option>
+ </param>
+ <when value="do_not_set">
+ <!-- do nothing here -->
+ </when>
+ <when value="set">
+                <param name="site_selection_max_iterations" type="integer" label="Uses hill-climbing algorithm to search posterior space for N iterations to determine if the site should be evaluated." value="5" help="Set to 0 to prevent use of this algorithm for site selection, and to a low integer for improved site selection at a slight performance penalty" />
+ <param name="genotyping_max_iterations" type="integer" label="Iterate no more than N times during genotyping step" value="25" />
+ <param name="genotyping_max_banddepth" type="integer" label="Integrate no deeper than the Nth best genotype by likelihood when genotyping" value="6" />
+                <param name="posterior_integration_limits_n" type="integer" label="Posterior integration limit N" help="Integrate all genotype combinations in our posterior space which include no more than N samples with their Mth best data likelihood." value="1" />
+                <param name="posterior_integration_limits_m" type="integer" label="Posterior integration limit M" help="Integrate all genotype combinations in our posterior space which include no more than N samples with their Mth best data likelihood." value="3" />
+ <param name="no_permute" type="boolean" truevalue="--no-permute" falsevalue="" checked="False" label="Do not scale prior probability of genotype combination given allele frequency by the number of permutations of included genotypes" />
+ <param name="exclude_unobserved_genotypes" type="boolean" truevalue="--exclude-unobserved-genotypes" falsevalue="" checked="False" label="Skip sample genotypings for which the sample has no supporting reads" />
+ <param name="genotype_variant_threshold" type="integer" label="Limit posterior integration to samples where the second-best genotype likelihood is no more than log(N) from the highest genotype likelihood for the sample" value="" optional="True" />
+ <param name="use_mapping_quality" type="boolean" truevalue="--use-mapping-quality" falsevalue="" checked="False" label="Use mapping quality of alleles when calculating data likelihoods" />
+ <param name="read_dependence_factor" type="float" label="Incorporate non-independence of reads by scaling successive observations by this factor during data likelihood calculations" value="0.9" />
+ <param name="no_marginals" type="boolean" truevalue="--no-marginals" falsevalue="" checked="False" label="Do not calculate the marginal probability of genotypes. Saves time and improves scaling performance in large populations" />
+ </when>
+ </conditional>
+
+
+ </when>
+ </conditional>
+
+ </inputs>
+ <outputs>
+ <data format="vcf" name="output_vcf" label="${tool.name} on ${on_string} (variants)" />
+ <data format="bed" name="output_failed_alleles_bed" label="${tool.name} on ${on_string} (failed alleles)">
+ <filter>options_type['options_type_selector'] == "advanced" and options_type['output_failed_alleles_option'] is True</filter>
+ </data>
+ <data format="txt" name="output_trace" label="${tool.name} on ${on_string} (trace)">
+ <filter>options_type['options_type_selector'] == "advanced" and options_type['output_trace_option'] is True</filter>
+ </data>
+ </outputs>
+ <tests>
+ <test>
+ <param name="reference_source_selector" value="history" />
+ <param name="ref_file" ftype="fasta" value="phiX.fasta"/>
+ <param name="input_bam" ftype="bam" value="gatk/fake_phiX_reads_1.bam"/>
+ <param name="options_type_selector" value="basic"/>
+ <output name="output_vcf" file="variant_detection/freebayes/freebayes_out_1.vcf.contains" compare="contains"/>
+ <!-- <output name="output_failed_alleles_bed" file="empty_file.dat" />
+ <output name="output_trace" file="variant_detection/freebayes/freebayes_out_1.output_trace" /> -->
+ </test>
+ </tests>
+ <help>
+**What it does**
+
+This tool uses FreeBayes to call SNPs given a reference sequence and a BAM alignment file.
+
+FreeBayes is a high-performance, flexible, and open-source Bayesian genetic variant detector. It operates on BAM alignment files, which are produced by most contemporary short-read aligners.
+
+In addition to substantial performance improvements over its predecessors (PolyBayes, GigaBayes, and BamBayes), it expands the scope of SNP and small-indel variant calling to populations of individuals with heterogeneous copy number. FreeBayes is currently under active development.
+
+Go `here <http://bioinformatics.bc.edu/marthlab/FreeBayes>`_ for details on FreeBayes.
+
+------
+
+**Inputs**
+
+FreeBayes accepts an input aligned BAM file.
+
+
+**Outputs**
+
+The output is in the VCF format.
+
+-------
+
+**Settings**::
+
+ input and output:
+
+ -b --bam FILE Add FILE to the set of BAM files to be analyzed.
+ -c --stdin Read BAM input on stdin.
+ -v --vcf FILE Output VCF-format results to FILE.
+ -f --fasta-reference FILE
+ Use FILE as the reference sequence for analysis.
+ An index file (FILE.fai) will be created if none exists.
+ If neither --targets nor --region are specified, FreeBayes
+ will analyze every position in this reference.
+ -t --targets FILE
+ Limit analysis to targets listed in the BED-format FILE.
+ -r --region <chrom>:<start_position>..<end_position>
+ Limit analysis to the specified region, 0-base coordinates,
+ end_position not included (same as BED format).
+ -s --samples FILE
+ Limit analysis to samples listed (one per line) in the FILE.
+ By default FreeBayes will analyze all samples in its input
+ BAM files.
+ --populations FILE
+ Each line of FILE should list a sample and a population which
+ it is part of. The population-based bayesian inference model
+ will then be partitioned on the basis of the populations.
+ -A --cnv-map FILE
+ Read a copy number map from the BED file FILE, which has
+ the format:
+ reference sequence, start, end, sample name, copy number
+ ... for each region in each sample which does not have the
+ default copy number as set by --ploidy.
+ -L --trace FILE Output an algorithmic trace to FILE.
+ --failed-alleles FILE
+ Write a BED file of the analyzed positions which do not
+ pass --pvar to FILE.
+ -@ --variant-input VCF
+ Use variants reported in VCF file as input to the algorithm.
+ A report will be generated for every record in the VCF file.
+ -l --only-use-input-alleles
+ Only provide variant calls and genotype likelihoods for sites
+ and alleles which are provided in the VCF input, and provide
+ output in the VCF for all input alleles, not just those which
+ have support in the data.
+
+ reporting:
+
+ -P --pvar N Report sites if the probability that there is a polymorphism
+ at the site is greater than N. default: 0.0001
+ -_ --show-reference-repeats
+ Calculate and show information about reference repeats in
+ the VCF output.
+
+ population model:
+
+ -T --theta N The expected mutation rate or pairwise nucleotide diversity
+ among the population under analysis. This serves as the
+ single parameter to the Ewens Sampling Formula prior model
+ default: 0.001
+ -p --ploidy N Sets the default ploidy for the analysis to N. default: 2
+ -J --pooled Assume that samples result from pooled sequencing.
+ When using this flag, set --ploidy to the number of
+ alleles in each sample.
+
+ reference allele:
+
+ -Z --use-reference-allele
+ This flag includes the reference allele in the analysis as
+ if it is another sample from the same population.
+ -H --diploid-reference
+ If using the reference sequence as a sample (-Z),
+ treat it as diploid. default: false (reference is haploid)
+ --reference-quality MQ,BQ
+ Assign mapping quality of MQ to the reference allele at each
+ site and base quality of BQ. default: 100,60
+
+ allele scope:
+
+ -I --no-snps Ignore SNP alleles.
+ -i --no-indels Ignore insertion and deletion alleles.
+   -X --no-mnps    Ignore multi-nucleotide polymorphisms, MNPs.
+ -u --no-complex Ignore complex events (composites of other classes).
+ -n --use-best-n-alleles N
+ Evaluate only the best N SNP alleles, ranked by sum of
+ supporting quality scores. (Set to 0 to use all; default: all)
+ -E --max-complex-gap N
+ Allow complex alleles with contiguous embedded matches of up
+ to this length.
+
+ indel realignment:
+
+ -O --left-align-indels
+ Left-realign and merge gaps embedded in reads. default: false
+
+ input filters:
+
+ -4 --use-duplicate-reads
+ Include duplicate-marked alignments in the analysis.
+ default: exclude duplicates
+ -m --min-mapping-quality Q
+ Exclude alignments from analysis if they have a mapping
+ quality less than Q. default: 30
+ -q --min-base-quality Q
+ Exclude alleles from analysis if their supporting base
+ quality is less than Q. default: 20
+ -R --min-supporting-quality MQ,BQ
+ In order to consider an alternate allele, at least one supporting
+ alignment must have mapping quality MQ, and one supporting
+ allele must have base quality BQ. default: 0,0, unset
+ -Q --mismatch-base-quality-threshold Q
+ Count mismatches toward --read-mismatch-limit if the base
+ quality of the mismatch is >= Q. default: 10
+ -U --read-mismatch-limit N
+ Exclude reads with more than N mismatches where each mismatch
+ has base quality >= mismatch-base-quality-threshold.
+ default: ~unbounded
+ -z --read-max-mismatch-fraction N
+ Exclude reads with more than N [0,1] fraction of mismatches where
+ each mismatch has base quality >= mismatch-base-quality-threshold
+ default: 1.0
+ -$ --read-snp-limit N
+ Exclude reads with more than N base mismatches, ignoring gaps
+ with quality >= mismatch-base-quality-threshold.
+ default: ~unbounded
+ -e --read-indel-limit N
+ Exclude reads with more than N separate gaps.
+ default: ~unbounded
+ -0 --no-filters Do not use any input base and mapping quality filters
+ Equivalent to -m 0 -q 0 -R 0 -S 0
+ -x --indel-exclusion-window
+ Ignore portions of alignments this many bases from a
+ putative insertion or deletion allele. default: 0
+ -F --min-alternate-fraction N
+ Require at least this fraction of observations supporting
+                   an alternate allele within a single individual in order
+                   to evaluate the position. default: 0.0
+ -C --min-alternate-count N
+ Require at least this count of observations supporting
+ an alternate allele within a single individual in order
+ to evaluate the position. default: 1
+ -3 --min-alternate-qsum N
+ Require at least this sum of quality of observations supporting
+ an alternate allele within a single individual in order
+ to evaluate the position. default: 0
+ -G --min-alternate-total N
+ Require at least this count of observations supporting
+ an alternate allele within the total population in order
+ to use the allele in analysis. default: 1
+ -! --min-coverage N
+ Require at least this coverage to process a site. default: 0
+
+ bayesian priors:
+
+ -Y --no-ewens-priors
+ Turns off the Ewens' Sampling Formula component of the priors.
+ -k --no-population-priors
+ Equivalent to --pooled --no-ewens-priors
+ -w --hwe-priors Use the probability of the combination arising under HWE given
+ the allele frequency as estimated by observation frequency.
+
+ observation prior expectations:
+
+ -V --binomial-obs-priors
+                   Incorporate expectations about observations into the priors.
+ Uses read placement probability, strand balance probability,
+ and read position (5'-3') probability.
+ -a --allele-balance-priors
+ Use aggregate probability of observation balance between alleles
+ as a component of the priors. Best for observations with minimal
+ inherent reference bias.
+
+ algorithmic features:
+
+ -M --site-selection-max-iterations N
+ Uses hill-climbing algorithm to search posterior space for N
+ iterations to determine if the site should be evaluated. Set to 0
+ to prevent use of this algorithm for site selection, and
+                   to a low integer for improved site selection at a slight
+ performance penalty. default: 5.
+ -B --genotyping-max-iterations N
+ Iterate no more than N times during genotyping step. default: 25.
+ --genotyping-max-banddepth N
+ Integrate no deeper than the Nth best genotype by likelihood when
+ genotyping. default: 6.
+ -W --posterior-integration-limits N,M
+ Integrate all genotype combinations in our posterior space
+ which include no more than N samples with their Mth best
+ data likelihood. default: 1,3.
+ -K --no-permute
+ Do not scale prior probability of genotype combination given allele
+ frequency by the number of permutations of included genotypes.
+ -N --exclude-unobserved-genotypes
+ Skip sample genotypings for which the sample has no supporting reads.
+ -S --genotype-variant-threshold N
+ Limit posterior integration to samples where the second-best
+ genotype likelihood is no more than log(N) from the highest
+ genotype likelihood for the sample. default: ~unbounded
+ -j --use-mapping-quality
+ Use mapping quality of alleles when calculating data likelihoods.
+ -D --read-dependence-factor N
+ Incorporate non-independence of reads by scaling successive
+ observations by this factor during data likelihood
+ calculations. default: 0.9
+ -= --no-marginals
+ Do not calculate the marginal probability of genotypes. Saves
+ time and improves scaling performance in large populations.
+
+
+------
+
+**Citation**
+
+For the underlying tool, please cite `FreeBayes <http://bioinformatics.bc.edu/marthlab/FreeBayes>`_.
+
+If you use this tool in Galaxy, please cite Blankenberg D, et al. *In preparation.*
+
+ </help>
+</tool>
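The optional outputs above are gated by <filter> expressions, which are Python expressions tested against the submitted parameter values. A minimal sketch of that evaluation model, assuming a simple eval over a parameter dictionary (simplified; not the actual Galaxy filter evaluator):

    # sketch: decide whether an optional output dataset should be created
    filter_expression = "options_type['options_type_selector'] == 'advanced' and options_type['output_trace_option'] is True"

    params = { 'options_type': { 'options_type_selector': 'advanced',
                                 'output_trace_option': True } }

    # the filter text is evaluated with the submitted parameter values in scope
    produce_output = eval( filter_expression, {}, params )
    print( produce_output )  # True -> the trace output is produced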
diff -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b test/tool_shed/test_data/freebayes/invalid_deps/tool_dependencies.xml
--- /dev/null
+++ b/test/tool_shed/test_data/freebayes/invalid_deps/tool_dependencies.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<tool_dependency>
+ <package name="freebayes" version="0.9.5">
+ <install version="1.0">
+ <actions>
+ <action type="shell_command">git clone --recursive git://github.com/ekg/freebayes.git</action>
+ <action type="shell_command">git reset --hard 9696d0ce8a962f7bb61c4791be5ce44312b81cf8</action>
+ <action type="shell_command">make</action>
+ <action type="move_directory_files">
+ <source_directory>bin</source_directory>
+ <destination_directory>$INSTALL_DIR/bin</destination_directory>
+ </action>
+ <action type="set_environment">
+ <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR/bin</environment_variable>
+ </action>
+ </actions>
+ </install>
+ <readme>
+FreeBayes requires g++ and the standard C and C++ development libraries.
+Additionally, cmake is required for building the BamTools API.
+ </readme>
+ </package>
+ <package name="samtools" version="0.2.15">
+ <install version="1.0">
+ <actions>
+ <action type="download_by_url">http://sourceforge.net/projects/samtools/files/samtools/0.1.18/samtools-0.1…</action>
+ <action type="shell_command">sed -i .bak -e 's/-lcurses/-lncurses/g' Makefile</action>
+ <action type="shell_command">make</action>
+ <action type="move_file">
+ <source>samtools</source>
+ <destination>$INSTALL_DIR/bin</destination>
+ </action>
+ <action type="move_file">
+ <source>misc/maq2sam-long</source>
+ <destination>$INSTALL_DIR/bin</destination>
+ </action>
+ <action type="set_environment">
+ <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR/bin</environment_variable>
+ </action>
+ </actions>
+ </install>
+ <readme>
+Compiling SAMtools requires the ncurses and zlib development libraries.
+ </readme>
+ </package>
+</tool_dependency>
diff -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b test/tool_shed/test_data/freebayes/sam_fa_indices.loc.sample
--- /dev/null
+++ b/test/tool_shed/test_data/freebayes/sam_fa_indices.loc.sample
@@ -0,0 +1,28 @@
+#This is a sample file distributed with Galaxy that enables tools
+#to use a directory of Samtools indexed sequences data files. You will need
+#to create these data files and then create a sam_fa_indices.loc file
+#similar to this one (store it in this directory) that points to
+#the directories in which those files are stored. The sam_fa_indices.loc
+#file has this format (white space characters are TAB characters):
+#
+#index <seq> <location>
+#
+#So, for example, if you had hg18 indexed stored in
+#/depot/data2/galaxy/sam/,
+#then the sam_fa_indices.loc entry would look like this:
+#
+#index hg18 /depot/data2/galaxy/sam/hg18.fa
+#
+#and your /depot/data2/galaxy/sam/ directory
+#would contain hg18.fa and hg18.fa.fai files:
+#
+#-rw-r--r-- 1 james universe 830134 2005-09-13 10:12 hg18.fa
+#-rw-r--r-- 1 james universe 527388 2005-09-13 10:12 hg18.fa.fai
+#
+#Your sam_fa_indices.loc file should include an entry per line for
+#each index set you have stored. The file in the path does actually
+#exist, but it should never be directly used. Instead, the name serves
+#as a prefix for the index file. For example:
+#
+#index hg18 /depot/data2/galaxy/sam/hg18.fa
+#index hg19 /depot/data2/galaxy/sam/hg19.fa
diff -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b test/tool_shed/test_data/freebayes/tool_data_table_conf.xml.sample
--- /dev/null
+++ b/test/tool_shed/test_data/freebayes/tool_data_table_conf.xml.sample
@@ -0,0 +1,8 @@
+<!-- Use the file tool_data_table_conf.xml.oldlocstyle if you don't want to update your loc files as changed in revision 4550:535d276c92bc-->
+<tables>
+ <!-- Location of SAMTools indexes and other files -->
+ <table name="sam_fa_indexes" comment_char="#">
+ <columns>line_type, value, path</columns>
+ <file path="tool-data/sam_fa_indices.loc" />
+ </table>
+</tables>
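The table above declares the columns line_type, value, path and points at tool-data/sam_fa_indices.loc, whose entry format is documented in the sample .loc file above. A minimal sketch of reading such a file into a lookup keyed by build name, assuming tab-separated fields and '#' comment lines (illustrative only):

    # sketch: read sam_fa_indices.loc using the declared columns (line_type, value, path)
    def load_sam_fa_indexes( loc_path ):
        entries = {}
        for line in open( loc_path ):
            line = line.strip()
            if not line or line.startswith( '#' ):
                continue  # skip blank lines and comments
            fields = line.split( '\t' )
            if len( fields ) < 3:
                continue
            line_type, value, path = fields[ :3 ]
            if line_type == 'index':
                entries[ value ] = path  # e.g. entries[ 'hg18' ] = '/depot/data2/galaxy/sam/hg18.fa'
        return entries

    # usage with a hypothetical path:
    # indexes = load_sam_fa_indexes( 'tool-data/sam_fa_indices.loc' )
    # print( indexes.get( 'hg18' ) )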
diff -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 -r 83e46d828ddce8e561f7f4922fb7671dec8cf22b test/tool_shed/test_data/freebayes/tool_dependencies.xml
--- /dev/null
+++ b/test/tool_shed/test_data/freebayes/tool_dependencies.xml
@@ -0,0 +1,46 @@
+<?xml version="1.0"?>
+<tool_dependency>
+ <package name="freebayes" version="0.9.4_9696d0ce8a962f7bb61c4791be5ce44312b81cf8">
+ <install version="1.0">
+ <actions>
+ <action type="shell_command">git clone --recursive git://github.com/ekg/freebayes.git</action>
+ <action type="shell_command">git reset --hard 9696d0ce8a962f7bb61c4791be5ce44312b81cf8</action>
+ <action type="shell_command">make</action>
+ <action type="move_directory_files">
+ <source_directory>bin</source_directory>
+ <destination_directory>$INSTALL_DIR/bin</destination_directory>
+ </action>
+ <action type="set_environment">
+ <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR/bin</environment_variable>
+ </action>
+ </actions>
+ </install>
+ <readme>
+FreeBayes requires g++ and the standard C and C++ development libraries.
+Additionally, cmake is required for building the BamTools API.
+ </readme>
+ </package>
+ <package name="samtools" version="0.1.18">
+ <install version="1.0">
+ <actions>
+ <action type="download_by_url">http://sourceforge.net/projects/samtools/files/samtools/0.1.18/samtools-0.1…</action>
+ <action type="shell_command">sed -i .bak -e 's/-lcurses/-lncurses/g' Makefile</action>
+ <action type="shell_command">make</action>
+ <action type="move_file">
+ <source>samtools</source>
+ <destination>$INSTALL_DIR/bin</destination>
+ </action>
+ <action type="move_file">
+ <source>misc/maq2sam-long</source>
+ <destination>$INSTALL_DIR/bin</destination>
+ </action>
+ <action type="set_environment">
+ <environment_variable name="PATH" action="prepend_to">$INSTALL_DIR/bin</environment_variable>
+ </action>
+ </actions>
+ </install>
+ <readme>
+Compiling SAMtools requires the ncurses and zlib development libraries.
+ </readme>
+ </package>
+</tool_dependency>
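The valid file above differs from the invalid_deps variant only in its package versions: the tool's <requirement> tags name freebayes 0.9.4_9696d0ce8a962f7bb61c4791be5ce44312b81cf8 and samtools 0.1.18, so a tool_dependencies.xml declaring freebayes 0.9.5 and samtools 0.2.15 cannot be matched to them. A minimal sketch of such a name/version consistency check, assuming ElementTree parsing of both files (illustrative; not the tool shed's actual validation code):

    # sketch: flag <package> entries whose (name, version) matches no <requirement> tag in the tool
    import xml.etree.ElementTree as ET

    def mismatched_packages( tool_xml_path, tool_dependencies_xml_path ):
        tool_root = ET.parse( tool_xml_path ).getroot()
        requirements = set( ( req.text.strip(), req.get( 'version' ) )
                            for req in tool_root.findall( './/requirement' )
                            if req.get( 'type' ) == 'package' and req.text )
        deps_root = ET.parse( tool_dependencies_xml_path ).getroot()
        packages = set( ( pkg.get( 'name' ), pkg.get( 'version' ) )
                        for pkg in deps_root.findall( 'package' ) )
        # for the invalid_deps file this returns ('freebayes', '0.9.5') and ('samtools', '0.2.15')
        return packages - requirements

    # usage with hypothetical local paths:
    # print( mismatched_packages( 'freebayes/freebayes.xml', 'freebayes/invalid_deps/tool_dependencies.xml' ) )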
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: inithello: Don't allow uploading files to a deprecated repository.
by Bitbucket 03 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/3ff1e4ec000a/
changeset: 3ff1e4ec000a
user: inithello
date: 2012-12-03 16:41:21
summary: Don't allow uploading files to a deprecated repository.
affected #: 1 file
diff -r 12f55a688c82bb3e26de0ebba3cb202b16cf82e3 -r 3ff1e4ec000a8dae6a533af70b0489d2bf0d4824 templates/webapps/community/repository/browse_repository.mako
--- a/templates/webapps/community/repository/browse_repository.mako
+++ b/templates/webapps/community/repository/browse_repository.mako
@@ -9,7 +9,7 @@
is_new = repository.is_new( trans.app )
can_contact_owner = trans.user and trans.user != repository.user
can_push = trans.app.security_agent.can_push( trans.app, trans.user, repository )
- can_upload = can_push
+ can_upload = can_push and ( not repository.deprecated )
can_download = not is_new and ( not is_malicious or can_push )
can_browse_contents = not is_new
can_rate = trans.user and repository.user != trans.user
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
commit/galaxy-central: inithello: More tool shed functional test enhancements.
by Bitbucket 03 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/12f55a688c82/
changeset: 12f55a688c82
user: inithello
date: 2012-12-03 16:07:31
summary: More tool shed functional test enhancements.
affected #: 2 files
diff -r cdbee5bc4a6d2140401ba0ab1ce7de3e9b9882b0 -r 12f55a688c82bb3e26de0ebba3cb202b16cf82e3 test/tool_shed/base/twilltestcase.py
--- a/test/tool_shed/base/twilltestcase.py
+++ b/test/tool_shed/base/twilltestcase.py
@@ -40,7 +40,7 @@
self.check_string_not_in_page( string )
def check_for_valid_tools( self, repository, strings_displayed=[], strings_not_displayed=[] ):
strings_displayed.append( 'Valid tools' )
- self.manage_repository( repository, strings_displayed, strings_not_displayed )
+ self.display_manage_repository_page( repository, strings_displayed, strings_not_displayed )
def check_count_of_metadata_revisions_associated_with_repository( self, repository, metadata_count ):
self.check_repository_changelog( repository )
self.check_string_count_in_page( 'Repository metadata is associated with this change set.', metadata_count )
@@ -63,6 +63,8 @@
'''
repository_metadata = get_repository_metadata_by_repository_id_changeset_revision( repository.id, changeset_revision )
metadata = repository_metadata.metadata
+ if 'tools' not in metadata:
+ raise AssertionError( 'No tools in %s revision %s.' % ( repository.name, changeset_revision ) )
for tool_dict in metadata[ 'tools' ]:
metadata_strings_displayed = [ tool_dict[ 'guid' ],
tool_dict[ 'version' ],
@@ -78,18 +80,18 @@
changeset_revision=changeset_revision,
strings_displayed=[ '%s (version %s)' % ( tool_dict[ 'name' ], tool_dict[ 'version' ] ) ],
strings_not_displayed=[] )
- def check_repository_invalid_tools_for_changeset_revision( self, repository, changeset_revision ):
+ def check_repository_invalid_tools_for_changeset_revision( self, repository, changeset_revision, strings_displayed=[], strings_not_displayed=[] ):
+ '''Load the invalid tool page for each invalid tool associated with this changeset revision and verify the received error messages.'''
repository_metadata = get_repository_metadata_by_repository_id_changeset_revision( repository.id, changeset_revision )
metadata = repository_metadata.metadata
if 'invalid_tools' not in metadata:
return
- for tool_xml in metadata[ 'tools' ]:
- tool_path = '%s/%s' % ( self.get_repo_path( repository ), tool_xml )
- self.load_display_tool_page( repository,
- tool_xml_path=tool_path,
+ for tool_xml in metadata[ 'invalid_tools' ]:
+ self.load_invalid_tool_page( repository,
+ tool_xml=tool_xml,
changeset_revision=changeset_revision,
- strings_displayed=[ 'properly loaded' ],
- strings_not_displayed=[] )
+ strings_displayed=strings_displayed,
+ strings_not_displayed=strings_not_displayed )
def check_string_count_in_page( self, pattern, min_count, max_count=None ):
"""Checks the number of 'pattern' occurrences in the current browser page"""
page = self.last_page()
@@ -260,19 +262,23 @@
invalid_tools.append( dict( tools=repository_metadata.metadata[ 'invalid_tools' ], changeset_revision=repository_metadata.changeset_revision ) )
return valid_tools, invalid_tools
def grant_write_access( self, repository, usernames=[], strings_displayed=[], strings_not_displayed=[] ):
- self.manage_repository( repository )
+ self.display_manage_repository_page( repository )
tc.fv( "3", "allow_push", '-Select one' )
for username in usernames:
tc.fv( "3", "allow_push", '+%s' % username )
tc.submit( 'user_access_button' )
self.check_for_strings( strings_displayed, strings_not_displayed )
- def load_display_tool_page( self, repository, tool_xml_path, changeset_revision, strings_displayed=[], strings_not_displayed=[] ):
- repository_id = self.security.encode_id( repository.id )
- url = '/repository/display_tool?repository_id=%s&tool_config=%s&changeset_revision=%s' % \
- ( repository_id, tool_xml_path, changeset_revision )
+ def load_invalid_tool_page( self, repository, tool_xml, changeset_revision, strings_displayed=[], strings_not_displayed=[] ):
+ url = '/repository/load_invalid_tool?repository_id=%s&tool_config=%s&changeset_revision=%s' % \
+ ( self.security.encode_id( repository.id ), tool_xml, changeset_revision )
self.visit_url( url )
self.check_for_strings( strings_displayed, strings_not_displayed )
- def manage_repository( self, repository, strings_displayed=[], strings_not_displayed=[] ):
+ def load_display_tool_page( self, repository, tool_xml_path, changeset_revision, strings_displayed=[], strings_not_displayed=[] ):
+ url = '/repository/display_tool?repository_id=%s&tool_config=%s&changeset_revision=%s' % \
+ ( self.security.encode_id( repository.id ), tool_xml_path, changeset_revision )
+ self.visit_url( url )
+ self.check_for_strings( strings_displayed, strings_not_displayed )
+ def display_manage_repository_page( self, repository, strings_displayed=[], strings_not_displayed=[] ):
url = '/repository/manage_repository?id=%s' % self.security.encode_id( repository.id )
self.visit_url( url )
self.check_for_strings( strings_displayed, strings_not_displayed )
@@ -280,21 +286,20 @@
url = '/repository/manage_repository?user_access_button=Remove&id=%s&remove_auth=%s' % \
( self.security.encode_id( repository.id ), username )
self.visit_url( url )
- def search_for_valid_tools( self, search_options={}, strings_displayed=[], strings_not_displayed=[] ):
- for exact_matches in [ True, False ]:
- for key, value in search_options.items():
- url = '/repository/find_tools'
- self.visit_url( url )
- tc.fv( "1", "exact_matches", exact_matches )
- tc.fv( "1", key, value )
- tc.submit()
- self.check_for_strings( strings_displayed, strings_not_displayed )
+ def search_for_valid_tools( self, search_fields={}, exact_matches=False, strings_displayed=[], strings_not_displayed=[] ):
+ for field_name, search_string in search_fields.items():
+ url = '/repository/find_tools'
+ self.visit_url( url )
+ tc.fv( "1", "exact_matches", exact_matches )
+ tc.fv( "1", field_name, search_string )
+ tc.submit()
+ self.check_for_strings( strings_displayed, strings_not_displayed )
def set_repository_deprecated( self, repository, set_deprecated=True, strings_displayed=[], strings_not_displayed=[] ):
url = '/repository/deprecate?id=%s&mark_deprecated=%s' % ( self.security.encode_id( repository.id ), set_deprecated )
self.visit_url( url )
self.check_for_strings( strings_displayed, strings_not_displayed )
def set_repository_malicious( self, repository, set_malicious=True, strings_displayed=[], strings_not_displayed=[] ):
- self.manage_repository( repository )
+ self.display_manage_repository_page( repository )
tc.fv( "malicious", "malicious", set_malicious )
tc.submit( "malicious_button" )
self.check_for_strings( strings_displayed, strings_not_displayed )
diff -r cdbee5bc4a6d2140401ba0ab1ce7de3e9b9882b0 -r 12f55a688c82bb3e26de0ebba3cb202b16cf82e3 test/tool_shed/functional/test_0000_basic_repository_features.py
--- a/test/tool_shed/functional/test_0000_basic_repository_features.py
+++ b/test/tool_shed/functional/test_0000_basic_repository_features.py
@@ -83,9 +83,10 @@
strings_displayed=[ 'The repository tip has been defined as <b>not</b> malicious.' ] )
self.set_repository_deprecated( repository,
strings_displayed=[ 'has been marked as deprecated', 'Mark as not deprecated' ] )
- self.manage_repository( repository,
+ self.display_manage_repository_page( repository,
strings_displayed=[ 'This repository has been marked as deprecated' ],
strings_not_displayed=[ 'Upload files', 'Reset all repository metadata' ] )
+ self.browse_repository( repository, strings_not_displayed=[ 'Upload files' ] )
self.set_repository_deprecated( repository,
strings_displayed=[ 'has been marked as not deprecated', 'Mark as deprecated' ],
set_deprecated=False )
@@ -105,7 +106,7 @@
commit_message="Uploaded filtering.txt",
uncompress_file='No',
remove_repo_files_not_in_tar='No' )
- self.manage_repository( repository, strings_displayed=[ 'Readme file for filtering 1.1.0' ] )
+ self.display_manage_repository_page( repository, strings_displayed=[ 'Readme file for filtering 1.1.0' ] )
def test_0055_upload_filtering_test_data( self ):
'''Upload filtering test data.'''
repository = get_repository_by_name_and_owner( repository_name, admin_username )
@@ -129,7 +130,7 @@
tip = self.get_repository_tip( repository )
self.check_for_valid_tools( repository )
strings_displayed = self.get_repository_metadata_revisions( repository ).append( 'Select a revision' )
- self.manage_repository( repository, strings_displayed=strings_displayed )
+ self.display_manage_repository_page( repository, strings_displayed=strings_displayed )
self.check_count_of_metadata_revisions_associated_with_repository( repository, metadata_count=2 )
self.check_repository_tools_for_changeset_revision( repository, tip )
self.check_repository_metadata( repository, tip_only=False )
@@ -137,19 +138,19 @@
'''Upload readme.txt file associated with tool version 2.2.0.'''
repository = get_repository_by_name_and_owner( repository_name, admin_username )
self.upload_file( repository, 'readme.txt', commit_message="Uploaded readme.txt" )
- self.manage_repository( repository, strings_displayed=[ 'This is a readme file.' ] )
+ self.display_manage_repository_page( repository, strings_displayed=[ 'This is a readme file.' ] )
# Verify that there is a different readme file for each metadata revision.
metadata_revisions = self.get_repository_metadata_revisions( repository )
- self.manage_repository( repository, strings_displayed=[ 'Readme file for filtering 1.1.0', 'This is a readme file.' ] )
+ self.display_manage_repository_page( repository, strings_displayed=[ 'Readme file for filtering 1.1.0', 'This is a readme file.' ] )
def test_0075_delete_readme_txt_file( self ):
'''Delete the readme.txt file.'''
repository = get_repository_by_name_and_owner( repository_name, admin_username )
self.delete_files_from_repository( repository, filenames=[ 'readme.txt' ] )
self.check_count_of_metadata_revisions_associated_with_repository( repository, metadata_count=2 )
- self.manage_repository( repository, strings_displayed=[ 'Readme file for filtering 1.1.0' ] )
+ self.display_manage_repository_page( repository, strings_displayed=[ 'Readme file for filtering 1.1.0' ] )
def test_0080_search_for_valid_filter_tool( self ):
- '''Verify that the "search for valid tool" feature finds the filtering tool.'''
+ '''Search for the filtering tool by tool ID, name, and version.'''
repository = get_repository_by_name_and_owner( repository_name, admin_username )
tip_changeset = self.get_repository_tip( repository )
- search_options = dict( tool_id='Filter1', tool_name='filter', tool_version='2.2.0' )
- self.search_for_valid_tools( search_options=search_options, strings_displayed=[ tip_changeset ], strings_not_displayed=[] )
+ search_fields = dict( tool_id='Filter1', tool_name='filter', tool_version='2.2.0' )
+ self.search_for_valid_tools( search_fields=search_fields, strings_displayed=[ tip_changeset ], strings_not_displayed=[] )
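As a rough usage sketch of the refactored search helper: exact_matches is now an explicit argument and the helper submits the form once per search field. The submit callable below is a stand-in for the twill form submission in the real twilltestcase.py, not part of this commit.

def search_for_valid_tools(search_fields, exact_matches=False, submit=print):
    # One form submission per field, with the exact-match flag set explicitly.
    for field_name, search_string in search_fields.items():
        submit({'exact_matches': exact_matches, field_name: search_string})

search_for_valid_tools({'tool_id': 'Filter1', 'tool_version': '2.2.0'}, exact_matches=True)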
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: greg: Clean up page content rendering in the tool shed.
by Bitbucket 03 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/cdbee5bc4a6d/
changeset: cdbee5bc4a6d
user: greg
date: 2012-12-03 15:31:52
summary: Clean up page content rendering in the tool shed.
affected #: 7 files
diff -r 20a25a4f989d034c6be371ffa90947c8dc0269d3 -r cdbee5bc4a6d2140401ba0ab1ce7de3e9b9882b0 lib/galaxy/util/shed_util_common.py
--- a/lib/galaxy/util/shed_util_common.py
+++ b/lib/galaxy/util/shed_util_common.py
@@ -19,6 +19,9 @@
from elementtree import ElementTree, ElementInclude
from elementtree.ElementTree import Element, SubElement
+eggs.require( 'markupsafe' )
+import markupsafe
+
log = logging.getLogger( __name__ )
INITIAL_CHANGELOG_HASH = '000000000000'
@@ -1155,20 +1158,20 @@
return all_repository_dependencies
def get_repository_file_contents( file_path ):
if is_gzip( file_path ):
- safe_str = to_safe_str( '\ngzip compressed file\n' )
+ safe_str = to_safe_string( '\ngzip compressed file\n' )
elif is_bz2( file_path ):
- safe_str = to_safe_str( '\nbz2 compressed file\n' )
+ safe_str = to_safe_string( '\nbz2 compressed file\n' )
elif check_zip( file_path ):
- safe_str = to_safe_str( '\nzip compressed file\n' )
+ safe_str = to_safe_string( '\nzip compressed file\n' )
elif check_binary( file_path ):
- safe_str = to_safe_str( '\nBinary file\n' )
+ safe_str = to_safe_string( '\nBinary file\n' )
else:
safe_str = ''
for i, line in enumerate( open( file_path ) ):
- safe_str = '%s%s' % ( safe_str, to_safe_str( line ) )
+ safe_str = '%s%s' % ( safe_str, to_safe_string( line ) )
if len( safe_str ) > MAX_CONTENT_SIZE:
large_str = '\nFile contents truncated because file size is larger than maximum viewing size of %s\n' % util.nice_size( MAX_CONTENT_SIZE )
- safe_str = '%s%s' % ( safe_str, to_safe_str( large_str ) )
+ safe_str = '%s%s' % ( safe_str, to_safe_string( large_str ) )
break
return safe_str
def get_repository_files( trans, folder_path ):
@@ -1631,7 +1634,7 @@
except:
file_name = fpath
return file_name
-def to_safe_str( text, to_html=True ):
+def to_safe_string( text, to_html=True ):
"""Translates the characters in text to an html string"""
translated = []
for c in text:
@@ -1639,25 +1642,29 @@
translated.append( c )
elif c in MAPPED_CHARS:
translated.append( MAPPED_CHARS[ c ] )
- elif c in [ '\n', '\r' ]:
+ elif c in [ '\n' ]:
if to_html:
translated.append( '<br/>' )
else:
translated.append( c )
+ elif c in [ '\r' ]:
+ continue
elif c in [ ' ', ' ' ]:
translated.append( c )
else:
translated.append( '' )
+ if to_html:
+ str( markupsafe.escape( ''.join( translated ) ) )
return ''.join( translated )
def tool_shed_is_this_tool_shed( toolshed_base_url ):
return toolshed_base_url.rstrip( '/' ) == str( url_for( '/', qualified=True ) ).rstrip( '/' )
def translate_string( raw_text, to_html=True ):
if raw_text:
if len( raw_text ) <= MAX_CONTENT_SIZE:
- translated_string = to_safe_str( raw_text, to_html=to_html )
+ translated_string = to_safe_string( raw_text, to_html=to_html )
else:
large_str = '\nFile contents truncated because file size is larger than maximum viewing size of %s\n' % util.nice_size( MAX_CONTENT_SIZE )
- translated_string = to_safe_str( '%s%s' % ( raw_text[ 0:MAX_CONTENT_SIZE ], large_str ), to_html=to_html )
+ translated_string = to_safe_string( '%s%s' % ( raw_text[ 0:MAX_CONTENT_SIZE ], large_str ), to_html=to_html )
else:
translated_string = ''
return translated_string
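To make the whitespace change easier to follow, here is a simplified stand-in for the new to_safe_string behaviour: carriage returns are dropped and line feeds become <br/> only in HTML mode. The VALID_CHARS whitelist and MAPPED_CHARS mapping from the real function are omitted.

def sketch_safe_string(text, to_html=True):
    out = []
    for c in text:
        if c == '\r':
            continue  # carriage returns are now dropped entirely
        elif c == '\n':
            out.append('<br/>' if to_html else c)
        else:
            out.append(c)
    return ''.join(out)

print(sketch_safe_string('line one\r\nline two'))                 # line one<br/>line two
print(sketch_safe_string('line one\r\nline two', to_html=False))  # keeps the bare newline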
diff -r 20a25a4f989d034c6be371ffa90947c8dc0269d3 -r cdbee5bc4a6d2140401ba0ab1ce7de3e9b9882b0 lib/galaxy/webapps/community/controllers/repository.py
--- a/lib/galaxy/webapps/community/controllers/repository.py
+++ b/lib/galaxy/webapps/community/controllers/repository.py
@@ -2263,17 +2263,6 @@
if list:
return ','.join( list )
return ''
- def to_html_escaped( self, text ):
- """Translates the characters in text to html values"""
- translated = []
- for c in text:
- if c in [ '\r\n', '\n', ' ', '\t' ] or c in VALID_CHARS:
- translated.append( c )
- elif c in MAPPED_CHARS:
- translated.append( MAPPED_CHARS[ c ] )
- else:
- translated.append( '' )
- return ''.join( translated )
def __validate_repository_name( self, name, user ):
# Repository names must be unique for each user, must be at least four characters
# in length and must contain only lower-case letters, numbers, and the '_' character.
@@ -2347,7 +2336,7 @@
anchors = modified + added + removed + deleted + unknown + ignored + clean
diffs = []
for diff in patch.diff( repo, node1=ctx_parent.node(), node2=ctx.node() ):
- diffs.append( self.to_html_escaped( diff ) )
+ diffs.append( to_safe_string( diff, to_html=True ) )
is_malicious = changeset_is_malicious( trans, id, repository.tip( trans.app ) )
metadata = self.get_metadata( trans, id, ctx_str )
return trans.fill_template( '/webapps/community/repository/view_changeset.mako',
diff -r 20a25a4f989d034c6be371ffa90947c8dc0269d3 -r cdbee5bc4a6d2140401ba0ab1ce7de3e9b9882b0 templates/webapps/community/common/common.mako
--- a/templates/webapps/community/common/common.mako
+++ b/templates/webapps/community/common/common.mako
@@ -31,16 +31,6 @@
</script></%def>
-<%def name="escape_html_add_breaks( value )">
- <%
- from galaxy import eggs
- eggs.require('markupsafe')
- import markupsafe
- value = str( markupsafe.escape( value ) ).replace( '\n', '<br/>' )
- %>
- ${value}
-</%def>
-
<%def name="render_star_rating( name, rating, disabled=False )"><%
if disabled:
@@ -71,7 +61,7 @@
<div class="form-row"><label>Detailed description:</label><table id="description_table">
- <tr><td>${ escape_html_add_breaks( description_text ) }</td></tr>
+ <tr><td>${description_text}</td></tr></table><div style="clear: both"></div></div>
diff -r 20a25a4f989d034c6be371ffa90947c8dc0269d3 -r cdbee5bc4a6d2140401ba0ab1ce7de3e9b9882b0 templates/webapps/community/repository/common.mako
--- a/templates/webapps/community/repository/common.mako
+++ b/templates/webapps/community/repository/common.mako
@@ -1,4 +1,3 @@
-<%namespace file="/webapps/community/common/common.mako" import="escape_html_add_breaks" /><%def name="common_javascripts(repository)"><script type="text/javascript">
@@ -316,14 +315,17 @@
</%def><%def name="render_readme( readme, pad, parent, row_counter )">
- <% encoded_id = trans.security.encode_id( readme.id ) %>
+ <%
+ from galaxy.util.shed_util_common import to_safe_string
+ encoded_id = trans.security.encode_id( readme.id )
+ %><tr class="datasetRow"
%if parent is not None:
parent="${parent}"
%endif
id="libraryItem-${encoded_id}"><td style="padding-left: ${pad+20}px;">
- ${escape_html_add_breaks( readme.text )}
+ ${ to_safe_string( readme.text, to_html=True ) }
</td></tr><%
diff -r 20a25a4f989d034c6be371ffa90947c8dc0269d3 -r cdbee5bc4a6d2140401ba0ab1ce7de3e9b9882b0 templates/webapps/community/repository/view_changeset.mako
--- a/templates/webapps/community/repository/view_changeset.mako
+++ b/templates/webapps/community/repository/view_changeset.mako
@@ -185,7 +185,7 @@
ctr += 1
%><tr><td bgcolor="#E0E0E0">${anchor_str}</td></tr>
- <tr><td>${ escape_html_add_breaks( diff ) }</td></tr>
+ <tr><td>${diff}</td></tr>
%endfor
</table></div>
diff -r 20a25a4f989d034c6be371ffa90947c8dc0269d3 -r cdbee5bc4a6d2140401ba0ab1ce7de3e9b9882b0 templates/webapps/community/repository/view_repository.mako
--- a/templates/webapps/community/repository/view_repository.mako
+++ b/templates/webapps/community/repository/view_repository.mako
@@ -5,6 +5,7 @@
<%
from galaxy.web.framework.helpers import time_ago
+ from galaxy.util.shed_util_common import to_safe_string
has_readme = metadata and 'readme' in metadata
has_metadata = repository.metadata_revisions
@@ -157,7 +158,7 @@
${repository.description | h}
</div>
%if repository.long_description:
- ${render_long_description( repository.long_description )}
+ ${render_long_description( to_safe_string( repository.long_description, to_html=True ) )}
%endif
<div class="form-row"><label>Revision:</label>
diff -r 20a25a4f989d034c6be371ffa90947c8dc0269d3 -r cdbee5bc4a6d2140401ba0ab1ce7de3e9b9882b0 templates/webapps/community/repository_review/browse_review.mako
--- a/templates/webapps/community/repository_review/browse_review.mako
+++ b/templates/webapps/community/repository_review/browse_review.mako
@@ -5,6 +5,7 @@
<%
from galaxy.web.form_builder import CheckboxField
from galaxy.webapps.community.util.container_util import STRSEP
+ from galaxy.util.shed_util_common import to_safe_string
can_manage_repository = is_admin or repository.user == trans.user
%>
@@ -93,7 +94,7 @@
<tr><td><div overflow-wrap:normal;overflow:hidden;word-break:keep-all;word-wrap:break-word;line-break:strict;>
- ${ escape_html_add_breaks( component_review.comment ) }
+ ${ to_safe_string( component_review.comment, to_html=True ) }
</div></td></tr>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: greg: Fixes for rendering certain tool shed repository content.
by Bitbucket 01 Dec '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/20a25a4f989d/
changeset: 20a25a4f989d
user: greg
date: 2012-12-01 01:37:37
summary: Fixes for rendering certain tool shed repository content.
affected #: 2 files
diff -r 03ec137ca8a3148aa771769e80963b65194b7895 -r 20a25a4f989d034c6be371ffa90947c8dc0269d3 lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -1064,19 +1064,6 @@
return removed, error_message
def tool_shed_from_repository_clone_url( repository_clone_url ):
return clean_repository_clone_url( repository_clone_url ).split( 'repos' )[ 0 ].rstrip( '/' )
-def translate_string( raw_text, to_html=True ):
- if raw_text:
- if to_html:
- if len( raw_text ) <= MAX_CONTENT_SIZE:
- translated_string = to_html_str( raw_text )
- else:
- large_str = '\nFile contents truncated because file size is larger than maximum viewing size of %s\n' % util.nice_size( MAX_CONTENT_SIZE )
- translated_string = to_html_str( '%s%s' % ( raw_text[ 0:MAX_CONTENT_SIZE ], large_str ) )
- else:
- raise Exception( "String translation currently only supports text to HTML." )
- else:
- translated_string = ''
- return translated_string
def update_in_shed_tool_config( app, repository ):
# A tool shed repository is being updated so change the shed_tool_conf file. Parse the config file to generate the entire list
# of config_elems instead of using the in-memory list.
diff -r 03ec137ca8a3148aa771769e80963b65194b7895 -r 20a25a4f989d034c6be371ffa90947c8dc0269d3 lib/galaxy/util/shed_util_common.py
--- a/lib/galaxy/util/shed_util_common.py
+++ b/lib/galaxy/util/shed_util_common.py
@@ -62,7 +62,7 @@
f = open( full_path_to_readme_file, 'r' )
text = f.read()
f.close()
- readme_files_dict[ readme_file_name ] = str( text )
+ readme_files_dict[ readme_file_name ] = translate_string( text, to_html=False )
except Exception, e:
log.debug( "Error reading README file '%s' defined in metadata for repository '%s', revision '%s': %s" % \
( str( relative_path_to_readme_file ), str( repository_name ), str( changeset_revision ), str( e ) ) )
@@ -1155,22 +1155,22 @@
return all_repository_dependencies
def get_repository_file_contents( file_path ):
if is_gzip( file_path ):
- to_html = to_html_str( '\ngzip compressed file\n' )
+ safe_str = to_safe_str( '\ngzip compressed file\n' )
elif is_bz2( file_path ):
- to_html = to_html_str( '\nbz2 compressed file\n' )
+ safe_str = to_safe_str( '\nbz2 compressed file\n' )
elif check_zip( file_path ):
- to_html = to_html_str( '\nzip compressed file\n' )
+ safe_str = to_safe_str( '\nzip compressed file\n' )
elif check_binary( file_path ):
- to_html = to_html_str( '\nBinary file\n' )
+ safe_str = to_safe_str( '\nBinary file\n' )
else:
- to_html = ''
+ safe_str = ''
for i, line in enumerate( open( file_path ) ):
- to_html = '%s%s' % ( to_html, to_html_str( line ) )
- if len( to_html ) > MAX_CONTENT_SIZE:
+ safe_str = '%s%s' % ( safe_str, to_safe_str( line ) )
+ if len( safe_str ) > MAX_CONTENT_SIZE:
large_str = '\nFile contents truncated because file size is larger than maximum viewing size of %s\n' % util.nice_size( MAX_CONTENT_SIZE )
- to_html = '%s%s' % ( to_html, to_html_str( large_str ) )
+ safe_str = '%s%s' % ( safe_str, to_safe_str( large_str ) )
break
- return to_html
+ return safe_str
def get_repository_files( trans, folder_path ):
contents = []
for item in os.listdir( folder_path ):
@@ -1631,7 +1631,7 @@
except:
file_name = fpath
return file_name
-def to_html_str( text ):
+def to_safe_str( text, to_html=True ):
"""Translates the characters in text to an html string"""
translated = []
for c in text:
@@ -1639,17 +1639,28 @@
translated.append( c )
elif c in MAPPED_CHARS:
translated.append( MAPPED_CHARS[ c ] )
- elif c == ' ':
- translated.append( ' ' )
- elif c == '\t':
- translated.append( ' ' )
- elif c == '\n':
- translated.append( '<br/>' )
- elif c not in [ '\r' ]:
+ elif c in [ '\n', '\r' ]:
+ if to_html:
+ translated.append( '<br/>' )
+ else:
+ translated.append( c )
+ elif c in [ ' ', ' ' ]:
+ translated.append( c )
+ else:
translated.append( '' )
return ''.join( translated )
def tool_shed_is_this_tool_shed( toolshed_base_url ):
return toolshed_base_url.rstrip( '/' ) == str( url_for( '/', qualified=True ) ).rstrip( '/' )
+def translate_string( raw_text, to_html=True ):
+ if raw_text:
+ if len( raw_text ) <= MAX_CONTENT_SIZE:
+ translated_string = to_safe_str( raw_text, to_html=to_html )
+ else:
+ large_str = '\nFile contents truncated because file size is larger than maximum viewing size of %s\n' % util.nice_size( MAX_CONTENT_SIZE )
+ translated_string = to_safe_str( '%s%s' % ( raw_text[ 0:MAX_CONTENT_SIZE ], large_str ), to_html=to_html )
+ else:
+ translated_string = ''
+ return translated_string
def update_existing_tool_dependency( app, repository, original_dependency_dict, new_dependencies_dict ):
"""
Update an existing tool dependency whose definition was updated in a change set pulled by a Galaxy administrator when getting updates
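For orientation, the relocated translate_string behaves roughly as sketched below: content over MAX_CONTENT_SIZE is cut and a truncation notice appended. The constant value and message here are placeholders, and the HTML escaping path is skipped.

MAX_CONTENT_SIZE = 32768  # placeholder, not the real shed_util_common setting

def sketch_translate_string(raw_text):
    if not raw_text:
        return ''
    if len(raw_text) <= MAX_CONTENT_SIZE:
        return raw_text
    notice = '\nFile contents truncated because file size is larger than maximum viewing size\n'
    return raw_text[0:MAX_CONTENT_SIZE] + notice

print(sketch_translate_string('x' * 50000).endswith('viewing size\n'))  # True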
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: greg: Several miscellaneous fixes for rendering repository contents and dependencies.
by Bitbucket 30 Nov '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/03ec137ca8a3/
changeset: 03ec137ca8a3
user: greg
date: 2012-11-30 22:02:54
summary: Several miscellaneous fixes for rendering repository contents and dependencies.
affected #: 6 files
diff -r 5328ebe51a0eba8b0e2315f2ce9fedf2c4a02e83 -r 03ec137ca8a3148aa771769e80963b65194b7895 lib/galaxy/util/shed_util_common.py
--- a/lib/galaxy/util/shed_util_common.py
+++ b/lib/galaxy/util/shed_util_common.py
@@ -996,6 +996,40 @@
fh.close()
return tmp_filename
return None
+def get_next_downloadable_changeset_revision( repository, repo, after_changeset_revision ):
+ """
+ Return the installable changeset_revision in the repository changelog after the changeset to which after_changeset_revision
+ refers. If there isn't one, return None.
+ """
+ changeset_revisions = get_ordered_downloadable_changeset_revisions( repository, repo )
+ if len( changeset_revisions ) == 1:
+ changeset_revision = changeset_revisions[ 0 ]
+ if changeset_revision == after_changeset_revision:
+ return None
+ found_after_changeset_revision = False
+ for changeset in repo.changelog:
+ changeset_revision = str( repo.changectx( changeset ) )
+ if found_after_changeset_revision:
+ if changeset_revision in changeset_revisions:
+ return changeset_revision
+ elif not found_after_changeset_revision and changeset_revision == after_changeset_revision:
+ # We've found the changeset in the changelog for which we need to get the next downloadable changeset.
+ found_after_changeset_revision = True
+ return None
+def get_ordered_downloadable_changeset_revisions( repository, repo ):
+ """Return an ordered list of changeset_revisions defined by a repository changelog."""
+ changeset_tups = []
+ for repository_metadata in repository.downloadable_revisions:
+ changeset_revision = repository_metadata.changeset_revision
+ ctx = get_changectx_for_changeset( repo, changeset_revision )
+ if ctx:
+ rev = '%04d' % ctx.rev()
+ else:
+ rev = '-1'
+ changeset_tups.append( ( rev, changeset_revision ) )
+ sorted_changeset_tups = sorted( changeset_tups )
+ sorted_changeset_revisions = [ changeset_tup[ 1 ] for changeset_tup in sorted_changeset_tups ]
+ return sorted_changeset_revisions
def get_parent_id( trans, id, old_id, version, guid, changeset_revisions ):
parent_id = None
# Compare from most recent to oldest.
@@ -1014,6 +1048,27 @@
if parent_id is None:
# The tool did not change through all of the changeset revisions.
return old_id
+def get_previous_downloadable_changset_revision( repository, repo, before_changeset_revision ):
+ """
+ Return the installable changeset_revision in the repository changelog prior to the changeset to which before_changeset_revision
+ refers. If there isn't one, return the hash value of an empty repository changelog, INITIAL_CHANGELOG_HASH.
+ """
+ changeset_revisions = get_ordered_downloadable_changeset_revisions( repository, repo )
+ if len( changeset_revisions ) == 1:
+ changeset_revision = changeset_revisions[ 0 ]
+ if changeset_revision == before_changeset_revision:
+ return INITIAL_CHANGELOG_HASH
+ return changeset_revision
+ previous_changeset_revision = None
+ for changeset_revision in changeset_revisions:
+ if changeset_revision == before_changeset_revision:
+ if previous_changeset_revision:
+ return previous_changeset_revision
+ else:
+ # Return the hash value of an empty repository changelog - note that this will not be a valid changeset revision.
+ return INITIAL_CHANGELOG_HASH
+ else:
+ previous_changeset_revision = changeset_revision
def get_readme_file_names( repository_name ):
readme_files = [ 'readme', 'read_me', 'install' ]
valid_filenames = [ r for r in readme_files ]
@@ -1079,7 +1134,10 @@
# The repository changeset_revision is no longer installable, so see if there's been an update.
required_repo_dir = required_repository.repo_path( trans.app )
required_repo = hg.repository( get_configured_ui(), required_repo_dir )
- required_repository_metadata = get_next_downloadable_changeset_revision( required_repository, required_repo, changeset_revision )
+ required_changeset_revision = get_next_downloadable_changeset_revision( required_repository, required_repo, changeset_revision )
+ required_repository_metadata = get_repository_metadata_by_repository_id_changset_revision( trans,
+ trans.security.encode_id( required_repository.id ),
+ required_changeset_revision )
if required_repository_metadata:
# The required_repository_metadata changeset_revision is installable.
required_metadata = required_repository_metadata.metadata
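A quick sketch of how the new get_ordered_downloadable_changeset_revisions helper orders revisions: each one is keyed by a zero-padded numeric rev ('-1' when no changelog context is found), so a plain tuple sort yields changelog order. The hashes below are made up for illustration.

changeset_tups = [('0007', 'a1b2c3d4e5f6'), ('0002', '9f8e7d6c5b4a'), ('-1', 'deadbeefcafe')]
sorted_changeset_revisions = [changeset for _, changeset in sorted(changeset_tups)]
print(sorted_changeset_revisions)  # ['deadbeefcafe', '9f8e7d6c5b4a', 'a1b2c3d4e5f6']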
diff -r 5328ebe51a0eba8b0e2315f2ce9fedf2c4a02e83 -r 03ec137ca8a3148aa771769e80963b65194b7895 lib/galaxy/webapps/community/controllers/common.py
--- a/lib/galaxy/webapps/community/controllers/common.py
+++ b/lib/galaxy/webapps/community/controllers/common.py
@@ -270,61 +270,6 @@
fh.close()
return tmp_filename
return None
-def get_next_downloadable_changeset_revision( repository, repo, after_changeset_revision ):
- """
- Return the installable changeset_revision in the repository changelog after to the changeset to which after_changeset_revision
- refers. If there isn't one, return None.
- """
- changeset_revisions = get_ordered_downloadable_changeset_revisions( repository, repo )
- if len( changeset_revisions ) == 1:
- changeset_revision = changeset_revisions[ 0 ]
- if changeset_revision == after_changeset_revision:
- return None
- found_after_changeset_revision = False
- for changeset in repo.changelog:
- changeset_revision = str( repo.changectx( changeset ) )
- if found_after_changeset_revision:
- if changeset_revision in downloadable_changeset_revisions:
- return changeset_revision
- elif not found_after_changeset_revision and changeset_revision == after_changeset_revision:
- # We've found the changeset in the changelog for which we need to get the next downloadable changset.
- found_after_changeset_revision = True
- return None
-def get_ordered_downloadable_changeset_revisions( repository, repo ):
- """Return an ordered list of changeset_revisions defined by a repository changelog."""
- changeset_tups = []
- for repository_metadata in repository.downloadable_revisions:
- changeset_revision = repository_metadata.changeset_revision
- ctx = get_changectx_for_changeset( repo, changeset_revision )
- if ctx:
- rev = '%04d' % ctx.rev()
- else:
- rev = '-1'
- changeset_tups.append( ( rev, changeset_revision ) )
- sorted_changeset_tups = sorted( changeset_tups )
- sorted_changeset_revisions = [ changeset_tup[ 1 ] for changeset_tup in sorted_changeset_tups ]
- return sorted_changeset_revisions
-def get_previous_downloadable_changset_revision( repository, repo, before_changeset_revision ):
- """
- Return the installable changeset_revision in the repository changelog prior to the changeset to which before_changeset_revision
- refers. If there isn't one, return the hash value of an empty repository changelog, INITIAL_CHANGELOG_HASH.
- """
- changeset_revisions = get_ordered_downloadable_changeset_revisions( repository, repo )
- if len( changeset_revisions ) == 1:
- changeset_revision = changeset_revisions[ 0 ]
- if changeset_revision == before_changeset_revision:
- return INITIAL_CHANGELOG_HASH
- return changeset_revision
- previous_changeset_revision = None
- for changeset_revision in changeset_revisions:
- if changeset_revision == before_changeset_revision:
- if previous_changeset_revision:
- return previous_changeset_revision
- else:
- # Return the hash value of an empty repository changelog - note that this will not be a valid changeset revision.
- return INITIAL_CHANGELOG_HASH
- else:
- previous_changeset_revision = changeset_revision
def get_previous_repository_reviews( trans, repository, changeset_revision ):
"""Return an ordered dictionary of repository reviews up to and including the received changeset revision."""
repo = hg.repository( get_configured_ui(), repository.repo_path( trans.app ) )
diff -r 5328ebe51a0eba8b0e2315f2ce9fedf2c4a02e83 -r 03ec137ca8a3148aa771769e80963b65194b7895 lib/galaxy/webapps/community/util/container_util.py
--- a/lib/galaxy/webapps/community/util/container_util.py
+++ b/lib/galaxy/webapps/community/util/container_util.py
@@ -1,5 +1,4 @@
import os, logging
-from galaxy.web import url_for
log = logging.getLogger( __name__ )
@@ -148,21 +147,29 @@
def build_readme_files_folder( folder_id, readme_files_dict, label='Readme files' ):
"""Return a folder hierarchy containing readme text files."""
if readme_files_dict:
+ multiple_readme_files = len( readme_files_dict ) > 1
readme_id = 0
folder_id += 1
readme_files_root_folder = Folder( id=folder_id, key='root', label='root' )
- folder_id += 1
- readme_files_folder = Folder( id=folder_id, key='readme_files', label=label )
- readme_files_root_folder.folders.append( readme_files_folder )
+ if multiple_readme_files:
+ folder_id += 1
+ readme_files_folder = Folder( id=folder_id, key='readme_files', label=label )
+ readme_files_root_folder.folders.append( readme_files_folder )
for readme_file_name, readme_file_text in readme_files_dict.items():
readme_id += 1
readme = ReadMe( id=readme_id,
name=readme_file_name,
text=readme_file_text )
- folder_id += 1
- folder = Folder( id=folder_id, key=readme.name, label=readme.name )
- folder.readme_files.append( readme )
- readme_files_folder.folders.append( folder )
+ if multiple_readme_files:
+ folder_id += 1
+ folder = Folder( id=folder_id, key=readme.name, label=readme.name )
+ folder.readme_files.append( readme )
+ readme_files_folder.folders.append( folder )
+ else:
+ folder_id += 1
+ readme_files_folder = Folder( id=folder_id, key='readme_files', label=readme.name )
+ readme_files_folder.readme_files.append( readme )
+ readme_files_root_folder.folders.append( readme_files_folder )
else:
readme_files_root_folder = None
return folder_id, readme_files_root_folder
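The revised readme folder layout can be sketched as follows: with more than one readme the files nest under a 'Readme files' container, while a single readme becomes a folder labelled with its own file name. Folder here is a simplified stand-in for the container_util class, not the real implementation.

class Folder(object):
    def __init__(self, label):
        self.label = label
        self.folders = []
        self.readme_files = []

def build_readme_folders(readme_files_dict, label='Readme files'):
    root = Folder('root')
    container = Folder(label) if len(readme_files_dict) > 1 else None
    if container:
        root.folders.append(container)
    for name, text in readme_files_dict.items():
        leaf = Folder(name)                       # one folder per readme file
        leaf.readme_files.append((name, text))
        (container or root).folders.append(leaf)  # nest under the container only when there are several
    return root

root = build_readme_folders({'readme.txt': 'This is a readme file.'})
print([folder.label for folder in root.folders])  # ['readme.txt']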
diff -r 5328ebe51a0eba8b0e2315f2ce9fedf2c4a02e83 -r 03ec137ca8a3148aa771769e80963b65194b7895 templates/admin/tool_shed_repository/reselect_tool_panel_section.mako
--- a/templates/admin/tool_shed_repository/reselect_tool_panel_section.mako
+++ b/templates/admin/tool_shed_repository/reselect_tool_panel_section.mako
@@ -15,7 +15,7 @@
%if readme_files_dict:
<div class="form-row"><table class="colored" width="100%">
- <th bgcolor="#EBD9B2">Repository README file (may contain important installation or license information)</th>
+ <th bgcolor="#EBD9B2">Repository README files - may contain important installation or license information</th></table></div>
${render_readme_section( containers_dict )}
diff -r 5328ebe51a0eba8b0e2315f2ce9fedf2c4a02e83 -r 03ec137ca8a3148aa771769e80963b65194b7895 templates/admin/tool_shed_repository/select_tool_panel_section.mako
--- a/templates/admin/tool_shed_repository/select_tool_panel_section.mako
+++ b/templates/admin/tool_shed_repository/select_tool_panel_section.mako
@@ -41,7 +41,7 @@
%if readme_files_dict:
<div class="form-row"><table class="colored" width="100%">
- <th bgcolor="#EBD9B2">Repository README file (may contain important installation or license information)</th>
+ <th bgcolor="#EBD9B2">Repository README file - may contain important installation or license information</th></table></div>
${render_readme_section( containers_dict )}
diff -r 5328ebe51a0eba8b0e2315f2ce9fedf2c4a02e83 -r 03ec137ca8a3148aa771769e80963b65194b7895 templates/webapps/community/repository/common.mako
--- a/templates/webapps/community/repository/common.mako
+++ b/templates/webapps/community/repository/common.mako
@@ -206,8 +206,6 @@
col_span_str = 'colspan="4"'
elif folder.label == 'Repository dependencies':
folder_label = "%s<i> - this repository requires installation of these additional repositories</i>" % folder_label
- elif folder.key == 'readme_files':
- folder_label = "%s<i> - may contain important installation or license information</i>" % folder_label
elif folder.invalid_tools:
folder_label = "%s<i> - click the tool config file name to see why the tool is invalid</i>" % folder_label
elif folder.tool_dependencies:
@@ -484,7 +482,7 @@
%>
%if readme_files_root_folder:
<div class="toolForm">
- <div class="toolFormTitle">Repository README files (may contain important installation or license information)</div>
+ <div class="toolFormTitle">Repository README files - may contain important installation or license information</div><div class="toolFormBody"><p/><% row_counter = RowCounter() %>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: greg: Fix for installing tool shed repositories into Galaxy.
by Bitbucket 30 Nov '12
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/changeset/5328ebe51a0e/
changeset: 5328ebe51a0e
user: greg
date: 2012-11-30 21:19:11
summary: Fix for installing tool shed repositories into Galaxy.
affected #: 1 file
diff -r 14589278b02d0dea916319811ddebf17ecca0747 -r 5328ebe51a0eba8b0e2315f2ce9fedf2c4a02e83 lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
--- a/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
+++ b/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
@@ -670,7 +670,7 @@
# Clone each repository to the configured location.
update_tool_shed_repository_status( trans.app, tool_shed_repository, trans.model.ToolShedRepository.installation_status.CLONING )
repo_info_tuple = repo_info_dict[ tool_shed_repository.name ]
- description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, tool_dependencies = repo_info_tuple
+ description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = repo_info_tuple
relative_clone_dir = self.generate_tool_path( repository_clone_url, tool_shed_repository.installed_changeset_revision )
clone_dir = os.path.join( tool_path, relative_clone_dir )
relative_install_dir = os.path.join( relative_clone_dir, tool_shed_repository.name )
@@ -1395,7 +1395,7 @@
# Handle case where the repository was previously installed using an older changeset_revsion, but later the repository was updated
# in the tool shed and now we're trying to install the latest changeset revision of the same repository instead of updating the one
# that was previously installed. We'll look in the database instead of on disk since the repository may be uninstalled.
- description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, tool_dependencies = repo_info_tuple
+ description, repository_clone_url, changeset_revision, ctx_rev, repository_owner, repository_dependencies, tool_dependencies = repo_info_tuple
tool_shed = get_tool_shed_from_clone_url( repository_clone_url )
# Get all previous change set revisions from the tool shed for the repository back to, but excluding, the previous valid changeset
# revision to see if it was previously installed using one of them.
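For reference, the widened repo_info_tuple now carries a repository_dependencies element between repository_owner and tool_dependencies, which is why both unpacking sites in admin_toolshed.py changed together. The values below are placeholders, not a real repository.

repo_info_tuple = ('example description',
                   'http://toolshed.example/repos/owner/name',
                   'abcdef012345', '7', 'owner',
                   {},   # repository_dependencies (new element)
                   {})   # tool_dependencies
(description, repository_clone_url, changeset_revision, ctx_rev,
 repository_owner, repository_dependencies, tool_dependencies) = repo_info_tuple
print(repository_dependencies)  # {}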
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.