1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/eb923432a1ac/
Changeset: eb923432a1ac
User: inithello
Date: 2013-04-18 21:16:47
Summary: Correctly handle repository dependencies when running the automated install and test script.
Affected #: 1 file
diff -r 02a467df84a365dde3084370a097a0859e6a45d3 -r eb923432a1ac56dd7a2e5a3b640a79487acea1b3 test/install_and_test_tool_shed_repositories/base/twilltestcase.py
--- a/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
+++ b/test/install_and_test_tool_shed_repositories/base/twilltestcase.py
@@ -88,7 +88,15 @@
checkbox.selected = False
kwd[ 'install_tool_dependencies' ] = 'False'
if 'install_repository_dependencies' in self.last_page():
- kwd[ 'install_repository_dependencies' ] = str( install_repository_dependencies ).lower()
+ form = tc.browser.get_form( 'select_tool_panel_section' )
+ checkbox = form.find_control( id="install_repository_dependencies" )
+ checkbox.disabled = False
+ if install_repository_dependencies:
+ checkbox.selected = True
+ kwd[ 'install_repository_dependencies' ] = 'True'
+ else:
+ checkbox.selected = False
+ kwd[ 'install_repository_dependencies' ] = 'False'
if 'shed_tool_conf' not in kwd:
kwd[ 'shed_tool_conf' ] = self.shed_tool_conf
if new_tool_panel_section:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/18b23ed8de5b/
Changeset: 18b23ed8de5b
User: greg
Date: 2013-04-18 17:07:25
Summary: Add support for ordering installation of tool shed repositories into Galaxy via the Galaxy API.
Affected #: 1 file
diff -r aaf9f60d8be28a1a3a896c51a5d4895563c8c0a3 -r 18b23ed8de5b384f142db349e7379c03567758df lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py
--- a/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py
+++ b/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py
@@ -240,10 +240,16 @@
# changeset_revision, there may be multiple repositories for installation at this point because repository dependencies may have added
# additional repositories for installation along with the single specified repository.
encoded_kwd, query, tool_shed_repositories, encoded_repository_ids = repository_util.initiate_repository_installation( trans, installation_dict )
+ # Some repositories may have repository dependencies that are required to be installed before the dependent repository, so we'll
+ # order the list of tsr_ids to ensure all repositories install in the required order.
+ tsr_ids = [ trans.security.encode_id( tool_shed_repository.id ) for tool_shed_repository in tool_shed_repositories ]
+ ordered_tsr_ids, ordered_repo_info_dicts, ordered_tool_panel_section_keys = \
+ repository_util.order_components_for_installation( trans, tsr_ids, repo_info_dicts, tool_panel_section_keys )
# Install the repositories, keeping track of each one for later display.
- for index, tool_shed_repository in enumerate( tool_shed_repositories ):
- repo_info_dict = repo_info_dicts[ index ]
- tool_panel_section_key = tool_panel_section_keys[ index ]
+ for index, tsr_id in enumerate( ordered_tsr_ids ):
+ tool_shed_repository = trans.sa_session.query( trans.model.ToolShedRepository ).get( trans.security.decode_id( tsr_id ) )
+ repo_info_dict = ordered_repo_info_dicts[ index ]
+ tool_panel_section_key = ordered_tool_panel_section_keys[ index ]
repository_util.install_tool_shed_repository( trans,
tool_shed_repository,
repo_info_dict,
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/56e9d6fb4201/
Changeset: 56e9d6fb4201
User: dan
Date: 2013-04-18 15:56:13
Summary: Remove leading slash from generation of tool_runner URL when a tool requires user log in, and the user still needs to log in.
Affected #: 1 file
diff -r eba52974bfaf93806b5177363a1d731190e28d8b -r 56e9d6fb4201122a7545abf417b908b944aeab4e lib/galaxy/webapps/galaxy/controllers/tool_runner.py
--- a/lib/galaxy/webapps/galaxy/controllers/tool_runner.py
+++ b/lib/galaxy/webapps/galaxy/controllers/tool_runner.py
@@ -65,7 +65,7 @@
if tool.require_login and not trans.user:
message = "You must be logged in to use this tool."
status = "info"
- redirect = url_for( controller='/tool_runner', action='index', tool_id=tool_id, **kwd )
+ redirect = url_for( controller='tool_runner', action='index', tool_id=tool_id, **kwd )
return trans.response.send_redirect( url_for( controller='user',
action='login',
cntrller='user',
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/eba52974bfaf/
Changeset: eba52974bfaf
User: dan
Date: 2013-04-18 15:50:27
Summary: Fix for old style display application link generation broken by recent route changes.
Affected #: 1 file
diff -r 781431663eb1c18bb9ab32eb8f3eff59e0848efb -r eba52974bfaf93806b5177363a1d731190e28d8b lib/galaxy/datatypes/interval.py
--- a/lib/galaxy/datatypes/interval.py
+++ b/lib/galaxy/datatypes/interval.py
@@ -244,7 +244,7 @@
# Accumulate links for valid sites
ret_val = []
for site_name, site_url in valid_sites:
- internal_url = url_for( controller='/dataset', dataset_id=dataset.id,
+ internal_url = url_for( controller='dataset', dataset_id=dataset.id,
action='display_at', filename='ucsc_' + site_name )
display_url = urllib.quote_plus( "%s%s/display_as?id=%i&display_app=%s&authz_method=display_at"
% (base_url, url_for( controller='root' ), dataset.id, type) )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/d056556a1e63/
Changeset: d056556a1e63
Branch: stable
User: inithello
Date: 2013-04-17 20:31:27
Summary: Improve recording of invalid tests. Check for the existence of each file required by functional tests.
Affected #: 1 file
diff -r c6d032ef7e420b158248aeb71c65914799a78122 -r d056556a1e632d7f0fd7f9d195538c8e5c6d917b lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -167,21 +167,22 @@
no_tools = 0
valid_revisions = 0
invalid_revisions = 0
- # Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test
- # or tools_functionally_correct. Also filter out changeset revisions that are not downloadable, because it's redundant to test a revision that
- # a user can't install.
- # Initialize the repository_status dict with the test environment, but leave the test_errors empty.
- repository_status = {}
- repository_status[ 'invalid_tests' ] = []
+ # Get the list of metadata records to check for functional tests and test data. Limit this to records that have not been flagged do_not_test,
+ # since there's no need to check them again if they won't be tested anyway. Also filter out changeset revisions that are not downloadable,
+ # because it's redundant to test a revision that a user can't install.
metadata_records_to_check = app.sa_session.query( app.model.RepositoryMetadata ) \
.filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
app.model.RepositoryMetadata.table.c.includes_tools == True,
- app.model.RepositoryMetadata.table.c.do_not_test == False,
- app.model.RepositoryMetadata.table.c.tools_functionally_correct == False ) ) \
+ app.model.RepositoryMetadata.table.c.do_not_test == False ) ) \
.all()
for metadata_record in metadata_records_to_check:
+ # Initialize the repository_status dict with the test environment, but leave the test_errors empty.
+ repository_status = {}
if metadata_record.tool_test_errors:
repository_status = metadata_record.tool_test_errors
+ # Clear any old invalid tests for this metadata revision, since this could lead to duplication of invalid test rows,
+ # or tests incorrectly labeled as invalid.
+ repository_status[ 'invalid_tests' ] = []
if 'test_environment' in repository_status:
repository_status[ 'test_environment' ] = get_test_environment( repository_status[ 'test_environment' ] )
else:
@@ -192,7 +193,6 @@
name = metadata_record.repository.name
owner = metadata_record.repository.user.username
changeset_revision = str( metadata_record.changeset_revision )
- repository_status[ 'invalid_tests' ] = []
if metadata_record.repository.id not in checked_repository_ids:
checked_repository_ids.append( metadata_record.repository.id )
if verbosity >= 1:
@@ -218,15 +218,13 @@
dirs.remove( '.hg' )
if 'test-data' in dirs:
has_test_data = True
+ test_data_path = os.path.join( root, dirs[ dirs.index( 'test-data' ) ] )
break
- # Remove the cloned repository path.
- if os.path.exists( work_dir ):
- shutil.rmtree( work_dir )
if verbosity >= 1:
if not has_test_data:
- print '# Test data missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ print '# Test data directory missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
else:
- print '# Test data found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ print '# Test data directory found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
( changeset_revision, name, owner )
# Loop through all the tools in this metadata record, checking each one for defined functional tests.
@@ -255,12 +253,22 @@
has_tests += 1
failure_reason = ''
problem_found = False
+ missing_test_files = []
+ if tool_has_tests and has_test_data:
+ missing_test_files = check_for_missing_test_files( tool_metadata[ 'tests' ], test_data_path )
+ if missing_test_files:
+ if verbosity >= 2:
+ print "# Tool ID '%s' in changeset revision %s of %s is missing one or more required test files: %s" % \
+ ( tool_id, changeset_revision, name, ', '.join( missing_test_files ) )
if not has_test_data:
failure_reason += 'Repository does not have a test-data directory. '
problem_found = True
if not tool_has_tests:
failure_reason += 'Functional test definitions missing for %s. ' % tool_id
problem_found = True
+ if missing_test_files:
+ failure_reason += 'One or more test files are missing for tool %s: %s' % ( tool_id, ', '.join( missing_test_files ) )
+ problem_found = True
test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid,
reason_test_is_invalid=failure_reason )
# The repository_metadata.tool_test_errors attribute should always have the following structure:
@@ -312,6 +320,9 @@
if problem_found:
if test_errors not in repository_status[ 'invalid_tests' ]:
repository_status[ 'invalid_tests' ].append( test_errors )
+ # Remove the cloned repository path. This has to be done after the check for required test files, for obvious reasons.
+ if os.path.exists( work_dir ):
+ shutil.rmtree( work_dir )
if not repository_status[ 'invalid_tests' ]:
valid_revisions += 1
if verbosity >= 1:
@@ -319,17 +330,21 @@
else:
invalid_revisions += 1
if verbosity >= 1:
- print '# Some tools missing functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ print '# Some tools have problematic functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
+ if verbosity >= 2:
+ for invalid_test in repository_status[ 'invalid_tests' ]:
+ if 'reason_test_is_invalid' in invalid_test:
+ print '# %s' % invalid_test[ 'reason_test_is_invalid' ]
if not info_only:
# If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform
# on which this script was run.
if repository_status[ 'invalid_tests' ]:
# If functional test definitions or test data are missing, set do_not_test = True if and only if:
- # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision. In this case,
- # the revision will never be updated with correct data, and re-testing it would be redundant.
- # b) There are one or more downloadable revisions, and the revision being tested is the most recent downloadable revision. In this case, if
- # the repository is updated with test data or functional tests, the downloadable changeset revision that was tested will be replaced
- # with the new changeset revision, which will be automatically tested.
+ # a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision.
+ # In this case, the revision will never be updated with correct data, and re-testing it would be redundant.
+ # b) There are one or more downloadable revisions, and the revision being tested is the most recent downloadable revision.
+ # In this case, if the repository is updated with test data or functional tests, the downloadable changeset revision
+ # that was tested will be replaced with the new changeset revision, which will be automatically tested.
if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ):
metadata_record.do_not_test = True
metadata_record.tools_functionally_correct = False
@@ -357,6 +372,25 @@
changelog_tuples.append( ( ctx.rev(), str( ctx ) ) )
return changelog_tuples
+def check_for_missing_test_files( test_definition, test_data_path ):
+ '''Process the tool's functional test definitions and check for each file specified as an input or output.'''
+ missing_test_files = []
+ required_test_files = []
+ for test_dict in test_definition:
+ for input_file in test_dict[ 'required_files' ]:
+ if input_file not in required_test_files:
+ required_test_files.append( input_file )
+ for output in test_dict[ 'outputs' ]:
+ fieldname, filename = output
+ if filename not in required_test_files:
+ required_test_files.append( filename )
+ # Make sure each specified file actually does exist in the test data path of the cloned repository.
+ for required_file in required_test_files:
+ required_file_full_path = os.path.join( test_data_path, required_file )
+ if not os.path.exists( required_file_full_path ):
+ missing_test_files.append( required_file )
+ return missing_test_files
+
def is_most_recent_downloadable_revision( app, repository, changeset_revision, downloadable_revisions ):
# Get a list of ( numeric revision, changeset hash ) tuples from the changelog.
changelog = get_repo_changelog_tuples( repository.repo_path( app ) )
@@ -376,8 +410,10 @@
a) There are multiple downloadable revisions, and the provided changeset revision is not the most recent downloadable revision. In this case,
the revision will never be updated with correct data, and re-testing it would be redundant.
b) There are one or more downloadable revisions, and the provided changeset revision is the most recent downloadable revision. In this case, if
- the repository is updated with test data or functional tests, the downloadable changeset revision that was tested will be replaced
- with the new changeset revision, which will be automatically tested.
+ the repository is updated with test data or functional tests, the downloadable changeset revision that was tested will either be replaced
+ with the new changeset revision, or a new downloadable changeset revision will be created, either of which will be automatically checked and
+ flagged as appropriate. In the install and test script, this behavior is slightly different, since we do want to always run functional tests
+ on the most recent downloadable changeset revision.
'''
metadata_records = app.sa_session.query( app.model.RepositoryMetadata ) \
.filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
https://bitbucket.org/galaxy/galaxy-central/commits/ae53deed05b6/
Changeset: ae53deed05b6
Branch: stable
User: inithello
Date: 2013-04-17 22:11:20
Summary: Account for condition where output filename is not defined.
Affected #: 1 file
diff -r d056556a1e632d7f0fd7f9d195538c8e5c6d917b -r ae53deed05b68f6febfcf3472c2a11514c12eb0c lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -382,6 +382,9 @@
required_test_files.append( input_file )
for output in test_dict[ 'outputs' ]:
fieldname, filename = output
+ # In rare cases, the filename may be None. If that is the case, skip that output definition.
+ if filename is None:
+ continue
if filename not in required_test_files:
required_test_files.append( filename )
# Make sure each specified file actually does exist in the test data path of the cloned repository.
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/7d5a1290587c/
Changeset: 7d5a1290587c
User: dannon
Date: 2013-04-18 04:28:09
Summary: Web Framework: Adjust logic for creating new sessions in the case of use_remote_user -- was preventing some display applications from functioning correctly
Affected #: 1 file
diff -r 6d84efb087eb040bef8d388d1ca9c29da3f48725 -r 7d5a1290587ccd49ad06d7a85adc571410cc305c lib/galaxy/web/framework/__init__.py
--- a/lib/galaxy/web/framework/__init__.py
+++ b/lib/galaxy/web/framework/__init__.py
@@ -539,27 +539,29 @@
.filter( and_( self.app.model.GalaxySession.table.c.session_key==session_key,
self.app.model.GalaxySession.table.c.is_valid==True ) ) \
.first()
- # If remote user is in use it can invalidate the session, so we need to to check some things now.
- if self.app.config.use_remote_user:
- #If this is an api request, and they've passed a key, we let this go.
- assert "HTTP_REMOTE_USER" in self.environ, \
- "use_remote_user is set but no HTTP_REMOTE_USER variable"
- remote_user_email = self.environ[ 'HTTP_REMOTE_USER' ]
- if galaxy_session:
- # An existing session, make sure correct association exists
- if galaxy_session.user is None:
- # No user, associate
- galaxy_session.user = self.get_or_create_remote_user( remote_user_email )
- galaxy_session_requires_flush = True
- elif galaxy_session.user.email != remote_user_email:
- # Session exists but is not associated with the correct remote user
- invalidate_existing_session = True
- user_for_new_session = self.get_or_create_remote_user( remote_user_email )
- log.warning( "User logged in as '%s' externally, but has a cookie as '%s' invalidating session",
- remote_user_email, galaxy_session.user.email )
- else:
- # No session exists, get/create user for new session
+ # If remote user is in use it can invalidate the session and in some
+ # cases won't have a cookie set above, so we need to to check some
+ # things now.
+ if self.app.config.use_remote_user:
+ #If this is an api request, and they've passed a key, we let this go.
+ assert "HTTP_REMOTE_USER" in self.environ, \
+ "use_remote_user is set but no HTTP_REMOTE_USER variable"
+ remote_user_email = self.environ[ 'HTTP_REMOTE_USER' ]
+ if galaxy_session:
+ # An existing session, make sure correct association exists
+ if galaxy_session.user is None:
+ # No user, associate
+ galaxy_session.user = self.get_or_create_remote_user( remote_user_email )
+ galaxy_session_requires_flush = True
+ elif galaxy_session.user.email != remote_user_email:
+ # Session exists but is not associated with the correct remote user
+ invalidate_existing_session = True
user_for_new_session = self.get_or_create_remote_user( remote_user_email )
+ log.warning( "User logged in as '%s' externally, but has a cookie as '%s' invalidating session",
+ remote_user_email, galaxy_session.user.email )
+ else:
+ # No session exists, get/create user for new session
+ user_for_new_session = self.get_or_create_remote_user( remote_user_email )
else:
if galaxy_session is not None and galaxy_session.user and galaxy_session.user.external:
# Remote user support is not enabled, but there is an existing
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.