galaxy-commits
Threads by month
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2010 -----
- December
- November
- October
- September
- August
- July
- June
- May
March 2013
- 1 participant
- 183 discussions
4 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/8e00d2fd674a/
changeset: 8e00d2fd674a
user: jmchilton
date: 2013-02-10 17:16:26
summary: Breakup big get_path_paste_uploaded_datasets function in library_common.py into smaller more extensible pieces. This is useful in implementing multiple file dataset uploads downstream but is also a good refactoring on its own.
affected #: 1 file
diff -r 506484344db3a370f8ae24096041d38557d1967e -r 8e00d2fd674acafb36e7118afaaf7f739235be2e lib/galaxy/webapps/galaxy/controllers/library_common.py
--- a/lib/galaxy/webapps/galaxy/controllers/library_common.py
+++ b/lib/galaxy/webapps/galaxy/controllers/library_common.py
@@ -1075,6 +1075,7 @@
return output
def make_library_uploaded_dataset( self, trans, cntrller, params, name, path, type, library_bunch, in_folder=None ):
link_data_only = params.get( 'link_data_only', 'copy_files' )
+ file_type = params.file_type
library_bunch.replace_dataset = None # not valid for these types of upload
uploaded_dataset = util.bunch.Bunch()
new_name = name
@@ -1089,7 +1090,7 @@
uploaded_dataset.path = path
uploaded_dataset.type = type
uploaded_dataset.ext = None
- uploaded_dataset.file_type = params.file_type
+ uploaded_dataset.file_type = file_type
uploaded_dataset.dbkey = params.dbkey
uploaded_dataset.space_to_tab = params.space_to_tab
if in_folder:
@@ -1145,44 +1146,57 @@
uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, file, 'server_dir', library_bunch ) )
return uploaded_datasets, 200, None
def get_path_paste_uploaded_datasets( self, trans, cntrller, params, library_bunch, response_code, message ):
+ preserve_dirs = util.string_as_bool( params.get( 'preserve_dirs', False ) )
+ uploaded_datasets = []
+ (files_and_folders, _response_code, _message) = self._get_path_files_and_folders(params, preserve_dirs)
+ if _response_code:
+ return (uploaded_datasets, _response_code, _message)
+ for (path, name, folder) in files_and_folders:
+ uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, path, 'path_paste', library_bunch, folder ) )
+ return uploaded_datasets, 200, None
+
+ def _get_path_files_and_folders( self, params, preserve_dirs ):
+ problem_response = self._check_path_paste_params( params )
+ if problem_response:
+ return problem_response
+ files_and_folders = []
+ for (line, path) in self._paths_list( params ):
+ line_files_and_folders = self._get_single_path_files_and_folders( line, path, preserve_dirs )
+ files_and_folders.extend( line_files_and_folders )
+ return files_and_folders, None, None
+
+ def _get_single_path_files_and_folders(self, line, path, preserve_dirs):
+ files_and_folders = []
+ if os.path.isfile( path ):
+ name = os.path.basename( path )
+ files_and_folders.append((path, name, None))
+ for basedir, dirs, files in os.walk( line ):
+ for file in files:
+ file_path = os.path.abspath( os.path.join( basedir, file ) )
+ if preserve_dirs:
+ in_folder = os.path.dirname( file_path.replace( path, '', 1 ).lstrip( '/' ) )
+ else:
+ in_folder = None
+ files_and_folders.append((file_path, file, in_folder))
+ return files_and_folders
+ def _paths_list(self, params):
+ return [ (l.strip(), os.path.abspath(l.strip())) for l in params.filesystem_paths.splitlines() if l.strip() ]
+
+ def _check_path_paste_params(self, params):
if params.get( 'filesystem_paths', '' ) == '':
message = "No paths entered in the upload form"
response_code = 400
return None, response_code, message
- preserve_dirs = util.string_as_bool( params.get( 'preserve_dirs', False ) )
- # locate files
bad_paths = []
- uploaded_datasets = []
- for line in [ l.strip() for l in params.filesystem_paths.splitlines() if l.strip() ]:
- path = os.path.abspath( line )
+ for (_, path) in self._paths_list( params ):
if not os.path.exists( path ):
bad_paths.append( path )
- continue
- # don't bother processing if we're just going to return an error
- if not bad_paths:
- if os.path.isfile( path ):
- name = os.path.basename( path )
- uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, path, 'path_paste', library_bunch ) )
- for basedir, dirs, files in os.walk( line ):
- for file in files:
- file_path = os.path.abspath( os.path.join( basedir, file ) )
- if preserve_dirs:
- in_folder = os.path.dirname( file_path.replace( path, '', 1 ).lstrip( '/' ) )
- else:
- in_folder = None
- uploaded_datasets.append( self.make_library_uploaded_dataset( trans,
- cntrller,
- params,
- file,
- file_path,
- 'path_paste',
- library_bunch,
- in_folder ) )
if bad_paths:
message = "Invalid paths:<br><ul><li>%s</li></ul>" % "</li><li>".join( bad_paths )
response_code = 400
return None, response_code, message
- return uploaded_datasets, 200, None
+ return None
+
@web.expose
def add_history_datasets_to_library( self, trans, cntrller, library_id, folder_id, hda_ids='', **kwd ):
params = util.Params( kwd )
https://bitbucket.org/galaxy/galaxy-central/commits/c85bf30a5c35/
changeset: c85bf30a5c35
user: jmchilton
date: 2013-02-10 17:16:27
summary: Breakup big get_server_dir_uploaded_datasets function in library_common.py into smaller more extensible pieces. This is useful in implementing multiple file dataset uploads downstream but is also a good refactoring on its own.
affected #: 1 file
diff -r 8e00d2fd674acafb36e7118afaaf7f739235be2e -r c85bf30a5c35a7fab80d9499b0852eeb2e433039 lib/galaxy/webapps/galaxy/controllers/library_common.py
--- a/lib/galaxy/webapps/galaxy/controllers/library_common.py
+++ b/lib/galaxy/webapps/galaxy/controllers/library_common.py
@@ -1106,6 +1106,16 @@
trans.sa_session.flush()
return uploaded_dataset
def get_server_dir_uploaded_datasets( self, trans, cntrller, params, full_dir, import_dir_desc, library_bunch, response_code, message ):
+ dir_response = self._get_server_dir_files(params, full_dir, import_dir_desc)
+ files = dir_response[0]
+ if not files:
+ return dir_response
+ uploaded_datasets = []
+ for file in files:
+ name = os.path.basename( file )
+ uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, file, 'server_dir', library_bunch ) )
+ return uploaded_datasets, 200, None
+ def _get_server_dir_files( self, params, full_dir, import_dir_desc ):
files = []
try:
for entry in os.listdir( full_dir ):
@@ -1140,11 +1150,7 @@
message = "The directory '%s' contains no valid files" % full_dir
response_code = 400
return None, response_code, message
- uploaded_datasets = []
- for file in files:
- name = os.path.basename( file )
- uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, file, 'server_dir', library_bunch ) )
- return uploaded_datasets, 200, None
+ return files, None, None
def get_path_paste_uploaded_datasets( self, trans, cntrller, params, library_bunch, response_code, message ):
preserve_dirs = util.string_as_bool( params.get( 'preserve_dirs', False ) )
uploaded_datasets = []
https://bitbucket.org/galaxy/galaxy-central/commits/e1e8ddf3401a/
changeset: e1e8ddf3401a
user: dannon
date: 2013-03-08 20:32:34
summary: Merge pull request 108, https://bitbucket.org/galaxy/galaxy-central/pull-request/108/collaborate-on…
affected #: 1 file
diff -r a37fbe9cbd93a4e73a765d50be3aca4f31968f0b -r e1e8ddf3401ab1e028fbebd9fa8f4307b8d3ae03 lib/galaxy/webapps/galaxy/controllers/library_common.py
--- a/lib/galaxy/webapps/galaxy/controllers/library_common.py
+++ b/lib/galaxy/webapps/galaxy/controllers/library_common.py
@@ -1077,6 +1077,7 @@
def make_library_uploaded_dataset( self, trans, cntrller, params, name, path, type, library_bunch, in_folder=None ):
link_data_only = params.get( 'link_data_only', 'copy_files' )
uuid_str = params.get( 'uuid', None )
+ file_type = params.file_type
library_bunch.replace_dataset = None # not valid for these types of upload
uploaded_dataset = util.bunch.Bunch()
new_name = name
@@ -1091,7 +1092,7 @@
uploaded_dataset.path = path
uploaded_dataset.type = type
uploaded_dataset.ext = None
- uploaded_dataset.file_type = params.file_type
+ uploaded_dataset.file_type = file_type
uploaded_dataset.dbkey = params.dbkey
uploaded_dataset.space_to_tab = params.space_to_tab
if in_folder:
@@ -1108,6 +1109,16 @@
trans.sa_session.flush()
return uploaded_dataset
def get_server_dir_uploaded_datasets( self, trans, cntrller, params, full_dir, import_dir_desc, library_bunch, response_code, message ):
+ dir_response = self._get_server_dir_files(params, full_dir, import_dir_desc)
+ files = dir_response[0]
+ if not files:
+ return dir_response
+ uploaded_datasets = []
+ for file in files:
+ name = os.path.basename( file )
+ uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, file, 'server_dir', library_bunch ) )
+ return uploaded_datasets, 200, None
+ def _get_server_dir_files( self, params, full_dir, import_dir_desc ):
files = []
try:
for entry in os.listdir( full_dir ):
@@ -1142,50 +1153,59 @@
message = "The directory '%s' contains no valid files" % full_dir
response_code = 400
return None, response_code, message
+ return files, None, None
+ def get_path_paste_uploaded_datasets( self, trans, cntrller, params, library_bunch, response_code, message ):
+ preserve_dirs = util.string_as_bool( params.get( 'preserve_dirs', False ) )
uploaded_datasets = []
- for file in files:
- name = os.path.basename( file )
- uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, file, 'server_dir', library_bunch ) )
+ (files_and_folders, _response_code, _message) = self._get_path_files_and_folders(params, preserve_dirs)
+ if _response_code:
+ return (uploaded_datasets, _response_code, _message)
+ for (path, name, folder) in files_and_folders:
+ uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, path, 'path_paste', library_bunch, folder ) )
return uploaded_datasets, 200, None
- def get_path_paste_uploaded_datasets( self, trans, cntrller, params, library_bunch, response_code, message ):
+
+ def _get_path_files_and_folders( self, params, preserve_dirs ):
+ problem_response = self._check_path_paste_params( params )
+ if problem_response:
+ return problem_response
+ files_and_folders = []
+ for (line, path) in self._paths_list( params ):
+ line_files_and_folders = self._get_single_path_files_and_folders( line, path, preserve_dirs )
+ files_and_folders.extend( line_files_and_folders )
+ return files_and_folders, None, None
+
+ def _get_single_path_files_and_folders(self, line, path, preserve_dirs):
+ files_and_folders = []
+ if os.path.isfile( path ):
+ name = os.path.basename( path )
+ files_and_folders.append((path, name, None))
+ for basedir, dirs, files in os.walk( line ):
+ for file in files:
+ file_path = os.path.abspath( os.path.join( basedir, file ) )
+ if preserve_dirs:
+ in_folder = os.path.dirname( file_path.replace( path, '', 1 ).lstrip( '/' ) )
+ else:
+ in_folder = None
+ files_and_folders.append((file_path, file, in_folder))
+ return files_and_folders
+ def _paths_list(self, params):
+ return [ (l.strip(), os.path.abspath(l.strip())) for l in params.filesystem_paths.splitlines() if l.strip() ]
+
+ def _check_path_paste_params(self, params):
if params.get( 'filesystem_paths', '' ) == '':
message = "No paths entered in the upload form"
response_code = 400
return None, response_code, message
- preserve_dirs = util.string_as_bool( params.get( 'preserve_dirs', False ) )
- # locate files
bad_paths = []
- uploaded_datasets = []
- for line in [ l.strip() for l in params.filesystem_paths.splitlines() if l.strip() ]:
- path = os.path.abspath( line )
+ for (_, path) in self._paths_list( params ):
if not os.path.exists( path ):
bad_paths.append( path )
- continue
- # don't bother processing if we're just going to return an error
- if not bad_paths:
- if os.path.isfile( path ):
- name = os.path.basename( path )
- uploaded_datasets.append( self.make_library_uploaded_dataset( trans, cntrller, params, name, path, 'path_paste', library_bunch ) )
- for basedir, dirs, files in os.walk( line ):
- for file in files:
- file_path = os.path.abspath( os.path.join( basedir, file ) )
- if preserve_dirs:
- in_folder = os.path.dirname( file_path.replace( path, '', 1 ).lstrip( '/' ) )
- else:
- in_folder = None
- uploaded_datasets.append( self.make_library_uploaded_dataset( trans,
- cntrller,
- params,
- file,
- file_path,
- 'path_paste',
- library_bunch,
- in_folder ) )
if bad_paths:
message = "Invalid paths:<br><ul><li>%s</li></ul>" % "</li><li>".join( bad_paths )
response_code = 400
return None, response_code, message
- return uploaded_datasets, 200, None
+ return None
+
@web.expose
def add_history_datasets_to_library( self, trans, cntrller, library_id, folder_id, hda_ids='', **kwd ):
params = util.Params( kwd )
https://bitbucket.org/galaxy/galaxy-central/commits/c937f80188d8/
changeset: c937f80188d8
user: dannon
date: 2013-03-08 20:41:10
summary: Merge
affected #: 3 files
diff -r e1e8ddf3401ab1e028fbebd9fa8f4307b8d3ae03 -r c937f80188d8026f2074f8a897e2ff2972d8da6d lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -38,7 +38,16 @@
'''Script that checks repositories to see if the tools contained within them have functional tests defined.'''
parser = OptionParser()
parser.add_option( "-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False )
- parser.add_option( "-v", "--verbose", action="store_true", dest="verbose", help="verbose mode, print the name, owner, and changeset revision of each repository", default=False )
+ parser.add_option(
+ "-v", "--verbose",
+ action="count", dest="verbosity",
+ default=1,
+ help="Control the amount of detail in the log output.")
+ parser.add_option(
+ "--verbosity", action="store", dest="verbosity",
+ metavar='VERBOSITY',
+ type="int", help="Control the amount of detail in the log output. --verbosity=1 is "
+ "the same as -v")
( options, args ) = parser.parse_args()
ini_file = args[0]
config_parser = ConfigParser.ConfigParser( {'here':os.getcwd()} )
@@ -56,12 +65,12 @@
if options.info_only:
print "# Displaying info only ( --info_only )"
- if options.verbose:
- print "# Displaying extra information ( --verbose )"
+ if options.verbosity:
+ print "# Displaying extra information ( --verbosity = %d )" % options.verbosity
- check_and_flag_repositories( app, info_only=options.info_only, verbose=options.verbose )
+ check_and_flag_repositories( app, info_only=options.info_only, verbosity=options.verbosity )
-def check_and_flag_repositories( app, info_only=False, verbose=False ):
+def check_and_flag_repositories( app, info_only=False, verbosity=1 ):
'''
This method will iterate through all records in the repository_metadata table, checking each one for tool metadata,
then checking the tool metadata for tests.
@@ -97,10 +106,15 @@
and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script will also mark the revision
not to be tested.
- If any error is encountered, the script will update the repository_metadata.tool_test_errors attribute with the following structure:
+ If any error is encountered, the script will update the repository_metadata.tool_test_errors attribute following this structure:
{
"test_environment":
{
+ "galaxy_revision": "9001:abcd1234",
+ "galaxy_database_version": "114",
+ "tool_shed_revision": "9001:abcd1234",
+ "tool_shed_mercurial_version": "2.3.1",
+ "tool_shed_database_version": "17",
"python_version": "2.7.2",
"architecture": "x86_64",
"system": "Darwin 12.2.0"
@@ -108,8 +122,28 @@
"test_errors":
[
{
- "test_id": "Something that will easily identify what the problem is",
- "stdout": "The output of the test, or a more detailed description of what was tested and why it failed."
+ "test_id": "The test ID, generated by twill",
+ "tool_id": "The tool ID that was tested",
+ "tool_version": "The tool version that was tested",
+ "stderr": "The output of the test, or a more detailed description of what was tested and what the error was."
+ "traceback": "The traceback, if any."
+ },
+ ]
+ "passed_tests":
+ [
+ {
+ "test_id": "The test ID, generated by twill",
+ "tool_id": "The tool ID that was tested",
+ "tool_version": "The tool version that was tested",
+ },
+ ]
+ "invalid_tests":
+ [
+ {
+ "tool_id": "The tool ID that does not have functional tests defined.",
+ "tool_version": "The version of the tool."
+ "tool_guid": "The guid of the tool."
+ "reason_test_is_invalid": "A short explanation of what is invalid.
},
]
}
@@ -129,19 +163,24 @@
repository_status[ 'test_environment' ][ 'tool_shed_database_version' ] = get_database_version( app )
repository_status[ 'test_environment' ][ 'tool_shed_mercurial_version' ] = __version__.version
repository_status[ 'test_environment' ][ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
- repository_status[ 'test_errors' ] = []
+ repository_status[ 'invalid_tests' ] = []
metadata_records_to_check = app.sa_session.query( app.model.RepositoryMetadata ) \
.filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
app.model.RepositoryMetadata.table.c.do_not_test == False,
app.model.RepositoryMetadata.table.c.tools_functionally_correct == False ) ) \
.all()
for metadata_record in metadata_records_to_check:
- repository_status[ 'test_errors' ] = []
+ if metadata_record.tool_test_errors:
+ repository_status = metadata_record.tool_test_errors
name = metadata_record.repository.name
owner = metadata_record.repository.user.username
changeset_revision = str( metadata_record.changeset_revision )
+ repository_status[ 'invalid_tests' ] = []
if metadata_record.repository.id not in checked_repository_ids:
checked_repository_ids.append( metadata_record.repository.id )
+ if verbosity >= 1:
+ print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
+ ( changeset_revision, name, owner )
# If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
# only repositories that contain tools.
if 'tools' not in metadata_record.metadata:
@@ -152,19 +191,26 @@
for tool_metadata in metadata_record.metadata[ 'tools' ]:
tool_count += 1
tool_id = tool_metadata[ 'id' ]
- if verbose:
- print '# Checking for functional tests in changeset revision %s of %s, tool ID %s.' % \
- ( changeset_revision, name, tool_id )
+ tool_version = tool_metadata[ 'version' ]
+ tool_guid = tool_metadata[ 'guid' ]
+ if verbosity >= 2:
+ print "# Checking tool ID '%s' in changeset revision %s of %s." % \
+ ( tool_id, changeset_revision, name )
# If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails,
# not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the
# automated functional test framework produces.
if 'tests' not in tool_metadata or not tool_metadata[ 'tests' ]:
- if verbose:
+ if verbosity >= 2:
print '# No functional tests defined for %s.' % tool_id
# The repository_metadata.tool_test_errors attribute should always have the following structure:
# {
# "test_environment":
# {
+ # "galaxy_revision": "9001:abcd1234",
+ # "galaxy_database_version": "114",
+ # "tool_shed_revision": "9001:abcd1234",
+ # "tool_shed_mercurial_version": "2.3.1",
+ # "tool_shed_database_version": "17",
# "python_version": "2.7.2",
# "architecture": "x86_64",
# "system": "Darwin 12.2.0"
@@ -172,22 +218,44 @@
# "test_errors":
# [
# {
- # "test_id": "Something that will easily identify what the problem is",
- # "stderr": "The output of the test, or a more detailed description of what was tested and why it failed."
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+ # "traceback": "The captured traceback."
+ # },
+ # ]
+ # "passed_tests":
+ # [
+ # {
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+ # },
+ # ]
+ # "invalid_tests":
+ # [
+ # {
+ # "tool_id": "The tool ID that does not have functional tests defined.",
+ # "tool_version": "The version of the tool."
+ # "tool_guid": "The guid of the tool."
+ # "reason_test_is_invalid": "A short explanation of what is invalid.
# },
# ]
# }
- # Optionally, "stdout" and "traceback" may be included in a test_errors dict, if they are relevant.
- test_id = 'Functional tests for %s' % tool_id
- test_errors = dict( stderr='No functional tests defined for tool %s in changeset revision %s of repository %s owned by %s.' % \
- ( tool_id, changeset_revision, name, owner ) )
- repository_status[ 'test_errors' ].append( test_errors )
- repository_status[ 'status' ] = 'failed'
+ # Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other
+ # than the list relevant to what it is testing.
+ test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid )
+ repository_status[ 'invalid_tests' ].append( test_errors )
no_tests += 1
else:
+ if verbosity >= 2:
+ print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
+ ( tool_id, changeset_revision, name )
has_tests += 1
- if verbose:
- if not repository_status[ 'test_errors' ]:
+ if verbosity >= 1:
+ if not repository_status[ 'invalid_tests' ]:
print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
else:
print '# Some tools missing functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
@@ -211,12 +279,17 @@
if os.path.exists( work_dir ):
shutil.rmtree( work_dir )
if not has_test_data:
- if verbose:
+ if verbosity >= 1:
print '# Test data missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
# The repository_metadata.tool_test_errors attribute should always have the following structure:
# {
# "test_environment":
# {
+ # "galaxy_revision": "9001:abcd1234",
+ # "galaxy_database_version": "114",
+ # "tool_shed_revision": "9001:abcd1234",
+ # "tool_shed_mercurial_version": "2.3.1",
+ # "tool_shed_database_version": "17",
# "python_version": "2.7.2",
# "architecture": "x86_64",
# "system": "Darwin 12.2.0"
@@ -224,23 +297,44 @@
# "test_errors":
# [
# {
- # "test_id": "Something that will easily identify what the problem is",
- # "stderr": "The output of the test, or a more detailed description of what was tested and why it failed."
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+ # "traceback": "The captured traceback."
+ # },
+ # ]
+ # "passed_tests":
+ # [
+ # {
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+ # },
+ # ]
+ # "invalid_tests":
+ # [
+ # {
+ # "tool_id": "The tool ID that does not have functional tests defined.",
+ # "tool_version": "The version of the tool."
+ # "tool_guid": "The guid of the tool."
+ # "reason_test_is_invalid": "A short explanation of what is invalid.
# },
# ]
# }
- # Optionally, "stdout" and "traceback" may be included in a test_errors dict, if they are relevant.
- test_id = 'Find functional test data for %s' % metadata_record.repository.name
- test_errors = dict( stderr='No test data found for changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) )
- repository_status[ 'test_errors' ].append( test_errors )
- repository_status[ 'status' ] = 'failed'
+ # Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other
+ # than the list relevant to what it is testing.
+ test_errors = dict( tool_id=None, tool_version=None, tool_guid=None,
+ reason_test_is_invalid="Repository %s is missing a test-data directory." % name )
+ repository_status[ 'invalid_tests' ].append( test_errors )
else:
- if verbose:
+ if verbosity >= 1:
print '# Test data found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
if not info_only:
# If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform
# on which this script was run.
- if repository_status[ 'test_errors' ]:
+ if repository_status[ 'invalid_tests' ]:
# If functional test definitions or test data are missing, set do_not_test = True if and only if:
# a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision. In this case,
# the revision will never be updated with correct data, and re-testing it would be redundant.
@@ -250,8 +344,6 @@
if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ):
metadata_record.do_not_test = True
metadata_record.tools_functionally_correct = False
- else:
- repository_status[ 'status' ] = 'passed'
metadata_record.tool_test_errors = repository_status
metadata_record.time_last_tested = datetime.utcnow()
app.sa_session.add( metadata_record )
diff -r e1e8ddf3401ab1e028fbebd9fa8f4307b8d3ae03 -r c937f80188d8026f2074f8a897e2ff2972d8da6d test/base/util.py
--- a/test/base/util.py
+++ b/test/base/util.py
@@ -74,8 +74,8 @@
return None, repository_name, changeset_revision
return last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision
-def get_test_environment():
- rval = {}
+def get_test_environment( current_environment={} ):
+ rval = current_environment
rval[ 'python_version' ] = platform.python_version()
rval[ 'architecture' ] = platform.machine()
os, hostname, os_version, uname, arch, processor = platform.uname()
diff -r e1e8ddf3401ab1e028fbebd9fa8f4307b8d3ae03 -r c937f80188d8026f2074f8a897e2ff2972d8da6d test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -60,6 +60,7 @@
import nose.config
import nose.loader
import nose.plugins.manager
+from nose.plugins import Plugin
from base.util import parse_tool_panel_config, get_database_version, get_test_environment, get_repository_current_revision
@@ -115,9 +116,36 @@
else:
galaxy_encode_secret = os.environ[ 'GALAXY_INSTALL_TEST_SECRET' ]
+
+class ReportResults( Plugin ):
+ '''Simple Nose plugin to record the IDs of all tests run, regardless of success.'''
+ name = "reportresults"
+ passed = []
+
+ def options( self, parser, env=os.environ ):
+ super( ReportResults, self ).options( parser, env=env )
+
+ def configure(self, options, conf):
+ super( ReportResults, self ).configure( options, conf )
+ if not self.enabled:
+ return
+
+ def addSuccess( self, test ):
+ '''Only record test IDs that correspond to tool functional tests.'''
+ if 'TestForTool' in test.id():
+ test_id = test.id()
+ # Rearrange the test ID to match the format that is produced in test_results.failures
+ test_id_parts = test_id.split( '.' )
+ fixed_test_id = '%s (%s)' % ( test_id_parts[ -1 ], '.'.join( test_id_parts[ :-1 ] ) )
+ self.passed.append( fixed_test_id )
+
+ def getTestStatus( self ):
+ return self.passed
+
def execute_uninstall_method( repository_dict ):
# Delete any configured tool functional tests from the test_toolbox.__dict__, otherwise nose will find them
- # and try to re-run the tests after uninstalling the repository.
+ # and try to re-run the tests after uninstalling the repository, which will cause false failure reports,
+ # since the test data has been deleted from disk by now.
tests_to_delete = []
for key in test_toolbox.__dict__:
if key.startswith( 'TestForTool_' ):
@@ -132,7 +160,7 @@
test_config.configure( sys.argv )
# Run the uninstall method. This method uses the Galaxy web interface to uninstall the previously installed
# repository and delete it from disk.
- result = run_tests( test_config )
+ result, _ = run_tests( test_config )
success = result.wasSuccessful()
return success
@@ -191,6 +219,23 @@
else:
raise AssertonError( 'Unknown format %s.' % format )
+def get_tool_info_from_test_id( test_id ):
+ '''
+ Test IDs come in the form test_tool_number (functional.test_toolbox.TestForTool_toolshed_url/repos/owner/repository_name/tool_id/tool_version)
+ We want the tool ID and tool version.
+ '''
+ parts = test_id.replace( ')', '' ).split( '/' )
+ tool_version = parts[ -1 ]
+ tool_id = parts[ -2 ]
+ return tool_id, tool_version
+
+def get_tool_test_errors_from_api( tool_shed_url, metadata_dict ):
+ params = dict()
+ api_path = metadata_dict[ 'url' ].split( '/' )
+ api_url = get_api_url( base=tool_shed_url, parts=api_path )
+ repository_metadata = json_from_url( api_url )
+ return repository_metadata[ 'tool_test_errors' ]
+
def json_from_url( url ):
url_handle = urllib.urlopen( url )
url_contents = url_handle.read()
@@ -201,16 +246,15 @@
if tests_passed:
params[ 'tools_functionally_correct' ] = 'true'
params[ 'do_not_test' ] = 'true'
- test_results_dict[ 'status' ] = 'passed'
else:
params[ 'tools_functionally_correct' ] = 'false'
params[ 'do_not_test' ] = 'true'
- test_results_dict[ 'status' ] = 'failed'
params[ 'tool_test_errors' ] = test_results_dict
return update( tool_shed_api_key, '%s' % ( url_join( galaxy_tool_shed_url, 'api', 'repository_revisions', metadata_id ) ), params, return_formatted=False )
def run_tests( test_config ):
loader = nose.loader.TestLoader( config=test_config )
+ test_config.plugins.addPlugin( ReportResults() )
plug_loader = test_config.plugins.prepareTestLoader( loader )
if plug_loader is not None:
loader = plug_loader
@@ -221,7 +265,8 @@
plug_runner = test_config.plugins.prepareTestRunner( test_runner )
if plug_runner is not None:
test_runner = plug_runner
- return test_runner.run( tests )
+ result = test_runner.run( tests )
+ return result, test_config.plugins._plugins
def main():
# ---- Configuration ------------------------------------------------------
@@ -391,10 +436,6 @@
log.info( "The embedded Galaxy application is running on %s:%s" % ( galaxy_test_host, galaxy_test_port ) )
log.info( "Repositories will be installed from the tool shed at %s" % galaxy_tool_shed_url )
success = False
- repository_status = dict()
- test_environment = get_test_environment()
- test_environment[ 'galaxy_database_version' ] = get_database_version( app )
- test_environment[ 'galaxy_revision'] = get_repository_current_revision( os.getcwd() )
try:
# Iterate through a list of repository info dicts.
log.info( "Retrieving repositories to install from the URL:\n%s\n" % str( galaxy_tool_shed_url ) )
@@ -472,7 +513,7 @@
test_config.configure( sys.argv )
# Run the configured install method as a test. This method uses the embedded Galaxy application's web interface to install the specified
# repository, with tool and repository dependencies also selected for installation.
- result = run_tests( test_config )
+ result, _ = run_tests( test_config )
success = result.wasSuccessful()
# If the installation succeeds, configure and run functional tests for this repository. This is equivalent to
# sh run_functional_tests.sh -installed
@@ -483,14 +524,63 @@
# and the tools_functionally_correct flag to False, as well as updating tool_test_errors.
file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( dict() ) )
has_test_data, shed_tools_dict = parse_tool_panel_config( galaxy_shed_tool_conf_file, from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
+ # The repository_status dict should always have the following structure:
+ # {
+ # "test_environment":
+ # {
+ # "galaxy_revision": "9001:abcd1234",
+ # "galaxy_database_version": "114",
+ # "tool_shed_revision": "9001:abcd1234",
+ # "tool_shed_mercurial_version": "2.3.1",
+ # "tool_shed_database_version": "17",
+ # "python_version": "2.7.2",
+ # "architecture": "x86_64",
+ # "system": "Darwin 12.2.0"
+ # },
+ # "test_errors":
+ # [
+ # {
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+ # "traceback": "The captured traceback."
+ # },
+ # ]
+ # "passed_tests":
+ # [
+ # {
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+ # },
+ # ]
+ # "invalid_tests":
+ # [
+ # {
+ # "tool_id": "The tool ID that does not have functional tests defined.",
+ # "tool_version": "The version of the tool."
+ # "tool_guid": "The guid of the tool."
+ # "reason_test_is_invalid": "A short explanation of what is invalid."
+ # },
+ # ]
+ # }
+ repository_status = get_tool_test_errors_from_api( galaxy_tool_shed_url, repository_dict )
+ test_environment = get_test_environment( repository_status[ 'test_environment' ] )
+ test_environment[ 'galaxy_database_version' ] = get_database_version( app )
+ test_environment[ 'galaxy_revision'] = get_repository_current_revision( os.getcwd() )
+ repository_status[ 'test_environment' ] = test_environment
+ repository_status[ 'tests_passed' ] = []
+ repository_status[ 'test_errors' ] = []
if not has_test_data:
log.error( 'Test data is missing for this repository. Updating repository and skipping functional tests.' )
repository_status[ 'test_environment' ] = get_test_environment()
test_id = 'Find functional test data for %s' % repository_dict[ 'name' ]
- test_errors = dict( test_id=test_id,
- stdout='No test data found for changeset revision %s of repository %s owned by %s.' % \
- ( repository_dict[ 'changeset_revision' ], repository_dict[ 'name' ], repository_dict[ 'owner' ] ) )
- repository_status[ 'test_errors' ] = [ test_errors ]
+ # Record the lack of test data.
+ test_errors = dict( tool_id=None, tool_version=None, tool_guid=None,
+ reason_test_is_invalid="Repository %s is missing a test-data directory." % name )
+ repository_status[ 'invalid_tests' ].append( test_errors )
# Record the status of this repository in the tool shed.
register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
# Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
@@ -518,12 +608,20 @@
test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.configure( sys.argv )
# Run the configured tests.
- result = run_tests( test_config )
+ result, test_plugins = run_tests( test_config )
success = result.wasSuccessful()
# Record some information about the environment in which this test was run, in case a failure is specific to a certain processor
# architecture or operating system.
repository_dict[ 'test_environment' ] = test_environment
- test_errors = []
+ for plugin in test_plugins:
+ if hasattr( plugin, 'getTestStatus' ):
+ tests_passed = plugin.getTestStatus()
+ break
+ repository_status[ 'tests_passed' ] = []
+ for test_id in tests_passed:
+ tool_id, tool_version = get_tool_info_from_test_id( test_id )
+ test_result = dict( test_id=test_id, tool_id=tool_id, tool_version=tool_version )
+ repository_status[ 'tests_passed' ].append( test_result )
if success:
# This repository's tools passed all functional tests. Update the repository_metadata table in the tool shed's database
# to reflect that. Call the register_test_result method, which executes a PUT request to the repository_revisions API
@@ -531,15 +629,16 @@
# updates the time_last_tested field to today's date.
repositories_passed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
repository_status[ 'test_environment' ] = test_environment
- repository_status[ 'test_errors' ] = []
register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=True )
log.debug( 'Revision %s of repository %s installed and passed functional tests.' % \
( repository_dict[ 'changeset_revision' ], repository_dict[ 'name' ] ) )
else:
# If the functional tests fail, log the output and update the failed changeset revision's metadata record in the tool shed via the API.
for failure in result.failures:
- # Record the twill test identifier, so the repository owner can discover which test is failing.
- test_status = dict( test_id=str( failure[0] ) )
+ # Record the twill test identifier and information about the tool, so the repository owner can discover which test is failing.
+ test_id = str( failure[0] )
+ tool_id, tool_version = get_tool_info_from_test_id( test_id )
+ test_status = dict( test_id=test_id, tool_id=tool_id, tool_version=tool_version )
log_output = failure[1].replace( '\\n', '\n' )
# Remove debug output that the reviewer or owner doesn't need.
log_output = re.sub( r'control \d+:.+', r'', log_output )
@@ -547,7 +646,7 @@
appending_to = 'output'
tmp_output = {}
output = {}
- # Iterate through the functional test output and extract only the important data. Captured logging is not recorded.
+ # Iterate through the functional test output and extract only the important data. Captured logging and stdout are not recorded.
for line in log_output.split( '\n' ):
if line.startswith( 'Traceback' ):
appending_to = 'traceback'
@@ -567,17 +666,15 @@
if appending_to not in tmp_output:
tmp_output[ appending_to ] = []
tmp_output[ appending_to ].append( line )
- for output_type in [ 'stderr', 'stdout', 'traceback' ]:
+ for output_type in [ 'stderr', 'traceback' ]:
if output_type in tmp_output:
test_status[ output_type ] = '\n'.join( tmp_output[ output_type ] )
- test_errors.append( test_status )
- if test_errors:
- # Only update test_errors for this repository if it's not empty.
- repository_status[ 'test_environment' ] = test_environment
- repository_status[ 'test_errors' ] = test_errors
+ repository_status[ 'test_errors' ].append( test_status )
+ log.debug( to_json_string( repository_status, indent=2, sort_keys=True ) )
# Call the register_test_result method, which executes a PUT request to the repository_revisions API controller with the outcome
- # status of the tests, and updates tool_test_errors with the relevant log data.
- # This also sets the do_not_test and tools_functionally correct flags, and updates the time_last_tested field to today's date.
+ # of the tests, and updates tool_test_errors with the relevant log data.
+ # This also sets the do_not_test and tools_functionally correct flags to the appropriate values, and updates the time_last_tested
+ # field to today's date.
repositories_failed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
log.debug( 'Revision %s of repository %s installed successfully, but did not pass functional tests.' % \
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: inithello: Added even more information to the tool_test_errors dict. Separated failed and passed tests in the metadata record. Added documentation.
by commits-noreply@bitbucket.org 08 Mar '13
by commits-noreply@bitbucket.org 08 Mar '13
08 Mar '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/b98890b116c0/
changeset: b98890b116c0
user: inithello
date: 2013-03-08 20:25:06
summary: Added even more information to the tool_test_errors dict. Separated failed and passed tests in the metadata record. Added documentation.
affected #: 3 files
diff -r a37fbe9cbd93a4e73a765d50be3aca4f31968f0b -r b98890b116c09a286d989b4b41595c0cf10d9c9d lib/tool_shed/scripts/check_repositories_for_functional_tests.py
--- a/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
+++ b/lib/tool_shed/scripts/check_repositories_for_functional_tests.py
@@ -38,7 +38,16 @@
'''Script that checks repositories to see if the tools contained within them have functional tests defined.'''
parser = OptionParser()
parser.add_option( "-i", "--info_only", action="store_true", dest="info_only", help="info about the requested action", default=False )
- parser.add_option( "-v", "--verbose", action="store_true", dest="verbose", help="verbose mode, print the name, owner, and changeset revision of each repository", default=False )
+ parser.add_option(
+ "-v", "--verbose",
+ action="count", dest="verbosity",
+ default=1,
+ help="Control the amount of detail in the log output.")
+ parser.add_option(
+ "--verbosity", action="store", dest="verbosity",
+ metavar='VERBOSITY',
+ type="int", help="Control the amount of detail in the log output. --verbosity=1 is "
+ "the same as -v")
( options, args ) = parser.parse_args()
ini_file = args[0]
config_parser = ConfigParser.ConfigParser( {'here':os.getcwd()} )
@@ -56,12 +65,12 @@
if options.info_only:
print "# Displaying info only ( --info_only )"
- if options.verbose:
- print "# Displaying extra information ( --verbose )"
+ if options.verbosity:
+ print "# Displaying extra information ( --verbosity = %d )" % options.verbosity
- check_and_flag_repositories( app, info_only=options.info_only, verbose=options.verbose )
+ check_and_flag_repositories( app, info_only=options.info_only, verbosity=options.verbosity )
-def check_and_flag_repositories( app, info_only=False, verbose=False ):
+def check_and_flag_repositories( app, info_only=False, verbosity=1 ):
'''
This method will iterate through all records in the repository_metadata table, checking each one for tool metadata,
then checking the tool metadata for tests.
@@ -97,10 +106,15 @@
and test repositories script to process. If the tested changeset revision does not have a test-data directory, this script will also mark the revision
not to be tested.
- If any error is encountered, the script will update the repository_metadata.tool_test_errors attribute with the following structure:
+ If any error is encountered, the script will update the repository_metadata.tool_test_errors attribute following this structure:
{
"test_environment":
{
+ "galaxy_revision": "9001:abcd1234",
+ "galaxy_database_version": "114",
+ "tool_shed_revision": "9001:abcd1234",
+ "tool_shed_mercurial_version": "2.3.1",
+ "tool_shed_database_version": "17",
"python_version": "2.7.2",
"architecture": "x86_64",
"system": "Darwin 12.2.0"
@@ -108,8 +122,28 @@
"test_errors":
[
{
- "test_id": "Something that will easily identify what the problem is",
- "stdout": "The output of the test, or a more detailed description of what was tested and why it failed."
+ "test_id": "The test ID, generated by twill",
+ "tool_id": "The tool ID that was tested",
+ "tool_version": "The tool version that was tested",
+ "stderr": "The output of the test, or a more detailed description of what was tested and what the error was."
+ "traceback": "The traceback, if any."
+ },
+ ]
+ "passed_tests":
+ [
+ {
+ "test_id": "The test ID, generated by twill",
+ "tool_id": "The tool ID that was tested",
+ "tool_version": "The tool version that was tested",
+ },
+ ]
+ "invalid_tests":
+ [
+ {
+ "tool_id": "The tool ID that does not have functional tests defined.",
+ "tool_version": "The version of the tool."
+ "tool_guid": "The guid of the tool."
+ "reason_test_is_invalid": "A short explanation of what is invalid."
},
]
}
@@ -129,19 +163,24 @@
repository_status[ 'test_environment' ][ 'tool_shed_database_version' ] = get_database_version( app )
repository_status[ 'test_environment' ][ 'tool_shed_mercurial_version' ] = __version__.version
repository_status[ 'test_environment' ][ 'tool_shed_revision' ] = get_repository_current_revision( os.getcwd() )
- repository_status[ 'test_errors' ] = []
+ repository_status[ 'invalid_tests' ] = []
metadata_records_to_check = app.sa_session.query( app.model.RepositoryMetadata ) \
.filter( and_( app.model.RepositoryMetadata.table.c.downloadable == True,
app.model.RepositoryMetadata.table.c.do_not_test == False,
app.model.RepositoryMetadata.table.c.tools_functionally_correct == False ) ) \
.all()
for metadata_record in metadata_records_to_check:
- repository_status[ 'test_errors' ] = []
+ if metadata_record.tool_test_errors:
+ repository_status = metadata_record.tool_test_errors
name = metadata_record.repository.name
owner = metadata_record.repository.user.username
changeset_revision = str( metadata_record.changeset_revision )
+ repository_status[ 'invalid_tests' ] = []
if metadata_record.repository.id not in checked_repository_ids:
checked_repository_ids.append( metadata_record.repository.id )
+ if verbosity >= 1:
+ print '# Checking for functional tests in changeset revision %s of %s, owned by %s.' % \
+ ( changeset_revision, name, owner )
# If this changeset revision has no tools, we don't need to do anything here, the install and test script has a filter for returning
# only repositories that contain tools.
if 'tools' not in metadata_record.metadata:
@@ -152,19 +191,26 @@
for tool_metadata in metadata_record.metadata[ 'tools' ]:
tool_count += 1
tool_id = tool_metadata[ 'id' ]
- if verbose:
- print '# Checking for functional tests in changeset revision %s of %s, tool ID %s.' % \
- ( changeset_revision, name, tool_id )
+ tool_version = tool_metadata[ 'version' ]
+ tool_guid = tool_metadata[ 'guid' ]
+ if verbosity >= 2:
+ print "# Checking tool ID '%s' in changeset revision %s of %s." % \
+ ( tool_id, changeset_revision, name )
# If there are no tests, this tool should not be tested, since the tool functional tests only report failure if the test itself fails,
# not if it's missing or undefined. Filtering out those repositories at this step will reduce the number of "false negatives" the
# automated functional test framework produces.
if 'tests' not in tool_metadata or not tool_metadata[ 'tests' ]:
- if verbose:
+ if verbosity >= 2:
print '# No functional tests defined for %s.' % tool_id
# The repository_metadata.tool_test_errors attribute should always have the following structure:
# {
# "test_environment":
# {
+ # "galaxy_revision": "9001:abcd1234",
+ # "galaxy_database_version": "114",
+ # "tool_shed_revision": "9001:abcd1234",
+ # "tool_shed_mercurial_version": "2.3.1",
+ # "tool_shed_database_version": "17",
# "python_version": "2.7.2",
# "architecture": "x86_64",
# "system": "Darwin 12.2.0"
@@ -172,22 +218,44 @@
# "test_errors":
# [
# {
- # "test_id": "Something that will easily identify what the problem is",
- # "stderr": "The output of the test, or a more detailed description of what was tested and why it failed."
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+ # "traceback": "The captured traceback."
+ # },
+ # ]
+ # "passed_tests":
+ # [
+ # {
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+ # },
+ # ]
+ # "invalid_tests":
+ # [
+ # {
+ # "tool_id": "The tool ID that does not have functional tests defined.",
+ # "tool_version": "The version of the tool."
+ # "tool_guid": "The guid of the tool."
+ # "reason_test_is_invalid": "A short explanation of what is invalid."
# },
# ]
# }
- # Optionally, "stdout" and "traceback" may be included in a test_errors dict, if they are relevant.
- test_id = 'Functional tests for %s' % tool_id
- test_errors = dict( stderr='No functional tests defined for tool %s in changeset revision %s of repository %s owned by %s.' % \
- ( tool_id, changeset_revision, name, owner ) )
- repository_status[ 'test_errors' ].append( test_errors )
- repository_status[ 'status' ] = 'failed'
+ # Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other
+ # than the list relevant to what it is testing.
+ test_errors = dict( tool_id=tool_id, tool_version=tool_version, tool_guid=tool_guid )
+ repository_status[ 'invalid_tests' ].append( test_errors )
no_tests += 1
else:
+ if verbosity >= 2:
+ print "# Tool ID '%s' in changeset revision %s of %s has one or more valid functional tests defined." % \
+ ( tool_id, changeset_revision, name )
has_tests += 1
- if verbose:
- if not repository_status[ 'test_errors' ]:
+ if verbosity >= 1:
+ if not repository_status[ 'invalid_tests' ]:
print '# All tools have functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
else:
print '# Some tools missing functional tests in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
@@ -211,12 +279,17 @@
if os.path.exists( work_dir ):
shutil.rmtree( work_dir )
if not has_test_data:
- if verbose:
+ if verbosity >= 1:
print '# Test data missing in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
# The repository_metadata.tool_test_errors attribute should always have the following structure:
# {
# "test_environment":
# {
+ # "galaxy_revision": "9001:abcd1234",
+ # "galaxy_database_version": "114",
+ # "tool_shed_revision": "9001:abcd1234",
+ # "tool_shed_mercurial_version": "2.3.1",
+ # "tool_shed_database_version": "17",
# "python_version": "2.7.2",
# "architecture": "x86_64",
# "system": "Darwin 12.2.0"
@@ -224,23 +297,44 @@
# "test_errors":
# [
# {
- # "test_id": "Something that will easily identify what the problem is",
- # "stderr": "The output of the test, or a more detailed description of what was tested and why it failed."
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+ # "traceback": "The captured traceback."
+ # },
+ # ]
+ # "passed_tests":
+ # [
+ # {
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+ # },
+ # ]
+ # "invalid_tests":
+ # [
+ # {
+ # "tool_id": "The tool ID that does not have functional tests defined.",
+ # "tool_version": "The version of the tool."
+ # "tool_guid": "The guid of the tool."
+ # "reason_test_is_invalid": "A short explanation of what is invalid."
# },
# ]
# }
- # Optionally, "stdout" and "traceback" may be included in a test_errors dict, if they are relevant.
- test_id = 'Find functional test data for %s' % metadata_record.repository.name
- test_errors = dict( stderr='No test data found for changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner ) )
- repository_status[ 'test_errors' ].append( test_errors )
- repository_status[ 'status' ] = 'failed'
+ # Optionally, "traceback" may be included in a test_errors dict, if it is relevant. No script should overwrite anything other
+ # than the list relevant to what it is testing.
+ test_errors = dict( tool_id=None, tool_version=None, tool_guid=None,
+ reason_test_is_invalid="Repository %s is missing a test-data directory." % name )
+ repository_status[ 'invalid_tests' ].append( test_errors )
else:
- if verbose:
+ if verbosity >= 1:
print '# Test data found in changeset revision %s of repository %s owned by %s.' % ( changeset_revision, name, owner )
if not info_only:
# If repository_status[ 'test_errors' ] is empty, no issues were found, and we can just update time_last_tested with the platform
# on which this script was run.
- if repository_status[ 'test_errors' ]:
+ if repository_status[ 'invalid_tests' ]:
# If functional test definitions or test data are missing, set do_not_test = True if and only if:
# a) There are multiple downloadable revisions, and the revision being tested is not the most recent downloadable revision. In this case,
# the revision will never be updated with correct data, and re-testing it would be redundant.
@@ -250,8 +344,6 @@
if should_set_do_not_test_flag( app, metadata_record.repository, changeset_revision ):
metadata_record.do_not_test = True
metadata_record.tools_functionally_correct = False
- else:
- repository_status[ 'status' ] = 'passed'
metadata_record.tool_test_errors = repository_status
metadata_record.time_last_tested = datetime.utcnow()
app.sa_session.add( metadata_record )
diff -r a37fbe9cbd93a4e73a765d50be3aca4f31968f0b -r b98890b116c09a286d989b4b41595c0cf10d9c9d test/base/util.py
--- a/test/base/util.py
+++ b/test/base/util.py
@@ -74,8 +74,8 @@
return None, repository_name, changeset_revision
return last_galaxy_test_file_dir, last_tested_repository_name, last_tested_changeset_revision
-def get_test_environment():
- rval = {}
+def get_test_environment( current_environment={} ):
+ rval = current_environment
rval[ 'python_version' ] = platform.python_version()
rval[ 'architecture' ] = platform.machine()
os, hostname, os_version, uname, arch, processor = platform.uname()
diff -r a37fbe9cbd93a4e73a765d50be3aca4f31968f0b -r b98890b116c09a286d989b4b41595c0cf10d9c9d test/install_and_test_tool_shed_repositories/functional_tests.py
--- a/test/install_and_test_tool_shed_repositories/functional_tests.py
+++ b/test/install_and_test_tool_shed_repositories/functional_tests.py
@@ -60,6 +60,7 @@
import nose.config
import nose.loader
import nose.plugins.manager
+from nose.plugins import Plugin
from base.util import parse_tool_panel_config, get_database_version, get_test_environment, get_repository_current_revision
@@ -115,9 +116,36 @@
else:
galaxy_encode_secret = os.environ[ 'GALAXY_INSTALL_TEST_SECRET' ]
+
+class ReportResults( Plugin ):
+ '''Simple Nose plugin to record the IDs of all tests run, regardless of success.'''
+ name = "reportresults"
+ passed = []
+
+ def options( self, parser, env=os.environ ):
+ super( ReportResults, self ).options( parser, env=env )
+
+ def configure(self, options, conf):
+ super( ReportResults, self ).configure( options, conf )
+ if not self.enabled:
+ return
+
+ def addSuccess( self, test ):
+ '''Only record test IDs that correspond to tool functional tests.'''
+ if 'TestForTool' in test.id():
+ test_id = test.id()
+ # Rearrange the test ID to match the format that is produced in test_results.failures
+ test_id_parts = test_id.split( '.' )
+ fixed_test_id = '%s (%s)' % ( test_id_parts[ -1 ], '.'.join( test_id_parts[ :-1 ] ) )
+ self.passed.append( fixed_test_id )
+
+ def getTestStatus( self ):
+ return self.passed
+
def execute_uninstall_method( repository_dict ):
# Delete any configured tool functional tests from the test_toolbox.__dict__, otherwise nose will find them
- # and try to re-run the tests after uninstalling the repository.
+ # and try to re-run the tests after uninstalling the repository, which will cause false failure reports,
+ # since the test data has been deleted from disk by now.
tests_to_delete = []
for key in test_toolbox.__dict__:
if key.startswith( 'TestForTool_' ):
@@ -132,7 +160,7 @@
test_config.configure( sys.argv )
# Run the uninstall method. This method uses the Galaxy web interface to uninstall the previously installed
# repository and delete it from disk.
- result = run_tests( test_config )
+ result, _ = run_tests( test_config )
success = result.wasSuccessful()
return success
@@ -191,6 +219,23 @@
else:
raise AssertionError( 'Unknown format %s.' % format )
+def get_tool_info_from_test_id( test_id ):
+ '''
+ Test IDs come in the form test_tool_number (functional.test_toolbox.TestForTool_toolshed_url/repos/owner/repository_name/tool_id/tool_version)
+ We want the tool ID and tool version.
+ '''
+ parts = test_id.replace( ')', '' ).split( '/' )
+ tool_version = parts[ -1 ]
+ tool_id = parts[ -2 ]
+ return tool_id, tool_version
+
+def get_tool_test_errors_from_api( tool_shed_url, metadata_dict ):
+ params = dict()
+ api_path = metadata_dict[ 'url' ].split( '/' )
+ api_url = get_api_url( base=tool_shed_url, parts=api_path )
+ repository_metadata = json_from_url( api_url )
+ return repository_metadata[ 'tool_test_errors' ]
+
def json_from_url( url ):
url_handle = urllib.urlopen( url )
url_contents = url_handle.read()
@@ -201,16 +246,15 @@
if tests_passed:
params[ 'tools_functionally_correct' ] = 'true'
params[ 'do_not_test' ] = 'true'
- test_results_dict[ 'status' ] = 'passed'
else:
params[ 'tools_functionally_correct' ] = 'false'
params[ 'do_not_test' ] = 'true'
- test_results_dict[ 'status' ] = 'failed'
params[ 'tool_test_errors' ] = test_results_dict
return update( tool_shed_api_key, '%s' % ( url_join( galaxy_tool_shed_url, 'api', 'repository_revisions', metadata_id ) ), params, return_formatted=False )
def run_tests( test_config ):
loader = nose.loader.TestLoader( config=test_config )
+ test_config.plugins.addPlugin( ReportResults() )
plug_loader = test_config.plugins.prepareTestLoader( loader )
if plug_loader is not None:
loader = plug_loader
@@ -221,7 +265,8 @@
plug_runner = test_config.plugins.prepareTestRunner( test_runner )
if plug_runner is not None:
test_runner = plug_runner
- return test_runner.run( tests )
+ result = test_runner.run( tests )
+ return result, test_config.plugins._plugins
def main():
# ---- Configuration ------------------------------------------------------
@@ -391,10 +436,6 @@
log.info( "The embedded Galaxy application is running on %s:%s" % ( galaxy_test_host, galaxy_test_port ) )
log.info( "Repositories will be installed from the tool shed at %s" % galaxy_tool_shed_url )
success = False
- repository_status = dict()
- test_environment = get_test_environment()
- test_environment[ 'galaxy_database_version' ] = get_database_version( app )
- test_environment[ 'galaxy_revision'] = get_repository_current_revision( os.getcwd() )
try:
# Iterate through a list of repository info dicts.
log.info( "Retrieving repositories to install from the URL:\n%s\n" % str( galaxy_tool_shed_url ) )
@@ -472,7 +513,7 @@
test_config.configure( sys.argv )
# Run the configured install method as a test. This method uses the embedded Galaxy application's web interface to install the specified
# repository, with tool and repository dependencies also selected for installation.
- result = run_tests( test_config )
+ result, _ = run_tests( test_config )
success = result.wasSuccessful()
# If the installation succeeds, configure and run functional tests for this repository. This is equivalent to
# sh run_functional_tests.sh -installed
@@ -483,14 +524,63 @@
# and the tools_functionally_correct flag to False, as well as updating tool_test_errors.
file( galaxy_shed_tools_dict, 'w' ).write( to_json_string( dict() ) )
has_test_data, shed_tools_dict = parse_tool_panel_config( galaxy_shed_tool_conf_file, from_json_string( file( galaxy_shed_tools_dict, 'r' ).read() ) )
+ # The repository_status dict should always have the following structure:
+ # {
+ # "test_environment":
+ # {
+ # "galaxy_revision": "9001:abcd1234",
+ # "galaxy_database_version": "114",
+ # "tool_shed_revision": "9001:abcd1234",
+ # "tool_shed_mercurial_version": "2.3.1",
+ # "tool_shed_database_version": "17",
+ # "python_version": "2.7.2",
+ # "architecture": "x86_64",
+ # "system": "Darwin 12.2.0"
+ # },
+ # "test_errors":
+ # [
+ # {
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+ # "traceback": "The captured traceback."
+ # },
+ # ]
+ # "passed_tests":
+ # [
+ # {
+ # "test_id": "The test ID, generated by twill",
+ # "tool_id": "The tool ID that was tested",
+ # "tool_version": "The tool version that was tested",
+ # "stderr": "The output of the test, or a more detailed description of what was tested and what the outcome was."
+ # },
+ # ]
+ # "invalid_tests":
+ # [
+ # {
+ # "tool_id": "The tool ID that does not have functional tests defined.",
+ # "tool_version": "The version of the tool."
+ # "tool_guid": "The guid of the tool."
+ # "reason_test_is_invalid": "A short explanation of what is invalid."
+ # },
+ # ]
+ # }
+ repository_status = get_tool_test_errors_from_api( galaxy_tool_shed_url, repository_dict )
+ test_environment = get_test_environment( repository_status[ 'test_environment' ] )
+ test_environment[ 'galaxy_database_version' ] = get_database_version( app )
+ test_environment[ 'galaxy_revision'] = get_repository_current_revision( os.getcwd() )
+ repository_status[ 'test_environment' ] = test_environment
+ repository_status[ 'tests_passed' ] = []
+ repository_status[ 'test_errors' ] = []
if not has_test_data:
log.error( 'Test data is missing for this repository. Updating repository and skipping functional tests.' )
repository_status[ 'test_environment' ] = get_test_environment()
test_id = 'Find functional test data for %s' % repository_dict[ 'name' ]
- test_errors = dict( test_id=test_id,
- stdout='No test data found for changeset revision %s of repository %s owned by %s.' % \
- ( repository_dict[ 'changeset_revision' ], repository_dict[ 'name' ], repository_dict[ 'owner' ] ) )
- repository_status[ 'test_errors' ] = [ test_errors ]
+ # Record the lack of test data.
+ test_errors = dict( tool_id=None, tool_version=None, tool_guid=None,
+ reason_test_is_invalid="Repository %s is missing a test-data directory." % name )
+ repository_status[ 'invalid_tests' ].append( test_errors )
# Record the status of this repository in the tool shed.
register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
# Run the cleanup method. This removes tool functional test methods from the test_toolbox module and uninstalls the
@@ -518,12 +608,20 @@
test_config = nose.config.Config( env=os.environ, plugins=nose.plugins.manager.DefaultPluginManager() )
test_config.configure( sys.argv )
# Run the configured tests.
- result = run_tests( test_config )
+ result, test_plugins = run_tests( test_config )
success = result.wasSuccessful()
# Record some information about the environment in which this test was run, in case a failure is specific to a certain processor
# architecture or operating system.
repository_dict[ 'test_environment' ] = test_environment
- test_errors = []
+ for plugin in test_plugins:
+ if hasattr( plugin, 'getTestStatus' ):
+ tests_passed = plugin.getTestStatus()
+ break
+ repository_status[ 'tests_passed' ] = []
+ for test_id in tests_passed:
+ tool_id, tool_version = get_tool_info_from_test_id( test_id )
+ test_result = dict( test_id=test_id, tool_id=tool_id, tool_version=tool_version )
+ repository_status[ 'tests_passed' ].append( test_result )
if success:
# This repository's tools passed all functional tests. Update the repository_metadata table in the tool shed's database
# to reflect that. Call the register_test_result method, which executes a PUT request to the repository_revisions API
@@ -531,15 +629,16 @@
# updates the time_last_tested field to today's date.
repositories_passed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
repository_status[ 'test_environment' ] = test_environment
- repository_status[ 'test_errors' ] = []
register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=True )
log.debug( 'Revision %s of repository %s installed and passed functional tests.' % \
( repository_dict[ 'changeset_revision' ], repository_dict[ 'name' ] ) )
else:
# If the functional tests fail, log the output and update the failed changeset revision's metadata record in the tool shed via the API.
for failure in result.failures:
- # Record the twill test identifier, so the repository owner can discover which test is failing.
- test_status = dict( test_id=str( failure[0] ) )
+ # Record the twill test identifier and information about the tool, so the repository owner can discover which test is failing.
+ test_id = str( failure[0] )
+ tool_id, tool_version = get_tool_info_from_test_id( test_id )
+ test_status = dict( test_id=test_id, tool_id=tool_id, tool_version=tool_version )
log_output = failure[1].replace( '\\n', '\n' )
# Remove debug output that the reviewer or owner doesn't need.
log_output = re.sub( r'control \d+:.+', r'', log_output )
@@ -547,7 +646,7 @@
appending_to = 'output'
tmp_output = {}
output = {}
- # Iterate through the functional test output and extract only the important data. Captured logging is not recorded.
+ # Iterate through the functional test output and extract only the important data. Captured logging and stdout are not recorded.
for line in log_output.split( '\n' ):
if line.startswith( 'Traceback' ):
appending_to = 'traceback'
@@ -567,17 +666,15 @@
if appending_to not in tmp_output:
tmp_output[ appending_to ] = []
tmp_output[ appending_to ].append( line )
- for output_type in [ 'stderr', 'stdout', 'traceback' ]:
+ for output_type in [ 'stderr', 'traceback' ]:
if output_type in tmp_output:
test_status[ output_type ] = '\n'.join( tmp_output[ output_type ] )
- test_errors.append( test_status )
- if test_errors:
- # Only update test_errors for this repository if it's not empty.
- repository_status[ 'test_environment' ] = test_environment
- repository_status[ 'test_errors' ] = test_errors
+ repository_status[ 'test_errors' ].append( test_status )
+ log.debug( to_json_string( repository_status, indent=2, sort_keys=True ) )
# Call the register_test_result method, which executes a PUT request to the repository_revisions API controller with the outcome
- # status of the tests, and updates tool_test_errors with the relevant log data.
- # This also sets the do_not_test and tools_functionally correct flags, and updates the time_last_tested field to today's date.
+ # of the tests, and updates tool_test_errors with the relevant log data.
+    # This also sets the do_not_test and tools_functionally_correct flags to the appropriate values, and updates the time_last_tested
+    # field to today's date.
repositories_failed.append( dict( name=name, owner=owner, changeset_revision=changeset_revision ) )
register_test_result( galaxy_tool_shed_url, metadata_revision_id, repository_status, tests_passed=False )
log.debug( 'Revision %s of repository %s installed successfully, but did not pass functional tests.' % \
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
it because the commit notification service is enabled and the message
is addressed to the recipient of this email.
1
0
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/a2cfc332d7b8/
changeset: a2cfc332d7b8
user: Kyle Ellrott
date: 2013-01-30 00:58:58
summary: Enabling dataset UUID setting through library upload, upload1 tool, and galaxy.json file
affected #: 5 files
diff -r c7bde6953174eabe5510c89d023ffd8232592f11 -r a2cfc332d7b8e0f3b765b5d5b3c5b012521c39c0 lib/galaxy/jobs/__init__.py
--- a/lib/galaxy/jobs/__init__.py
+++ b/lib/galaxy/jobs/__init__.py
@@ -907,7 +907,6 @@
job_context = ExpressionContext( dict( stdout = job.stdout, stderr = job.stderr ) )
job_tool = self.app.toolbox.tools_by_id.get( job.tool_id, None )
-
for dataset_assoc in job.output_datasets + job.output_library_datasets:
context = self.get_dataset_finish_context( job_context, dataset_assoc.dataset.dataset )
#should this also be checking library associations? - can a library item be added from a history before the job has ended? - lets not allow this to occur
@@ -917,6 +916,8 @@
dataset.info = ( dataset.info or '' ) + context['stdout'] + context['stderr']
dataset.tool_version = self.version_string
dataset.set_size()
+ if 'uuid' in context:
+ dataset.dataset.uuid = context['uuid']
# Update (non-library) job output datasets through the object store
if dataset not in job.output_library_datasets:
self.app.object_store.update_from_file(dataset.dataset, create=True)
diff -r c7bde6953174eabe5510c89d023ffd8232592f11 -r a2cfc332d7b8e0f3b765b5d5b3c5b012521c39c0 lib/galaxy/tools/actions/upload_common.py
--- a/lib/galaxy/tools/actions/upload_common.py
+++ b/lib/galaxy/tools/actions/upload_common.py
@@ -294,6 +294,10 @@
link_data_only = uploaded_dataset.link_data_only
except:
link_data_only = 'copy_files'
+ try:
+ uuid_str = uploaded_dataset.uuid
+ except:
+ uuid_str = None
json = dict( file_type = uploaded_dataset.file_type,
ext = uploaded_dataset.ext,
name = uploaded_dataset.name,
@@ -302,6 +306,7 @@
type = uploaded_dataset.type,
is_binary = is_binary,
link_data_only = link_data_only,
+ uuid = uuid_str,
space_to_tab = uploaded_dataset.space_to_tab,
in_place = trans.app.config.external_chown_script is None,
path = uploaded_dataset.path )
diff -r c7bde6953174eabe5510c89d023ffd8232592f11 -r a2cfc332d7b8e0f3b765b5d5b3c5b012521c39c0 lib/galaxy/webapps/galaxy/controllers/library_common.py
--- a/lib/galaxy/webapps/galaxy/controllers/library_common.py
+++ b/lib/galaxy/webapps/galaxy/controllers/library_common.py
@@ -1070,11 +1070,13 @@
job, output = upload_common.create_job( trans, tool_params, tool, json_file_path, data_list, folder=library_bunch.folder )
# HACK: Prevent outputs_to_working_directory from overwriting inputs when "linking"
job.add_parameter( 'link_data_only', to_json_string( kwd.get( 'link_data_only', 'copy_files' ) ) )
+ job.add_parameter( 'uuid', to_json_string( kwd.get( 'uuid', None ) ) )
trans.sa_session.add( job )
trans.sa_session.flush()
return output
def make_library_uploaded_dataset( self, trans, cntrller, params, name, path, type, library_bunch, in_folder=None ):
link_data_only = params.get( 'link_data_only', 'copy_files' )
+ uuid_str = params.get( 'uuid', None )
library_bunch.replace_dataset = None # not valid for these types of upload
uploaded_dataset = util.bunch.Bunch()
new_name = name
@@ -1096,6 +1098,7 @@
uploaded_dataset.in_folder = in_folder
uploaded_dataset.data = upload_common.new_upload( trans, cntrller, uploaded_dataset, library_bunch )
uploaded_dataset.link_data_only = link_data_only
+ uploaded_dataset.uuid = uuid_str
if link_data_only == 'link_to_files':
uploaded_dataset.data.file_name = os.path.abspath( path )
# Since we are not copying the file into Galaxy's managed
diff -r c7bde6953174eabe5510c89d023ffd8232592f11 -r a2cfc332d7b8e0f3b765b5d5b3c5b012521c39c0 scripts/api/load_data_with_metadata.py
--- a/scripts/api/load_data_with_metadata.py
+++ b/scripts/api/load_data_with_metadata.py
@@ -15,10 +15,34 @@
import sys
import json
import time
+import argparse
+
sys.path.insert( 0, os.path.dirname( __file__ ) )
from common import submit, display
-def main(api_key, api_url, in_folder, data_library):
+def load_file(fullpath, api_key, api_url, library_id, library_folder_id, uuid_field=None):
+ data = {}
+ data['folder_id'] = library_folder_id
+ data['file_type'] = 'auto'
+ data['dbkey'] = ''
+ data['upload_option'] = 'upload_paths'
+ data['filesystem_paths'] = fullpath
+ data['create_type'] = 'file'
+ data['link_data_only'] = 'link_to_files'
+
+ handle = open( fullpath + ".json" )
+ smeta = handle.read()
+ handle.close()
+ ext_meta = json.loads(smeta)
+ data['extended_metadata'] = ext_meta
+ if uuid_field is not None and uuid_field in ext_meta:
+ data['uuid'] = ext_meta[uuid_field]
+
+ libset = submit(api_key, api_url + "libraries/%s/contents" % library_id, data, return_formatted = True)
+ print libset
+
+
+def main(api_key, api_url, in_folder, data_library, uuid_field=None):
# Find/Create data library with the above name. Assume we're putting datasets in the root folder '/'
libs = display(api_key, api_url + 'libraries', return_formatted=False)
library_id = None
@@ -28,7 +52,7 @@
if not library_id:
lib_create_data = {'name':data_library}
library = submit(api_key, api_url + 'libraries', lib_create_data, return_formatted=False)
- library_id = library[0]['id']
+ library_id = library['id']
folders = display(api_key, api_url + "libraries/%s/contents" % library_id, return_formatted = False)
for f in folders:
if f['name'] == "/":
@@ -37,35 +61,24 @@
print "Failure to configure library destination."
sys.exit(1)
- for fname in os.listdir(in_folder):
- fullpath = os.path.join(in_folder, fname)
- if os.path.isfile(fullpath) and os.path.exists(fullpath + ".json"):
+ if os.path.isfile(in_folder):
+ if os.path.exists(in_folder + ".json"):
+ fullpath = os.path.abspath(in_folder)
print "Loading", fullpath
- data = {}
- data['folder_id'] = library_folder_id
- data['file_type'] = 'auto'
- data['dbkey'] = ''
- data['upload_option'] = 'upload_paths'
- data['filesystem_paths'] = fullpath
- data['create_type'] = 'file'
-
- data['link_data_only'] = 'link_to_files'
-
- handle = open( fullpath + ".json" )
- smeta = handle.read()
- handle.close()
- data['extended_metadata'] = json.loads(smeta)
- libset = submit(api_key, api_url + "libraries/%s/contents" % library_id, data, return_formatted = True)
- print libset
+ load_file(fullpath, api_key, api_url, library_id, library_folder_id, uuid_field)
+ else:
+ for fname in os.listdir(in_folder):
+ fullpath = os.path.join(in_folder, fname)
+ if os.path.isfile(fullpath) and os.path.exists(fullpath + ".json"):
+ print "Loading", fullpath
+ load_file(fullpath, api_key, api_url, library_id, library_folder_id, uuid_field)
if __name__ == '__main__':
- try:
- api_key = sys.argv[1]
- api_url = sys.argv[2]
- in_folder = sys.argv[3]
- data_library = sys.argv[4]
- except IndexError:
- print 'usage: %s key url in_folder data_library' % os.path.basename( sys.argv[0] )
- sys.exit( 1 )
- main(api_key, api_url, in_folder, data_library )
-
+ parser = argparse.ArgumentParser()
+ parser.add_argument("api_key", help="API KEY")
+ parser.add_argument('api_url', help='API URL')
+ parser.add_argument("in_folder", help="Input Folder")
+ parser.add_argument("data_library", help="Data Library")
+ parser.add_argument("--uuid_field", help="UUID Field", default=None)
+ args = parser.parse_args()
+ main(args.api_key, args.api_url, args.in_folder, args.data_library, args.uuid_field)
diff -r c7bde6953174eabe5510c89d023ffd8232592f11 -r a2cfc332d7b8e0f3b765b5d5b3c5b012521c39c0 tools/data_source/upload.py
--- a/tools/data_source/upload.py
+++ b/tools/data_source/upload.py
@@ -307,6 +307,8 @@
stdout = stdout,
name = dataset.name,
line_count = line_count )
+ if dataset.get('uuid', None) is not None:
+ info['uuid'] = dataset.get('uuid')
json_file.write( to_json_string( info ) + "\n" )
if link_data_only == 'copy_files' and datatype.dataset_content_needs_grooming( output_path ):
https://bitbucket.org/galaxy/galaxy-central/commits/ddcce8b55152/
changeset: ddcce8b55152
user: dannon
date: 2013-03-08 17:35:06
summary: Merge changes from PR https://bitbucket.org/galaxy/galaxy-central/pull-request/115/adding-dataset…
affected #: 6 files
diff -r a2cfc332d7b8e0f3b765b5d5b3c5b012521c39c0 -r ddcce8b55152c47177d85ab1a2433f80f162b64f lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -1774,6 +1774,10 @@
misc_info = ldda.info,
misc_blurb = ldda.blurb,
template_data = template_data )
+ if ldda.dataset.uuid is None:
+ rval['uuid'] = None
+ else:
+ rval['uuid'] = str(ldda.dataset.uuid)
for name, spec in ldda.metadata.spec.items():
val = ldda.metadata.get( name )
if isinstance( val, MetadataFile ):
diff -r a2cfc332d7b8e0f3b765b5d5b3c5b012521c39c0 -r ddcce8b55152c47177d85ab1a2433f80f162b64f lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1922,6 +1922,8 @@
if "runtool_btn" not in incoming and "URL" not in incoming:
if not self.display_interface:
return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
+ if len(incoming):
+ self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
return "tool_form.mako", dict( errors={}, tool_state=state, param_values={}, incoming={} )
# Process incoming data
if not( self.check_values ):
diff -r a2cfc332d7b8e0f3b765b5d5b3c5b012521c39c0 -r ddcce8b55152c47177d85ab1a2433f80f162b64f lib/galaxy/webapps/galaxy/api/history_contents.py
--- a/lib/galaxy/webapps/galaxy/api/history_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/history_contents.py
@@ -220,6 +220,11 @@
hda_dict[ 'display_apps' ] = get_display_apps( trans, hda )
hda_dict[ 'display_types' ] = get_old_display_applications( trans, hda )
+ if hda.dataset.uuid is None:
+ hda_dict['uuid'] = None
+ else:
+ hda_dict['uuid'] = str(hda.dataset.uuid)
+
hda_dict[ 'visualizations' ] = hda.get_visualizations()
if hda.peek and hda.peek != 'no peek':
hda_dict[ 'peek' ] = to_unicode( hda.display_peek() )
diff -r a2cfc332d7b8e0f3b765b5d5b3c5b012521c39c0 -r ddcce8b55152c47177d85ab1a2433f80f162b64f lib/galaxy/webapps/galaxy/controllers/root.py
--- a/lib/galaxy/webapps/galaxy/controllers/root.py
+++ b/lib/galaxy/webapps/galaxy/controllers/root.py
@@ -22,7 +22,8 @@
return trans.fill_template( "root/index.mako",
tool_id=tool_id,
workflow_id=workflow_id,
- m_c=m_c, m_a=m_a )
+ m_c=m_c, m_a=m_a,
+ params=kwd )
## ---- Tool related -----------------------------------------------------
diff -r a2cfc332d7b8e0f3b765b5d5b3c5b012521c39c0 -r ddcce8b55152c47177d85ab1a2433f80f162b64f templates/webapps/galaxy/library/common/ldda_info.mako
--- a/templates/webapps/galaxy/library/common/ldda_info.mako
+++ b/templates/webapps/galaxy/library/common/ldda_info.mako
@@ -100,6 +100,13 @@
${ldda.get_size( nice_size=True )}
<div style="clear: both"></div></div>
+ %if ldda.dataset.uuid:
+ <div class="form-row">
+ <label>UUID:</label>
+ ${ldda.dataset.uuid}
+ <div style="clear: both"></div>
+ </div>
+ %endif
%if ldda.tool_version:
<div class="form-row"><label>Tool version:</label>
diff -r a2cfc332d7b8e0f3b765b5d5b3c5b012521c39c0 -r ddcce8b55152c47177d85ab1a2433f80f162b64f templates/webapps/galaxy/root/index.mako
--- a/templates/webapps/galaxy/root/index.mako
+++ b/templates/webapps/galaxy/root/index.mako
@@ -118,7 +118,7 @@
if trans.app.config.require_login and not trans.user:
center_url = h.url_for( controller='user', action='login' )
elif tool_id is not None:
- center_url = h.url_for( 'tool_runner', tool_id=tool_id, from_noframe=True )
+ center_url = h.url_for( 'tool_runner', tool_id=tool_id, from_noframe=True, **params )
elif workflow_id is not None:
center_url = h.url_for( controller='workflow', action='run', id=workflow_id )
elif m_c is not None:
https://bitbucket.org/galaxy/galaxy-central/commits/a37fbe9cbd93/
changeset: a37fbe9cbd93
user: dannon
date: 2013-03-08 20:15:11
summary: Merge
affected #: 6 files
diff -r ddcce8b55152c47177d85ab1a2433f80f162b64f -r a37fbe9cbd93a4e73a765d50be3aca4f31968f0b lib/galaxy/webapps/galaxy/controllers/tool_runner.py
--- a/lib/galaxy/webapps/galaxy/controllers/tool_runner.py
+++ b/lib/galaxy/webapps/galaxy/controllers/tool_runner.py
@@ -2,15 +2,20 @@
Upload class
"""
-from galaxy.web.base.controller import *
+import logging
+import galaxy.util
+
+from galaxy import web
+from galaxy.tools import DefaultToolState
+from galaxy.tools.actions import upload_common
+from galaxy.tools.parameters import params_to_incoming, visit_input_values
+from galaxy.tools.parameters.basic import DataToolParameter, UnvalidatedValue
from galaxy.util.bunch import Bunch
from galaxy.util.hash_util import is_hashable
-from galaxy.tools import DefaultToolState
-from galaxy.tools.parameters.basic import UnvalidatedValue
-from galaxy.tools.parameters import params_to_incoming
-from galaxy.tools.actions import upload_common
+from galaxy.web import error, url_for
+from galaxy.web.base.controller import BaseUIController
+from galaxy.web.form_builder import SelectField
-import logging
log = logging.getLogger( __name__ )
class AddFrameData:
@@ -47,7 +52,7 @@
tools = []
tool = None
# Backwards compatibility for datasource tools that have default tool_id configured, but which are now using only GALAXY_URL.
- tool_ids = util.listify( tool_id )
+ tool_ids = galaxy.util.listify( tool_id )
for tool_id in tool_ids:
if get_loaded_tools_by_lineage:
tools = toolbox.get_loaded_tools_by_lineage( tool_id )
@@ -83,7 +88,7 @@
message=message,
status=status,
redirect=redirect ) )
- params = util.Params( kwd, sanitize = False ) #Sanitize parameters when substituting into command line via input wrappers
+ params = galaxy.util.Params( kwd, sanitize = False ) #Sanitize parameters when substituting into command line via input wrappers
#do param translation here, used by datasource tools
if tool.input_translator:
tool.input_translator.translate( params )
@@ -103,7 +108,7 @@
toolbox=self.get_toolbox(),
tool_version_select_field=tool_version_select_field,
tool=tool,
- util=util,
+ util=galaxy.util,
add_frame=add_frame,
**vars )
@@ -215,7 +220,7 @@
#create an incoming object from the original job's dataset-modified param objects
incoming = {}
params_to_incoming( incoming, tool.inputs, params_objects, trans.app )
- incoming[ "tool_state" ] = util.object_to_string( state.encode( tool, trans.app ) )
+ incoming[ "tool_state" ] = galaxy.util.object_to_string( state.encode( tool, trans.app ) )
template, vars = tool.handle_input( trans, incoming, old_errors=upgrade_messages ) #update new state with old parameters
# Is the "add frame" stuff neccesary here?
add_frame = AddFrameData()
@@ -228,7 +233,7 @@
toolbox=self.get_toolbox(),
tool_version_select_field=tool_version_select_field,
tool=tool,
- util=util,
+ util=galaxy.util,
add_frame=add_frame,
tool_id_version_message=tool_id_version_message,
**vars )
@@ -288,9 +293,9 @@
tool = self.get_toolbox().get_tool( tool_id )
if not tool:
return False # bad tool_id
- nonfile_params = util.Params( kwd, sanitize=False )
+ nonfile_params = galaxy.util.Params( kwd, sanitize=False )
if kwd.get( 'tool_state', None ) not in ( None, 'None' ):
- encoded_state = util.string_to_object( kwd["tool_state"] )
+ encoded_state = galaxy.util.string_to_object( kwd["tool_state"] )
tool_state = DefaultToolState()
tool_state.decode( encoded_state, tool, trans.app )
else:
diff -r ddcce8b55152c47177d85ab1a2433f80f162b64f -r a37fbe9cbd93a4e73a765d50be3aca4f31968f0b test-data/sort_in1.bed
--- a/test-data/sort_in1.bed
+++ b/test-data/sort_in1.bed
@@ -1,3 +1,6 @@
+# comment 1 \n\n''" again
+# comment 2 **}"''' special
+# comment 3 @n/n""" characters
chr6 108298214 108386086 NM_007214 0 - 108299600 108385906 0 21 1530,105,99,102,159,174,60,83,148,155,93,133,95,109,51,59,62,113,115,100,304, 0,2490,6246,10831,12670,23164,23520,27331,31052,32526,34311,36130,36365,38609,41028,42398,43048,51479,54500,59097,87568,
chr6 108593954 108616704 NM_003269 0 + 108594662 108615360 0 9 733,146,88,236,147,97,150,106,1507, 0,5400,8778,10445,12037,14265,14749,15488,21243,
chr6 108639410 108689143 NM_152827 0 - 108640045 108688818 0 3 741,125,487, 0,2984,49246,
diff -r ddcce8b55152c47177d85ab1a2433f80f162b64f -r a37fbe9cbd93a4e73a765d50be3aca4f31968f0b test-data/sort_out1.bed
--- a/test-data/sort_out1.bed
+++ b/test-data/sort_out1.bed
@@ -1,29 +1,32 @@
+# comment 1 \n\n''" again
+# comment 2 **}"''' special
+# comment 3 @n/n""" characters
+chr1 148185113 148187485 NM_002796 0 + 148185136 148187378 0 7 163,207,147,82,117,89,120, 0,416,877,1199,1674,1977,2252,
+chr1 148077485 148111797 NM_002651 0 - 148078400 148111728 0 12 1097,121,133,266,124,105,110,228,228,45,937,77, 0,2081,2472,6871,9907,10257,11604,14199,15637,18274,23636,34235,
+chr1 147984101 148035079 BC007833 0 + 147984545 148033414 0 14 529,32,81,131,118,153,300,206,84,49,85,130,46,1668, 0,25695,28767,33118,33695,33998,35644,38005,39629,40577,41402,43885,48367,49310,
+chr1 147962006 147975713 NM_005997 0 - 147962192 147975670 0 6 574,145,177,115,153,160, 0,1543,7859,9048,9340,13547,
+chr2 220229182 220233943 NM_024536 0 - 220229609 220233765 0 4 1687,180,574,492, 0,1990,2660,4269,
+chr2 220108603 220116964 NM_001927 0 + 220108689 220116217 0 9 664,61,96,162,126,221,44,83,789, 0,1718,1874,2118,2451,2963,5400,7286,7572,
+chr2 118389378 118390700 BC005078 0 - 118390395 118390500 0 1 1322, 0,
+chr2 118288484 118306183 NM_006773 0 + 118288583 118304530 0 14 184,285,144,136,101,200,115,140,162,153,114,57,178,1796, 0,2765,4970,6482,6971,7183,7468,9890,10261,10768,11590,14270,14610,15903,
+chr5 131621285 131637046 NM_003687 0 + 131621326 131635821 0 7 134,152,82,179,164,118,1430, 0,4915,8770,13221,13609,14097,14331,
+chr5 131556201 131590458 NM_004199 0 - 131556601 131582218 0 15 471,97,69,66,54,100,71,177,194,240,138,152,97,100,170, 0,2316,2802,5596,6269,11138,11472,15098,16528,17674,21306,24587,25142,25935,34087,
+chr5 131424245 131426795 NM_000588 0 + 131424298 131426383 0 5 215,42,90,42,535, 0,313,1658,1872,2015,
+chr5 131170738 131357870 AF099740 0 - 131311206 131357817 0 31 112,124,120,81,65,40,120,129,61,88,94,79,72,102,144,117,89,73,96,135,135,78,74,52,33,179,100,102,65,115,248, 0,11593,44117,47607,104668,109739,114675,126366,135488,137518,138009,140437,152389,153373,155388,159269,160793,162981,164403,165577,166119,167611,169501,178260,179675,180901,181658,182260,182953,183706,186884,
+chr6 108722790 108950942 NM_145315 0 + 108722976 108950321 0 13 325,224,52,102,131,100,59,83,71,101,141,114,750, 0,28931,52094,60760,61796,71339,107102,152319,181970,182297,215317,224802,227402,
+chr6 108639410 108689143 NM_152827 0 - 108640045 108688818 0 3 741,125,487, 0,2984,49246,
+chr6 108593954 108616704 NM_003269 0 + 108594662 108615360 0 9 733,146,88,236,147,97,150,106,1507, 0,5400,8778,10445,12037,14265,14749,15488,21243,
chr6 108298214 108386086 NM_007214 0 - 108299600 108385906 0 21 1530,105,99,102,159,174,60,83,148,155,93,133,95,109,51,59,62,113,115,100,304, 0,2490,6246,10831,12670,23164,23520,27331,31052,32526,34311,36130,36365,38609,41028,42398,43048,51479,54500,59097,87568,
-chr6 108593954 108616704 NM_003269 0 + 108594662 108615360 0 9 733,146,88,236,147,97,150,106,1507, 0,5400,8778,10445,12037,14265,14749,15488,21243,
-chr6 108639410 108689143 NM_152827 0 - 108640045 108688818 0 3 741,125,487, 0,2984,49246,
-chr6 108722790 108950942 NM_145315 0 + 108722976 108950321 0 13 325,224,52,102,131,100,59,83,71,101,141,114,750, 0,28931,52094,60760,61796,71339,107102,152319,181970,182297,215317,224802,227402,
+chr7 116944658 117107512 AF377960 0 - 116945541 116979926 0 23 1129,102,133,64,186,206,179,188,153,100,87,80,96,276,118,255,151,100,204,1654,225,108,173, 0,7364,8850,10413,13893,14398,17435,24259,24615,35177,35359,45901,47221,49781,56405,66857,69787,72208,73597,80474,100111,150555,162681,
+chr7 116713967 116902666 NM_000492 0 + 116714099 116901113 0 27 185,111,109,216,90,164,126,247,93,183,192,95,87,724,129,38,251,80,151,228,101,249,156,90,173,106,1754, 0,24290,29071,50936,54313,55285,56585,60137,62053,68678,79501,107776,110390,111971,114967,122863,123569,126711,130556,131618,134650,147559,162475,172879,184725,185496,186945,
+chr7 116511232 116557294 NM_003391 0 - 116512159 116556994 0 5 1157,265,278,227,383, 0,20384,37843,43339,45679,
chr7 113320332 113924911 AK131266 0 + 113862563 113893433 0 20 285,91,178,90,58,75,138,51,201,178,214,105,88,84,77,102,122,70,164,1124, 0,201692,340175,448290,451999,484480,542213,543265,543478,545201,556083,558358,565876,567599,573029,573245,575738,577123,577946,603455,
-chr7 116511232 116557294 NM_003391 0 - 116512159 116556994 0 5 1157,265,278,227,383, 0,20384,37843,43339,45679,
-chr7 116713967 116902666 NM_000492 0 + 116714099 116901113 0 27 185,111,109,216,90,164,126,247,93,183,192,95,87,724,129,38,251,80,151,228,101,249,156,90,173,106,1754, 0,24290,29071,50936,54313,55285,56585,60137,62053,68678,79501,107776,110390,111971,114967,122863,123569,126711,130556,131618,134650,147559,162475,172879,184725,185496,186945,
-chr7 116944658 117107512 AF377960 0 - 116945541 116979926 0 23 1129,102,133,64,186,206,179,188,153,100,87,80,96,276,118,255,151,100,204,1654,225,108,173, 0,7364,8850,10413,13893,14398,17435,24259,24615,35177,35359,45901,47221,49781,56405,66857,69787,72208,73597,80474,100111,150555,162681,
-chr2 118288484 118306183 NM_006773 0 + 118288583 118304530 0 14 184,285,144,136,101,200,115,140,162,153,114,57,178,1796, 0,2765,4970,6482,6971,7183,7468,9890,10261,10768,11590,14270,14610,15903,
-chr2 118389378 118390700 BC005078 0 - 118390395 118390500 0 1 1322, 0,
chr8 118880786 119193239 NM_000127 0 - 118881131 119192466 0 11 531,172,161,90,96,119,133,120,108,94,1735, 0,5355,7850,13505,19068,20309,23098,30863,36077,37741,310718,
+chr9 128849867 128870133 NM_020145 0 - 128850516 128869987 0 11 757,241,101,90,24,63,93,134,129,142,209, 0,1071,1736,2085,2635,4201,6376,6736,13056,14247,20057,
+chr9 128789530 128848928 NM_015354 0 + 128789552 128848511 0 44 54,55,74,85,81,45,93,120,212,115,201,90,66,120,127,153,127,88,77,115,121,67,129,140,107,207,170,70,68,196,78,86,146,182,201,93,159,138,75,228,132,74,130,594, 0,1491,5075,8652,9254,10312,11104,11317,20808,21702,23060,25462,31564,32908,33566,34851,35204,35595,35776,37202,38860,39111,39891,40349,42422,45499,45827,46675,47158,47621,50453,50840,51474,51926,53831,54186,55119,55619,57449,57605,57947,58352,58541,58804,
+chr9 128787362 128789566 NM_014908 0 - 128787519 128789136 0 1 2204, 0,
+chr9 128763240 128783870 NM_174933 0 + 128764156 128783586 0 12 261,118,74,159,76,48,56,63,129,117,127,370, 0,522,875,5630,12374,12603,15040,15175,18961,19191,20037,20260,
+chrX 152693677 152712545 NM_001666 0 - 152694029 152712503 0 22 586,100,93,184,74,234,106,135,78,61,103,28,85,192,102,222,129,183,63,163,205,109, 0,1693,2066,2364,2635,2794,3129,3323,3545,3752,5323,5647,5841,6032,6401,11455,11778,13249,13719,13987,14227,18759,
+chrX 152691216 152693487 NM_000054 0 + 152691446 152693029 0 3 255,885,664, 0,616,1607,
+chrX 152648233 152662158 NM_000425 0 - 152648964 152662138 0 28 963,12,73,135,156,120,174,123,202,116,223,71,198,111,125,157,167,112,144,132,185,112,171,123,203,106,11,100, 0,1436,1545,1951,2390,2653,2889,3156,3367,3772,4717,5122,5424,5868,6066,6370,6629,6909,7588,7871,8124,8456,8858,9125,10220,10660,11296,13825,
chrX 122719582 122773357 NM_001167 0 + 122745047 122766566 0 7 96,909,100,79,43,201,6985, 0,25433,28421,31040,32533,40295,46790,
-chr9 128763240 128783870 NM_174933 0 + 128764156 128783586 0 12 261,118,74,159,76,48,56,63,129,117,127,370, 0,522,875,5630,12374,12603,15040,15175,18961,19191,20037,20260,
-chr9 128787362 128789566 NM_014908 0 - 128787519 128789136 0 1 2204, 0,
-chr9 128789530 128848928 NM_015354 0 + 128789552 128848511 0 44 54,55,74,85,81,45,93,120,212,115,201,90,66,120,127,153,127,88,77,115,121,67,129,140,107,207,170,70,68,196,78,86,146,182,201,93,159,138,75,228,132,74,130,594, 0,1491,5075,8652,9254,10312,11104,11317,20808,21702,23060,25462,31564,32908,33566,34851,35204,35595,35776,37202,38860,39111,39891,40349,42422,45499,45827,46675,47158,47621,50453,50840,51474,51926,53831,54186,55119,55619,57449,57605,57947,58352,58541,58804,
-chr9 128849867 128870133 NM_020145 0 - 128850516 128869987 0 11 757,241,101,90,24,63,93,134,129,142,209, 0,1071,1736,2085,2635,4201,6376,6736,13056,14247,20057,
-chr5 131170738 131357870 AF099740 0 - 131311206 131357817 0 31 112,124,120,81,65,40,120,129,61,88,94,79,72,102,144,117,89,73,96,135,135,78,74,52,33,179,100,102,65,115,248, 0,11593,44117,47607,104668,109739,114675,126366,135488,137518,138009,140437,152389,153373,155388,159269,160793,162981,164403,165577,166119,167611,169501,178260,179675,180901,181658,182260,182953,183706,186884,
-chr5 131424245 131426795 NM_000588 0 + 131424298 131426383 0 5 215,42,90,42,535, 0,313,1658,1872,2015,
-chr5 131556201 131590458 NM_004199 0 - 131556601 131582218 0 15 471,97,69,66,54,100,71,177,194,240,138,152,97,100,170, 0,2316,2802,5596,6269,11138,11472,15098,16528,17674,21306,24587,25142,25935,34087,
-chr5 131621285 131637046 NM_003687 0 + 131621326 131635821 0 7 134,152,82,179,164,118,1430, 0,4915,8770,13221,13609,14097,14331,
-chr1 147962006 147975713 NM_005997 0 - 147962192 147975670 0 6 574,145,177,115,153,160, 0,1543,7859,9048,9340,13547,
-chr1 147984101 148035079 BC007833 0 + 147984545 148033414 0 14 529,32,81,131,118,153,300,206,84,49,85,130,46,1668, 0,25695,28767,33118,33695,33998,35644,38005,39629,40577,41402,43885,48367,49310,
-chr1 148077485 148111797 NM_002651 0 - 148078400 148111728 0 12 1097,121,133,266,124,105,110,228,228,45,937,77, 0,2081,2472,6871,9907,10257,11604,14199,15637,18274,23636,34235,
-chr1 148185113 148187485 NM_002796 0 + 148185136 148187378 0 7 163,207,147,82,117,89,120, 0,416,877,1199,1674,1977,2252,
-chrX 152648233 152662158 NM_000425 0 - 152648964 152662138 0 28 963,12,73,135,156,120,174,123,202,116,223,71,198,111,125,157,167,112,144,132,185,112,171,123,203,106,11,100, 0,1436,1545,1951,2390,2653,2889,3156,3367,3772,4717,5122,5424,5868,6066,6370,6629,6909,7588,7871,8124,8456,8858,9125,10220,10660,11296,13825,
-chrX 152691216 152693487 NM_000054 0 + 152691446 152693029 0 3 255,885,664, 0,616,1607,
-chrX 152693677 152712545 NM_001666 0 - 152694029 152712503 0 22 586,100,93,184,74,234,106,135,78,61,103,28,85,192,102,222,129,183,63,163,205,109, 0,1693,2066,2364,2635,2794,3129,3323,3545,3752,5323,5647,5841,6032,6401,11455,11778,13249,13719,13987,14227,18759,
-chr2 220108603 220116964 NM_001927 0 + 220108689 220116217 0 9 664,61,96,162,126,221,44,83,789, 0,1718,1874,2118,2451,2963,5400,7286,7572,
-chr2 220229182 220233943 NM_024536 0 - 220229609 220233765 0 4 1687,180,574,492, 0,1990,2660,4269,
diff -r ddcce8b55152c47177d85ab1a2433f80f162b64f -r a37fbe9cbd93a4e73a765d50be3aca4f31968f0b test-data/sort_out2.bed
--- a/test-data/sort_out2.bed
+++ b/test-data/sort_out2.bed
@@ -1,3 +1,18 @@
+# comment 1 \n\n''" again
+# comment 2 **}"''' special
+# comment 3 @n/n""" characters
+chr1 147962006 147975713 NM_005997 0 - 147962192 147975670 0 6 574,145,177,115,153,160, 0,1543,7859,9048,9340,13547,
+chr1 147984101 148035079 BC007833 0 + 147984545 148033414 0 14 529,32,81,131,118,153,300,206,84,49,85,130,46,1668, 0,25695,28767,33118,33695,33998,35644,38005,39629,40577,41402,43885,48367,49310,
+chr1 148077485 148111797 NM_002651 0 - 148078400 148111728 0 12 1097,121,133,266,124,105,110,228,228,45,937,77, 0,2081,2472,6871,9907,10257,11604,14199,15637,18274,23636,34235,
+chr1 148185113 148187485 NM_002796 0 + 148185136 148187378 0 7 163,207,147,82,117,89,120, 0,416,877,1199,1674,1977,2252,
+chr2 118288484 118306183 NM_006773 0 + 118288583 118304530 0 14 184,285,144,136,101,200,115,140,162,153,114,57,178,1796, 0,2765,4970,6482,6971,7183,7468,9890,10261,10768,11590,14270,14610,15903,
+chr2 118389378 118390700 BC005078 0 - 118390395 118390500 0 1 1322, 0,
+chr2 220108603 220116964 NM_001927 0 + 220108689 220116217 0 9 664,61,96,162,126,221,44,83,789, 0,1718,1874,2118,2451,2963,5400,7286,7572,
+chr2 220229182 220233943 NM_024536 0 - 220229609 220233765 0 4 1687,180,574,492, 0,1990,2660,4269,
+chr5 131170738 131357870 AF099740 0 - 131311206 131357817 0 31 112,124,120,81,65,40,120,129,61,88,94,79,72,102,144,117,89,73,96,135,135,78,74,52,33,179,100,102,65,115,248, 0,11593,44117,47607,104668,109739,114675,126366,135488,137518,138009,140437,152389,153373,155388,159269,160793,162981,164403,165577,166119,167611,169501,178260,179675,180901,181658,182260,182953,183706,186884,
+chr5 131424245 131426795 NM_000588 0 + 131424298 131426383 0 5 215,42,90,42,535, 0,313,1658,1872,2015,
+chr5 131556201 131590458 NM_004199 0 - 131556601 131582218 0 15 471,97,69,66,54,100,71,177,194,240,138,152,97,100,170, 0,2316,2802,5596,6269,11138,11472,15098,16528,17674,21306,24587,25142,25935,34087,
+chr5 131621285 131637046 NM_003687 0 + 131621326 131635821 0 7 134,152,82,179,164,118,1430, 0,4915,8770,13221,13609,14097,14331,
chr6 108298214 108386086 NM_007214 0 - 108299600 108385906 0 21 1530,105,99,102,159,174,60,83,148,155,93,133,95,109,51,59,62,113,115,100,304, 0,2490,6246,10831,12670,23164,23520,27331,31052,32526,34311,36130,36365,38609,41028,42398,43048,51479,54500,59097,87568,
chr6 108593954 108616704 NM_003269 0 + 108594662 108615360 0 9 733,146,88,236,147,97,150,106,1507, 0,5400,8778,10445,12037,14265,14749,15488,21243,
chr6 108639410 108689143 NM_152827 0 - 108640045 108688818 0 3 741,125,487, 0,2984,49246,
@@ -6,24 +21,12 @@
chr7 116511232 116557294 NM_003391 0 - 116512159 116556994 0 5 1157,265,278,227,383, 0,20384,37843,43339,45679,
chr7 116713967 116902666 NM_000492 0 + 116714099 116901113 0 27 185,111,109,216,90,164,126,247,93,183,192,95,87,724,129,38,251,80,151,228,101,249,156,90,173,106,1754, 0,24290,29071,50936,54313,55285,56585,60137,62053,68678,79501,107776,110390,111971,114967,122863,123569,126711,130556,131618,134650,147559,162475,172879,184725,185496,186945,
chr7 116944658 117107512 AF377960 0 - 116945541 116979926 0 23 1129,102,133,64,186,206,179,188,153,100,87,80,96,276,118,255,151,100,204,1654,225,108,173, 0,7364,8850,10413,13893,14398,17435,24259,24615,35177,35359,45901,47221,49781,56405,66857,69787,72208,73597,80474,100111,150555,162681,
-chr2 118288484 118306183 NM_006773 0 + 118288583 118304530 0 14 184,285,144,136,101,200,115,140,162,153,114,57,178,1796, 0,2765,4970,6482,6971,7183,7468,9890,10261,10768,11590,14270,14610,15903,
-chr2 118389378 118390700 BC005078 0 - 118390395 118390500 0 1 1322, 0,
chr8 118880786 119193239 NM_000127 0 - 118881131 119192466 0 11 531,172,161,90,96,119,133,120,108,94,1735, 0,5355,7850,13505,19068,20309,23098,30863,36077,37741,310718,
-chrX 122719582 122773357 NM_001167 0 + 122745047 122766566 0 7 96,909,100,79,43,201,6985, 0,25433,28421,31040,32533,40295,46790,
chr9 128763240 128783870 NM_174933 0 + 128764156 128783586 0 12 261,118,74,159,76,48,56,63,129,117,127,370, 0,522,875,5630,12374,12603,15040,15175,18961,19191,20037,20260,
chr9 128787362 128789566 NM_014908 0 - 128787519 128789136 0 1 2204, 0,
chr9 128789530 128848928 NM_015354 0 + 128789552 128848511 0 44 54,55,74,85,81,45,93,120,212,115,201,90,66,120,127,153,127,88,77,115,121,67,129,140,107,207,170,70,68,196,78,86,146,182,201,93,159,138,75,228,132,74,130,594, 0,1491,5075,8652,9254,10312,11104,11317,20808,21702,23060,25462,31564,32908,33566,34851,35204,35595,35776,37202,38860,39111,39891,40349,42422,45499,45827,46675,47158,47621,50453,50840,51474,51926,53831,54186,55119,55619,57449,57605,57947,58352,58541,58804,
chr9 128849867 128870133 NM_020145 0 - 128850516 128869987 0 11 757,241,101,90,24,63,93,134,129,142,209, 0,1071,1736,2085,2635,4201,6376,6736,13056,14247,20057,
-chr5 131170738 131357870 AF099740 0 - 131311206 131357817 0 31 112,124,120,81,65,40,120,129,61,88,94,79,72,102,144,117,89,73,96,135,135,78,74,52,33,179,100,102,65,115,248, 0,11593,44117,47607,104668,109739,114675,126366,135488,137518,138009,140437,152389,153373,155388,159269,160793,162981,164403,165577,166119,167611,169501,178260,179675,180901,181658,182260,182953,183706,186884,
-chr5 131424245 131426795 NM_000588 0 + 131424298 131426383 0 5 215,42,90,42,535, 0,313,1658,1872,2015,
-chr5 131556201 131590458 NM_004199 0 - 131556601 131582218 0 15 471,97,69,66,54,100,71,177,194,240,138,152,97,100,170, 0,2316,2802,5596,6269,11138,11472,15098,16528,17674,21306,24587,25142,25935,34087,
-chr5 131621285 131637046 NM_003687 0 + 131621326 131635821 0 7 134,152,82,179,164,118,1430, 0,4915,8770,13221,13609,14097,14331,
-chr1 147962006 147975713 NM_005997 0 - 147962192 147975670 0 6 574,145,177,115,153,160, 0,1543,7859,9048,9340,13547,
-chr1 147984101 148035079 BC007833 0 + 147984545 148033414 0 14 529,32,81,131,118,153,300,206,84,49,85,130,46,1668, 0,25695,28767,33118,33695,33998,35644,38005,39629,40577,41402,43885,48367,49310,
-chr1 148077485 148111797 NM_002651 0 - 148078400 148111728 0 12 1097,121,133,266,124,105,110,228,228,45,937,77, 0,2081,2472,6871,9907,10257,11604,14199,15637,18274,23636,34235,
-chr1 148185113 148187485 NM_002796 0 + 148185136 148187378 0 7 163,207,147,82,117,89,120, 0,416,877,1199,1674,1977,2252,
+chrX 122719582 122773357 NM_001167 0 + 122745047 122766566 0 7 96,909,100,79,43,201,6985, 0,25433,28421,31040,32533,40295,46790,
chrX 152648233 152662158 NM_000425 0 - 152648964 152662138 0 28 963,12,73,135,156,120,174,123,202,116,223,71,198,111,125,157,167,112,144,132,185,112,171,123,203,106,11,100, 0,1436,1545,1951,2390,2653,2889,3156,3367,3772,4717,5122,5424,5868,6066,6370,6629,6909,7588,7871,8124,8456,8858,9125,10220,10660,11296,13825,
chrX 152691216 152693487 NM_000054 0 + 152691446 152693029 0 3 255,885,664, 0,616,1607,
chrX 152693677 152712545 NM_001666 0 - 152694029 152712503 0 22 586,100,93,184,74,234,106,135,78,61,103,28,85,192,102,222,129,183,63,163,205,109, 0,1693,2066,2364,2635,2794,3129,3323,3545,3752,5323,5647,5841,6032,6401,11455,11778,13249,13719,13987,14227,18759,
-chr2 220108603 220116964 NM_001927 0 + 220108689 220116217 0 9 664,61,96,162,126,221,44,83,789, 0,1718,1874,2118,2451,2963,5400,7286,7572,
-chr2 220229182 220233943 NM_024536 0 - 220229609 220233765 0 4 1687,180,574,492, 0,1990,2660,4269,
diff -r ddcce8b55152c47177d85ab1a2433f80f162b64f -r a37fbe9cbd93a4e73a765d50be3aca4f31968f0b tools/filters/sorter.py
--- a/tools/filters/sorter.py
+++ b/tools/filters/sorter.py
@@ -1,49 +1,59 @@
"""
-Sorts tabular data on one or more columns.
+ Sorts tabular data on one or more columns. All comments of the file are collected
+ and placed at the beginning of the sorted output file.
+
+ usage: sorter.py [options]
+ -i, --input: Tabular file to be sorted
+ -o, --output: Sorted output file
+ -k, --key: Key (see manual for bash/sort)
+
+ usage: sorter.py input output [key ...]
+"""
+# 03/05/2013 guerler
-usage: %prog [options]
- -i, --input=i: Tabular file to be sorted
- -o, --out_file1=o: Sorted output file
- -c, --column=c: First column to sort on
- -s, --style=s: Sort style (numerical or alphabetical)
- -r, --order=r: Order (ASC or DESC)
+# imports
+import os, re, string, sys
+from optparse import OptionParser
-usage: %prog input out_file1 column style order [column style ...]
-"""
-
-import os, re, string, sys
-from galaxy import eggs
-import pkg_resources; pkg_resources.require( "bx-python" )
-from bx.cookbook import doc_optparse
-
+# error
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
+# main
def main():
- #Parse Command Line
- options, args = doc_optparse.parse( __doc__ )
+ # define options
+ parser = OptionParser()
+ parser.add_option("-i", "--input")
+ parser.add_option("-o", "--output")
+ parser.add_option("-k", "--key", action="append")
+
+ # parse
+ options, args = parser.parse_args()
+
try:
- inputfile = options.input
- outputfile = '-o %s' % options.out_file1
- columns = [options.column]
- styles = [('','n')[options.style == 'num']]
- orders = [('','r')[options.order == 'DESC']]
- col_style_orders = sys.argv[6:]
- if len(col_style_orders) > 1:
- columns.extend([col_style_orders[i] for i in range(0,len(col_style_orders),3)])
- styles.extend([('','n')[col_style_orders[i] == 'num'] for i in range(1,len(col_style_orders),3)])
- orders.extend([('','r')[col_style_orders[i] == 'DESC'] for i in range(2,len(col_style_orders),3)])
- cols = [ '-k%s,%s%s%s'%(columns[i], columns[i], styles[i], orders[i]) for i in range(len(columns)) ]
+ # retrieve options
+ input = options.input
+ output = options.output
+ key = [" -k" + k for k in options.key]
+
+ # grep comments
+ grep_comments = "(grep '^#' %s) > %s" % (input, output)
+ #print grep_comments
+
+ # grep and sort columns
+ sort_columns = "(grep '^[^#]' %s | sort -f -t '\t' %s) >> %s" % (input, ' '.join(key), output)
+ #print sort_columns
+
+ # execute
+ os.system(grep_comments)
+ os.system(sort_columns)
+
except Exception, ex:
- stop_err('Error parsing input parameters\n' + str(ex))
+ stop_err('Error running sorter.py\n' + str(ex))
- # Launch sort.
- cmd = "sort -f -t ' ' %s %s %s" % (' '.join(cols), outputfile, inputfile)
- try:
- os.system(cmd)
- except Exception, ex:
- stop_err('Error running sort command\n' + str(ex))
+ # exit
+ sys.exit(0)
if __name__ == "__main__":
main()
diff -r ddcce8b55152c47177d85ab1a2433f80f162b64f -r a37fbe9cbd93a4e73a765d50be3aca4f31968f0b tools/filters/sorter.xml
--- a/tools/filters/sorter.xml
+++ b/tools/filters/sorter.xml
@@ -1,130 +1,61 @@
-<tool id="sort1" name="Sort" version="1.0.1">
- <description>data in ascending or descending order</description>
- <command interpreter="python">
- sorter.py
- --input=$input
- --out_file1=$out_file1
- --column=$column
- --style=$style
- --order=$order
- #for $col in $column_set:
- ${col.other_column}
- ${col.other_style}
- ${col.other_order}
- #end for
- </command>
- <inputs>
- <param format="tabular" name="input" type="data" label="Sort Dataset" />
- <param name="column" label="on column" type="data_column" data_ref="input" accept_default="true"/>
- <param name="style" type="select" label="with flavor">
- <option value="num">Numerical sort</option>
- <option value="alpha">Alphabetical sort</option>
- </param>
- <param name="order" type="select" label="everything in">
- <option value="DESC">Descending order</option>
- <option value="ASC">Ascending order</option>
- </param>
- <repeat name="column_set" title="Column selection">
- <param name="other_column" label="on column" type="data_column" data_ref="input" accept_default="true" />
- <param name="other_style" type="select" label="with flavor">
- <option value="num">Numerical sort</option>
- <option value="alpha">Alphabetical sort</option>
- </param>
- <param name="other_order" type="select" label="everything in">
- <option value="DESC">Descending order</option>
- <option value="ASC">Ascending order</option>
- </param>
- </repeat>
- </inputs>
- <outputs>
- <data format="input" name="out_file1" metadata_source="input"/>
- </outputs>
- <tests>
- <test>
- <param name="input" value="sort_in1.bed"/>
- <param name="column" value="1"/>
- <param name="style" value="num"/>
- <param name="order" value="ASC"/>
- <param name="other_column" value="3"/>
- <param name="other_style" value="num"/>
- <param name="other_order" value="ASC"/>
- <output name="out_file1" file="sort_out1.bed"/>
- </test>
- <test>
- <param name="input" value="sort_in1.bed"/>
- <param name="column" value="3"/>
- <param name="style" value="alpha"/>
- <param name="order" value="ASC"/>
- <param name="other_column" value="1"/>
- <param name="other_style" value="alpha"/>
- <param name="other_order" value="ASC"/>
- <output name="out_file1" file="sort_out2.bed"/>
- </test>
- </tests>
- <help>
+<tool id="sort1" name="Sort" version="1.0.2">
+ <description>data in ascending or descending order</description>
+ <command interpreter="python">
+ sorter.py
+ --input=$input
+ --output=$output
+ --key=$column,$column$style$order
+ #for $col in $column_set:
+ --key=${col.other_column},${col.other_column}${col.other_style}${col.other_order}
+ #end for
+ </command>
+ <inputs>
+ <param format="tabular" name="input" type="data" label="Sort Dataset" />
+ <param name="column" label="on column" type="data_column" data_ref="input" accept_default="true"/>
+ <param name="style" type="select" label="with flavor">
+ <option value="n">Numerical sort</option>
+ <option value="">Alphabetical sort</option>
+ </param>
+ <param name="order" type="select" label="everything in">
+ <option value="r">Descending order</option>
+ <option value="">Ascending order</option>
+ </param>
+ <repeat name="column_set" title="Column selection">
+ <param name="other_column" label="on column" type="data_column" data_ref="input" accept_default="true" />
+ <param name="other_style" type="select" label="with flavor">
+ <option value="n">Numerical sort</option>
+ <option value="">Alphabetical sort</option>
+ </param>
+ <param name="other_order" type="select" label="everything in">
+ <option value="r">Descending order</option>
+ <option value="">Ascending order</option>
+ </param>
+ </repeat>
+ </inputs>
+ <outputs>
+ <data format="input" name="output" metadata_source="input"/>
+ </outputs>
+ <tests>
+ <test>
+ <param name="input" value="sort_in1.bed"/>
+ <param name="column" value="1"/>
+ <param name="style" value=""/>
+ <param name="order" value=""/>
+ <param name="other_column" value="3"/>
+ <param name="other_style" value="n"/>
+ <param name="other_order" value="r"/>
+ <output name="output" file="sort_out1.bed"/>
+ </test>
+ <test>
+ <param name="input" value="sort_in1.bed"/>
+ <param name="column" value="1"/>
+ <param name="style" value=""/>
+ <param name="order" value=""/>
+ <param name="other_column" value="3"/>
+ <param name="other_style" value="n"/>
+ <param name="other_order" value=""/>
+ <output name="output" file="sort_out2.bed"/>
+ </test>
+ </tests>
-.. class:: infomark
-
-**TIP:** If your data is not TAB delimited, use *Text Manipulation->Convert*
-
------
-
-**Syntax**
-
-This tool sorts the dataset on any number of columns in either ascending or descending order.
-
-* Numerical sort orders numbers by their magnitude, ignores all characters besides numbers, and evaluates a string of numbers to the value they signify.
-* Alphabetical sort is a phonebook type sort based on the conventional order of letters in an alphabet. Each nth letter is compared with the nth letter of other words in the list, starting at the first letter of each word and advancing to the second, third, fourth, and so on, until the order is established. Therefore, in an alphabetical sort, 2 comes after 100 (1 < 2).
-
------
-
-**Examples**
-
-The list of numbers 4,17,3,5 collates to 3,4,5,17 by numerical sorting, while it collates to 17,3,4,5 by alphabetical sorting.
-
-Sorting the following::
-
- Q d 7 II jhu 45
- A kk 4 I h 111
- Pd p 1 ktY WS 113
- A g 10 H ZZ 856
- A edf 4 tw b 234
- BBB rt 10 H ZZ 100
- A rew 10 d b 1111
- C sd 19 YH aa 10
- Hah c 23 ver bb 467
- MN gtr 1 a X 32
- N j 9 a T 205
- BBB rrf 10 b Z 134
- odfr ws 6 Weg dew 201
- C f 3 WW SW 34
- A jhg 4 I b 345
- Pd gf 7 Gthe de 567
- rS hty 90 YY LOp 89
- A g 10 H h 43
- A g 4 I h 500
-
-on columns 1 (alpha), 3 (num), and 6 (num) in ascending order will yield::
-
- A kk 4 I h 111
- A edf 4 tw b 234
- A jhg 4 I b 345
- A g 4 I h 500
- A g 10 H h 43
- A g 10 H ZZ 856
- A rew 10 d b 1111
- BBB rt 10 H ZZ 100
- BBB rrf 10 b Z 134
- C f 3 WW SW 34
- C sd 19 YH aa 10
- Hah c 23 ver bb 467
- MN gtr 1 a X 32
- N j 9 a T 205
- odfr ws 6 Weg dew 201
- Pd p 1 ktY WS 113
- Pd gf 7 Gthe de 567
- Q d 7 II jhu 45
- rS hty 90 YY LOp 89
-
- </help></tool>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: dannon: Remove import * from tool_runner, fixes rerun (missing visit_input_values function) after recent import refactoring.
by commits-noreply@bitbucket.org 08 Mar '13
by commits-noreply@bitbucket.org 08 Mar '13
08 Mar '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/57c80f54cd51/
changeset: 57c80f54cd51
user: dannon
date: 2013-03-08 18:29:52
summary: Remove import * from tool_runner, fixes rerun (missing visit_input_values function) after recent import refactoring.
affected #: 1 file
diff -r 3317f0d7f848445cff6b384bf5a1915db677d139 -r 57c80f54cd51a5b427692df6d29513c88554a112 lib/galaxy/webapps/galaxy/controllers/tool_runner.py
--- a/lib/galaxy/webapps/galaxy/controllers/tool_runner.py
+++ b/lib/galaxy/webapps/galaxy/controllers/tool_runner.py
@@ -2,15 +2,20 @@
Upload class
"""
-from galaxy.web.base.controller import *
+import logging
+import galaxy.util
+
+from galaxy import web
+from galaxy.tools import DefaultToolState
+from galaxy.tools.actions import upload_common
+from galaxy.tools.parameters import params_to_incoming, visit_input_values
+from galaxy.tools.parameters.basic import DataToolParameter, UnvalidatedValue
from galaxy.util.bunch import Bunch
from galaxy.util.hash_util import is_hashable
-from galaxy.tools import DefaultToolState
-from galaxy.tools.parameters.basic import UnvalidatedValue
-from galaxy.tools.parameters import params_to_incoming
-from galaxy.tools.actions import upload_common
+from galaxy.web import error, url_for
+from galaxy.web.base.controller import BaseUIController
+from galaxy.web.form_builder import SelectField
-import logging
log = logging.getLogger( __name__ )
class AddFrameData:
@@ -47,7 +52,7 @@
tools = []
tool = None
# Backwards compatibility for datasource tools that have default tool_id configured, but which are now using only GALAXY_URL.
- tool_ids = util.listify( tool_id )
+ tool_ids = galaxy.util.listify( tool_id )
for tool_id in tool_ids:
if get_loaded_tools_by_lineage:
tools = toolbox.get_loaded_tools_by_lineage( tool_id )
@@ -83,7 +88,7 @@
message=message,
status=status,
redirect=redirect ) )
- params = util.Params( kwd, sanitize = False ) #Sanitize parameters when substituting into command line via input wrappers
+ params = galaxy.util.Params( kwd, sanitize = False ) #Sanitize parameters when substituting into command line via input wrappers
#do param translation here, used by datasource tools
if tool.input_translator:
tool.input_translator.translate( params )
@@ -103,7 +108,7 @@
toolbox=self.get_toolbox(),
tool_version_select_field=tool_version_select_field,
tool=tool,
- util=util,
+ util=galaxy.util,
add_frame=add_frame,
**vars )
@@ -215,7 +220,7 @@
#create an incoming object from the original job's dataset-modified param objects
incoming = {}
params_to_incoming( incoming, tool.inputs, params_objects, trans.app )
- incoming[ "tool_state" ] = util.object_to_string( state.encode( tool, trans.app ) )
+ incoming[ "tool_state" ] = galaxy.util.object_to_string( state.encode( tool, trans.app ) )
template, vars = tool.handle_input( trans, incoming, old_errors=upgrade_messages ) #update new state with old parameters
# Is the "add frame" stuff neccesary here?
add_frame = AddFrameData()
@@ -228,7 +233,7 @@
toolbox=self.get_toolbox(),
tool_version_select_field=tool_version_select_field,
tool=tool,
- util=util,
+ util=galaxy.util,
add_frame=add_frame,
tool_id_version_message=tool_id_version_message,
**vars )
@@ -288,9 +293,9 @@
tool = self.get_toolbox().get_tool( tool_id )
if not tool:
return False # bad tool_id
- nonfile_params = util.Params( kwd, sanitize=False )
+ nonfile_params = galaxy.util.Params( kwd, sanitize=False )
if kwd.get( 'tool_state', None ) not in ( None, 'None' ):
- encoded_state = util.string_to_object( kwd["tool_state"] )
+ encoded_state = galaxy.util.string_to_object( kwd["tool_state"] )
tool_state = DefaultToolState()
tool_state.decode( encoded_state, tool, trans.app )
else:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: guerler: sort: update log to browser
by commits-noreply@bitbucket.org 08 Mar '13
by commits-noreply@bitbucket.org 08 Mar '13
08 Mar '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/3317f0d7f848/
changeset: 3317f0d7f848
user: guerler
date: 2013-03-08 18:10:19
summary: sort: update log to browser
affected #: 1 file
diff -r 7f28b9616f70d4c6515c6fd88061374a2b01a283 -r 3317f0d7f848445cff6b384bf5a1915db677d139 tools/filters/sorter.py
--- a/tools/filters/sorter.py
+++ b/tools/filters/sorter.py
@@ -35,15 +35,15 @@
# retrieve options
input = options.input
output = options.output
- key = ["-k" + k for k in options.key]
+ key = [" -k" + k for k in options.key]
# grep comments
grep_comments = "(grep '^#' %s) > %s" % (input, output)
- print grep_comments
+ #print grep_comments
# grep and sort columns
sort_columns = "(grep '^[^#]' %s | sort -f -t '\t' %s) >> %s" % (input, ' '.join(key), output)
- print sort_columns
+ #print sort_columns
# execute
os.system(grep_comments)
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: guerler: sort: preserves comments in sorted output
by commits-noreply@bitbucket.org 08 Mar '13
by commits-noreply@bitbucket.org 08 Mar '13
08 Mar '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/7f28b9616f70/
changeset: 7f28b9616f70
user: guerler
date: 2013-03-08 17:52:37
summary: sort: preserves comments in sorted output
affected #: 5 files
diff -r 7fe3612562eea76387e88ab7d184fccc3160cb43 -r 7f28b9616f70d4c6515c6fd88061374a2b01a283 test-data/sort_in1.bed
--- a/test-data/sort_in1.bed
+++ b/test-data/sort_in1.bed
@@ -1,3 +1,6 @@
+# comment 1 \n\n''" again
+# comment 2 **}"''' special
+# comment 3 @n/n""" characters
chr6 108298214 108386086 NM_007214 0 - 108299600 108385906 0 21 1530,105,99,102,159,174,60,83,148,155,93,133,95,109,51,59,62,113,115,100,304, 0,2490,6246,10831,12670,23164,23520,27331,31052,32526,34311,36130,36365,38609,41028,42398,43048,51479,54500,59097,87568,
chr6 108593954 108616704 NM_003269 0 + 108594662 108615360 0 9 733,146,88,236,147,97,150,106,1507, 0,5400,8778,10445,12037,14265,14749,15488,21243,
chr6 108639410 108689143 NM_152827 0 - 108640045 108688818 0 3 741,125,487, 0,2984,49246,
diff -r 7fe3612562eea76387e88ab7d184fccc3160cb43 -r 7f28b9616f70d4c6515c6fd88061374a2b01a283 test-data/sort_out1.bed
--- a/test-data/sort_out1.bed
+++ b/test-data/sort_out1.bed
@@ -1,29 +1,32 @@
+# comment 1 \n\n''" again
+# comment 2 **}"''' special
+# comment 3 @n/n""" characters
+chr1 148185113 148187485 NM_002796 0 + 148185136 148187378 0 7 163,207,147,82,117,89,120, 0,416,877,1199,1674,1977,2252,
+chr1 148077485 148111797 NM_002651 0 - 148078400 148111728 0 12 1097,121,133,266,124,105,110,228,228,45,937,77, 0,2081,2472,6871,9907,10257,11604,14199,15637,18274,23636,34235,
+chr1 147984101 148035079 BC007833 0 + 147984545 148033414 0 14 529,32,81,131,118,153,300,206,84,49,85,130,46,1668, 0,25695,28767,33118,33695,33998,35644,38005,39629,40577,41402,43885,48367,49310,
+chr1 147962006 147975713 NM_005997 0 - 147962192 147975670 0 6 574,145,177,115,153,160, 0,1543,7859,9048,9340,13547,
+chr2 220229182 220233943 NM_024536 0 - 220229609 220233765 0 4 1687,180,574,492, 0,1990,2660,4269,
+chr2 220108603 220116964 NM_001927 0 + 220108689 220116217 0 9 664,61,96,162,126,221,44,83,789, 0,1718,1874,2118,2451,2963,5400,7286,7572,
+chr2 118389378 118390700 BC005078 0 - 118390395 118390500 0 1 1322, 0,
+chr2 118288484 118306183 NM_006773 0 + 118288583 118304530 0 14 184,285,144,136,101,200,115,140,162,153,114,57,178,1796, 0,2765,4970,6482,6971,7183,7468,9890,10261,10768,11590,14270,14610,15903,
+chr5 131621285 131637046 NM_003687 0 + 131621326 131635821 0 7 134,152,82,179,164,118,1430, 0,4915,8770,13221,13609,14097,14331,
+chr5 131556201 131590458 NM_004199 0 - 131556601 131582218 0 15 471,97,69,66,54,100,71,177,194,240,138,152,97,100,170, 0,2316,2802,5596,6269,11138,11472,15098,16528,17674,21306,24587,25142,25935,34087,
+chr5 131424245 131426795 NM_000588 0 + 131424298 131426383 0 5 215,42,90,42,535, 0,313,1658,1872,2015,
+chr5 131170738 131357870 AF099740 0 - 131311206 131357817 0 31 112,124,120,81,65,40,120,129,61,88,94,79,72,102,144,117,89,73,96,135,135,78,74,52,33,179,100,102,65,115,248, 0,11593,44117,47607,104668,109739,114675,126366,135488,137518,138009,140437,152389,153373,155388,159269,160793,162981,164403,165577,166119,167611,169501,178260,179675,180901,181658,182260,182953,183706,186884,
+chr6 108722790 108950942 NM_145315 0 + 108722976 108950321 0 13 325,224,52,102,131,100,59,83,71,101,141,114,750, 0,28931,52094,60760,61796,71339,107102,152319,181970,182297,215317,224802,227402,
+chr6 108639410 108689143 NM_152827 0 - 108640045 108688818 0 3 741,125,487, 0,2984,49246,
+chr6 108593954 108616704 NM_003269 0 + 108594662 108615360 0 9 733,146,88,236,147,97,150,106,1507, 0,5400,8778,10445,12037,14265,14749,15488,21243,
chr6 108298214 108386086 NM_007214 0 - 108299600 108385906 0 21 1530,105,99,102,159,174,60,83,148,155,93,133,95,109,51,59,62,113,115,100,304, 0,2490,6246,10831,12670,23164,23520,27331,31052,32526,34311,36130,36365,38609,41028,42398,43048,51479,54500,59097,87568,
-chr6 108593954 108616704 NM_003269 0 + 108594662 108615360 0 9 733,146,88,236,147,97,150,106,1507, 0,5400,8778,10445,12037,14265,14749,15488,21243,
-chr6 108639410 108689143 NM_152827 0 - 108640045 108688818 0 3 741,125,487, 0,2984,49246,
-chr6 108722790 108950942 NM_145315 0 + 108722976 108950321 0 13 325,224,52,102,131,100,59,83,71,101,141,114,750, 0,28931,52094,60760,61796,71339,107102,152319,181970,182297,215317,224802,227402,
+chr7 116944658 117107512 AF377960 0 - 116945541 116979926 0 23 1129,102,133,64,186,206,179,188,153,100,87,80,96,276,118,255,151,100,204,1654,225,108,173, 0,7364,8850,10413,13893,14398,17435,24259,24615,35177,35359,45901,47221,49781,56405,66857,69787,72208,73597,80474,100111,150555,162681,
+chr7 116713967 116902666 NM_000492 0 + 116714099 116901113 0 27 185,111,109,216,90,164,126,247,93,183,192,95,87,724,129,38,251,80,151,228,101,249,156,90,173,106,1754, 0,24290,29071,50936,54313,55285,56585,60137,62053,68678,79501,107776,110390,111971,114967,122863,123569,126711,130556,131618,134650,147559,162475,172879,184725,185496,186945,
+chr7 116511232 116557294 NM_003391 0 - 116512159 116556994 0 5 1157,265,278,227,383, 0,20384,37843,43339,45679,
chr7 113320332 113924911 AK131266 0 + 113862563 113893433 0 20 285,91,178,90,58,75,138,51,201,178,214,105,88,84,77,102,122,70,164,1124, 0,201692,340175,448290,451999,484480,542213,543265,543478,545201,556083,558358,565876,567599,573029,573245,575738,577123,577946,603455,
-chr7 116511232 116557294 NM_003391 0 - 116512159 116556994 0 5 1157,265,278,227,383, 0,20384,37843,43339,45679,
-chr7 116713967 116902666 NM_000492 0 + 116714099 116901113 0 27 185,111,109,216,90,164,126,247,93,183,192,95,87,724,129,38,251,80,151,228,101,249,156,90,173,106,1754, 0,24290,29071,50936,54313,55285,56585,60137,62053,68678,79501,107776,110390,111971,114967,122863,123569,126711,130556,131618,134650,147559,162475,172879,184725,185496,186945,
-chr7 116944658 117107512 AF377960 0 - 116945541 116979926 0 23 1129,102,133,64,186,206,179,188,153,100,87,80,96,276,118,255,151,100,204,1654,225,108,173, 0,7364,8850,10413,13893,14398,17435,24259,24615,35177,35359,45901,47221,49781,56405,66857,69787,72208,73597,80474,100111,150555,162681,
-chr2 118288484 118306183 NM_006773 0 + 118288583 118304530 0 14 184,285,144,136,101,200,115,140,162,153,114,57,178,1796, 0,2765,4970,6482,6971,7183,7468,9890,10261,10768,11590,14270,14610,15903,
-chr2 118389378 118390700 BC005078 0 - 118390395 118390500 0 1 1322, 0,
chr8 118880786 119193239 NM_000127 0 - 118881131 119192466 0 11 531,172,161,90,96,119,133,120,108,94,1735, 0,5355,7850,13505,19068,20309,23098,30863,36077,37741,310718,
+chr9 128849867 128870133 NM_020145 0 - 128850516 128869987 0 11 757,241,101,90,24,63,93,134,129,142,209, 0,1071,1736,2085,2635,4201,6376,6736,13056,14247,20057,
+chr9 128789530 128848928 NM_015354 0 + 128789552 128848511 0 44 54,55,74,85,81,45,93,120,212,115,201,90,66,120,127,153,127,88,77,115,121,67,129,140,107,207,170,70,68,196,78,86,146,182,201,93,159,138,75,228,132,74,130,594, 0,1491,5075,8652,9254,10312,11104,11317,20808,21702,23060,25462,31564,32908,33566,34851,35204,35595,35776,37202,38860,39111,39891,40349,42422,45499,45827,46675,47158,47621,50453,50840,51474,51926,53831,54186,55119,55619,57449,57605,57947,58352,58541,58804,
+chr9 128787362 128789566 NM_014908 0 - 128787519 128789136 0 1 2204, 0,
+chr9 128763240 128783870 NM_174933 0 + 128764156 128783586 0 12 261,118,74,159,76,48,56,63,129,117,127,370, 0,522,875,5630,12374,12603,15040,15175,18961,19191,20037,20260,
+chrX 152693677 152712545 NM_001666 0 - 152694029 152712503 0 22 586,100,93,184,74,234,106,135,78,61,103,28,85,192,102,222,129,183,63,163,205,109, 0,1693,2066,2364,2635,2794,3129,3323,3545,3752,5323,5647,5841,6032,6401,11455,11778,13249,13719,13987,14227,18759,
+chrX 152691216 152693487 NM_000054 0 + 152691446 152693029 0 3 255,885,664, 0,616,1607,
+chrX 152648233 152662158 NM_000425 0 - 152648964 152662138 0 28 963,12,73,135,156,120,174,123,202,116,223,71,198,111,125,157,167,112,144,132,185,112,171,123,203,106,11,100, 0,1436,1545,1951,2390,2653,2889,3156,3367,3772,4717,5122,5424,5868,6066,6370,6629,6909,7588,7871,8124,8456,8858,9125,10220,10660,11296,13825,
chrX 122719582 122773357 NM_001167 0 + 122745047 122766566 0 7 96,909,100,79,43,201,6985, 0,25433,28421,31040,32533,40295,46790,
-chr9 128763240 128783870 NM_174933 0 + 128764156 128783586 0 12 261,118,74,159,76,48,56,63,129,117,127,370, 0,522,875,5630,12374,12603,15040,15175,18961,19191,20037,20260,
-chr9 128787362 128789566 NM_014908 0 - 128787519 128789136 0 1 2204, 0,
-chr9 128789530 128848928 NM_015354 0 + 128789552 128848511 0 44 54,55,74,85,81,45,93,120,212,115,201,90,66,120,127,153,127,88,77,115,121,67,129,140,107,207,170,70,68,196,78,86,146,182,201,93,159,138,75,228,132,74,130,594, 0,1491,5075,8652,9254,10312,11104,11317,20808,21702,23060,25462,31564,32908,33566,34851,35204,35595,35776,37202,38860,39111,39891,40349,42422,45499,45827,46675,47158,47621,50453,50840,51474,51926,53831,54186,55119,55619,57449,57605,57947,58352,58541,58804,
-chr9 128849867 128870133 NM_020145 0 - 128850516 128869987 0 11 757,241,101,90,24,63,93,134,129,142,209, 0,1071,1736,2085,2635,4201,6376,6736,13056,14247,20057,
-chr5 131170738 131357870 AF099740 0 - 131311206 131357817 0 31 112,124,120,81,65,40,120,129,61,88,94,79,72,102,144,117,89,73,96,135,135,78,74,52,33,179,100,102,65,115,248, 0,11593,44117,47607,104668,109739,114675,126366,135488,137518,138009,140437,152389,153373,155388,159269,160793,162981,164403,165577,166119,167611,169501,178260,179675,180901,181658,182260,182953,183706,186884,
-chr5 131424245 131426795 NM_000588 0 + 131424298 131426383 0 5 215,42,90,42,535, 0,313,1658,1872,2015,
-chr5 131556201 131590458 NM_004199 0 - 131556601 131582218 0 15 471,97,69,66,54,100,71,177,194,240,138,152,97,100,170, 0,2316,2802,5596,6269,11138,11472,15098,16528,17674,21306,24587,25142,25935,34087,
-chr5 131621285 131637046 NM_003687 0 + 131621326 131635821 0 7 134,152,82,179,164,118,1430, 0,4915,8770,13221,13609,14097,14331,
-chr1 147962006 147975713 NM_005997 0 - 147962192 147975670 0 6 574,145,177,115,153,160, 0,1543,7859,9048,9340,13547,
-chr1 147984101 148035079 BC007833 0 + 147984545 148033414 0 14 529,32,81,131,118,153,300,206,84,49,85,130,46,1668, 0,25695,28767,33118,33695,33998,35644,38005,39629,40577,41402,43885,48367,49310,
-chr1 148077485 148111797 NM_002651 0 - 148078400 148111728 0 12 1097,121,133,266,124,105,110,228,228,45,937,77, 0,2081,2472,6871,9907,10257,11604,14199,15637,18274,23636,34235,
-chr1 148185113 148187485 NM_002796 0 + 148185136 148187378 0 7 163,207,147,82,117,89,120, 0,416,877,1199,1674,1977,2252,
-chrX 152648233 152662158 NM_000425 0 - 152648964 152662138 0 28 963,12,73,135,156,120,174,123,202,116,223,71,198,111,125,157,167,112,144,132,185,112,171,123,203,106,11,100, 0,1436,1545,1951,2390,2653,2889,3156,3367,3772,4717,5122,5424,5868,6066,6370,6629,6909,7588,7871,8124,8456,8858,9125,10220,10660,11296,13825,
-chrX 152691216 152693487 NM_000054 0 + 152691446 152693029 0 3 255,885,664, 0,616,1607,
-chrX 152693677 152712545 NM_001666 0 - 152694029 152712503 0 22 586,100,93,184,74,234,106,135,78,61,103,28,85,192,102,222,129,183,63,163,205,109, 0,1693,2066,2364,2635,2794,3129,3323,3545,3752,5323,5647,5841,6032,6401,11455,11778,13249,13719,13987,14227,18759,
-chr2 220108603 220116964 NM_001927 0 + 220108689 220116217 0 9 664,61,96,162,126,221,44,83,789, 0,1718,1874,2118,2451,2963,5400,7286,7572,
-chr2 220229182 220233943 NM_024536 0 - 220229609 220233765 0 4 1687,180,574,492, 0,1990,2660,4269,
diff -r 7fe3612562eea76387e88ab7d184fccc3160cb43 -r 7f28b9616f70d4c6515c6fd88061374a2b01a283 test-data/sort_out2.bed
--- a/test-data/sort_out2.bed
+++ b/test-data/sort_out2.bed
@@ -1,3 +1,18 @@
+# comment 1 \n\n''" again
+# comment 2 **}"''' special
+# comment 3 @n/n""" characters
+chr1 147962006 147975713 NM_005997 0 - 147962192 147975670 0 6 574,145,177,115,153,160, 0,1543,7859,9048,9340,13547,
+chr1 147984101 148035079 BC007833 0 + 147984545 148033414 0 14 529,32,81,131,118,153,300,206,84,49,85,130,46,1668, 0,25695,28767,33118,33695,33998,35644,38005,39629,40577,41402,43885,48367,49310,
+chr1 148077485 148111797 NM_002651 0 - 148078400 148111728 0 12 1097,121,133,266,124,105,110,228,228,45,937,77, 0,2081,2472,6871,9907,10257,11604,14199,15637,18274,23636,34235,
+chr1 148185113 148187485 NM_002796 0 + 148185136 148187378 0 7 163,207,147,82,117,89,120, 0,416,877,1199,1674,1977,2252,
+chr2 118288484 118306183 NM_006773 0 + 118288583 118304530 0 14 184,285,144,136,101,200,115,140,162,153,114,57,178,1796, 0,2765,4970,6482,6971,7183,7468,9890,10261,10768,11590,14270,14610,15903,
+chr2 118389378 118390700 BC005078 0 - 118390395 118390500 0 1 1322, 0,
+chr2 220108603 220116964 NM_001927 0 + 220108689 220116217 0 9 664,61,96,162,126,221,44,83,789, 0,1718,1874,2118,2451,2963,5400,7286,7572,
+chr2 220229182 220233943 NM_024536 0 - 220229609 220233765 0 4 1687,180,574,492, 0,1990,2660,4269,
+chr5 131170738 131357870 AF099740 0 - 131311206 131357817 0 31 112,124,120,81,65,40,120,129,61,88,94,79,72,102,144,117,89,73,96,135,135,78,74,52,33,179,100,102,65,115,248, 0,11593,44117,47607,104668,109739,114675,126366,135488,137518,138009,140437,152389,153373,155388,159269,160793,162981,164403,165577,166119,167611,169501,178260,179675,180901,181658,182260,182953,183706,186884,
+chr5 131424245 131426795 NM_000588 0 + 131424298 131426383 0 5 215,42,90,42,535, 0,313,1658,1872,2015,
+chr5 131556201 131590458 NM_004199 0 - 131556601 131582218 0 15 471,97,69,66,54,100,71,177,194,240,138,152,97,100,170, 0,2316,2802,5596,6269,11138,11472,15098,16528,17674,21306,24587,25142,25935,34087,
+chr5 131621285 131637046 NM_003687 0 + 131621326 131635821 0 7 134,152,82,179,164,118,1430, 0,4915,8770,13221,13609,14097,14331,
chr6 108298214 108386086 NM_007214 0 - 108299600 108385906 0 21 1530,105,99,102,159,174,60,83,148,155,93,133,95,109,51,59,62,113,115,100,304, 0,2490,6246,10831,12670,23164,23520,27331,31052,32526,34311,36130,36365,38609,41028,42398,43048,51479,54500,59097,87568,
chr6 108593954 108616704 NM_003269 0 + 108594662 108615360 0 9 733,146,88,236,147,97,150,106,1507, 0,5400,8778,10445,12037,14265,14749,15488,21243,
chr6 108639410 108689143 NM_152827 0 - 108640045 108688818 0 3 741,125,487, 0,2984,49246,
@@ -6,24 +21,12 @@
chr7 116511232 116557294 NM_003391 0 - 116512159 116556994 0 5 1157,265,278,227,383, 0,20384,37843,43339,45679,
chr7 116713967 116902666 NM_000492 0 + 116714099 116901113 0 27 185,111,109,216,90,164,126,247,93,183,192,95,87,724,129,38,251,80,151,228,101,249,156,90,173,106,1754, 0,24290,29071,50936,54313,55285,56585,60137,62053,68678,79501,107776,110390,111971,114967,122863,123569,126711,130556,131618,134650,147559,162475,172879,184725,185496,186945,
chr7 116944658 117107512 AF377960 0 - 116945541 116979926 0 23 1129,102,133,64,186,206,179,188,153,100,87,80,96,276,118,255,151,100,204,1654,225,108,173, 0,7364,8850,10413,13893,14398,17435,24259,24615,35177,35359,45901,47221,49781,56405,66857,69787,72208,73597,80474,100111,150555,162681,
-chr2 118288484 118306183 NM_006773 0 + 118288583 118304530 0 14 184,285,144,136,101,200,115,140,162,153,114,57,178,1796, 0,2765,4970,6482,6971,7183,7468,9890,10261,10768,11590,14270,14610,15903,
-chr2 118389378 118390700 BC005078 0 - 118390395 118390500 0 1 1322, 0,
chr8 118880786 119193239 NM_000127 0 - 118881131 119192466 0 11 531,172,161,90,96,119,133,120,108,94,1735, 0,5355,7850,13505,19068,20309,23098,30863,36077,37741,310718,
-chrX 122719582 122773357 NM_001167 0 + 122745047 122766566 0 7 96,909,100,79,43,201,6985, 0,25433,28421,31040,32533,40295,46790,
chr9 128763240 128783870 NM_174933 0 + 128764156 128783586 0 12 261,118,74,159,76,48,56,63,129,117,127,370, 0,522,875,5630,12374,12603,15040,15175,18961,19191,20037,20260,
chr9 128787362 128789566 NM_014908 0 - 128787519 128789136 0 1 2204, 0,
chr9 128789530 128848928 NM_015354 0 + 128789552 128848511 0 44 54,55,74,85,81,45,93,120,212,115,201,90,66,120,127,153,127,88,77,115,121,67,129,140,107,207,170,70,68,196,78,86,146,182,201,93,159,138,75,228,132,74,130,594, 0,1491,5075,8652,9254,10312,11104,11317,20808,21702,23060,25462,31564,32908,33566,34851,35204,35595,35776,37202,38860,39111,39891,40349,42422,45499,45827,46675,47158,47621,50453,50840,51474,51926,53831,54186,55119,55619,57449,57605,57947,58352,58541,58804,
chr9 128849867 128870133 NM_020145 0 - 128850516 128869987 0 11 757,241,101,90,24,63,93,134,129,142,209, 0,1071,1736,2085,2635,4201,6376,6736,13056,14247,20057,
-chr5 131170738 131357870 AF099740 0 - 131311206 131357817 0 31 112,124,120,81,65,40,120,129,61,88,94,79,72,102,144,117,89,73,96,135,135,78,74,52,33,179,100,102,65,115,248, 0,11593,44117,47607,104668,109739,114675,126366,135488,137518,138009,140437,152389,153373,155388,159269,160793,162981,164403,165577,166119,167611,169501,178260,179675,180901,181658,182260,182953,183706,186884,
-chr5 131424245 131426795 NM_000588 0 + 131424298 131426383 0 5 215,42,90,42,535, 0,313,1658,1872,2015,
-chr5 131556201 131590458 NM_004199 0 - 131556601 131582218 0 15 471,97,69,66,54,100,71,177,194,240,138,152,97,100,170, 0,2316,2802,5596,6269,11138,11472,15098,16528,17674,21306,24587,25142,25935,34087,
-chr5 131621285 131637046 NM_003687 0 + 131621326 131635821 0 7 134,152,82,179,164,118,1430, 0,4915,8770,13221,13609,14097,14331,
-chr1 147962006 147975713 NM_005997 0 - 147962192 147975670 0 6 574,145,177,115,153,160, 0,1543,7859,9048,9340,13547,
-chr1 147984101 148035079 BC007833 0 + 147984545 148033414 0 14 529,32,81,131,118,153,300,206,84,49,85,130,46,1668, 0,25695,28767,33118,33695,33998,35644,38005,39629,40577,41402,43885,48367,49310,
-chr1 148077485 148111797 NM_002651 0 - 148078400 148111728 0 12 1097,121,133,266,124,105,110,228,228,45,937,77, 0,2081,2472,6871,9907,10257,11604,14199,15637,18274,23636,34235,
-chr1 148185113 148187485 NM_002796 0 + 148185136 148187378 0 7 163,207,147,82,117,89,120, 0,416,877,1199,1674,1977,2252,
+chrX 122719582 122773357 NM_001167 0 + 122745047 122766566 0 7 96,909,100,79,43,201,6985, 0,25433,28421,31040,32533,40295,46790,
chrX 152648233 152662158 NM_000425 0 - 152648964 152662138 0 28 963,12,73,135,156,120,174,123,202,116,223,71,198,111,125,157,167,112,144,132,185,112,171,123,203,106,11,100, 0,1436,1545,1951,2390,2653,2889,3156,3367,3772,4717,5122,5424,5868,6066,6370,6629,6909,7588,7871,8124,8456,8858,9125,10220,10660,11296,13825,
chrX 152691216 152693487 NM_000054 0 + 152691446 152693029 0 3 255,885,664, 0,616,1607,
chrX 152693677 152712545 NM_001666 0 - 152694029 152712503 0 22 586,100,93,184,74,234,106,135,78,61,103,28,85,192,102,222,129,183,63,163,205,109, 0,1693,2066,2364,2635,2794,3129,3323,3545,3752,5323,5647,5841,6032,6401,11455,11778,13249,13719,13987,14227,18759,
-chr2 220108603 220116964 NM_001927 0 + 220108689 220116217 0 9 664,61,96,162,126,221,44,83,789, 0,1718,1874,2118,2451,2963,5400,7286,7572,
-chr2 220229182 220233943 NM_024536 0 - 220229609 220233765 0 4 1687,180,574,492, 0,1990,2660,4269,
diff -r 7fe3612562eea76387e88ab7d184fccc3160cb43 -r 7f28b9616f70d4c6515c6fd88061374a2b01a283 tools/filters/sorter.py
--- a/tools/filters/sorter.py
+++ b/tools/filters/sorter.py
@@ -1,49 +1,59 @@
"""
-Sorts tabular data on one or more columns.
+ Sorts tabular data on one or more columns. All comments of the file are collected
+ and placed at the beginning of the sorted output file.
+
+ usage: sorter.py [options]
+ -i, --input: Tabular file to be sorted
+ -o, --output: Sorted output file
+ -k, --key: Key (see manual for bash/sort)
+
+ usage: sorter.py input output [key ...]
+"""
+# 03/05/2013 guerler
-usage: %prog [options]
- -i, --input=i: Tabular file to be sorted
- -o, --out_file1=o: Sorted output file
- -c, --column=c: First column to sort on
- -s, --style=s: Sort style (numerical or alphabetical)
- -r, --order=r: Order (ASC or DESC)
+# imports
+import os, re, string, sys
+from optparse import OptionParser
-usage: %prog input out_file1 column style order [column style ...]
-"""
-
-import os, re, string, sys
-from galaxy import eggs
-import pkg_resources; pkg_resources.require( "bx-python" )
-from bx.cookbook import doc_optparse
-
+# error
def stop_err( msg ):
sys.stderr.write( "%s\n" % msg )
sys.exit()
+# main
def main():
- #Parse Command Line
- options, args = doc_optparse.parse( __doc__ )
+ # define options
+ parser = OptionParser()
+ parser.add_option("-i", "--input")
+ parser.add_option("-o", "--output")
+ parser.add_option("-k", "--key", action="append")
+
+ # parse
+ options, args = parser.parse_args()
+
try:
- inputfile = options.input
- outputfile = '-o %s' % options.out_file1
- columns = [options.column]
- styles = [('','n')[options.style == 'num']]
- orders = [('','r')[options.order == 'DESC']]
- col_style_orders = sys.argv[6:]
- if len(col_style_orders) > 1:
- columns.extend([col_style_orders[i] for i in range(0,len(col_style_orders),3)])
- styles.extend([('','n')[col_style_orders[i] == 'num'] for i in range(1,len(col_style_orders),3)])
- orders.extend([('','r')[col_style_orders[i] == 'DESC'] for i in range(2,len(col_style_orders),3)])
- cols = [ '-k%s,%s%s%s'%(columns[i], columns[i], styles[i], orders[i]) for i in range(len(columns)) ]
+ # retrieve options
+ input = options.input
+ output = options.output
+ key = ["-k" + k for k in options.key]
+
+ # grep comments
+ grep_comments = "(grep '^#' %s) > %s" % (input, output)
+ print grep_comments
+
+ # grep and sort columns
+ sort_columns = "(grep '^[^#]' %s | sort -f -t '\t' %s) >> %s" % (input, ' '.join(key), output)
+ print sort_columns
+
+ # execute
+ os.system(grep_comments)
+ os.system(sort_columns)
+
except Exception, ex:
- stop_err('Error parsing input parameters\n' + str(ex))
+ stop_err('Error running sorter.py\n' + str(ex))
- # Launch sort.
- cmd = "sort -f -t ' ' %s %s %s" % (' '.join(cols), outputfile, inputfile)
- try:
- os.system(cmd)
- except Exception, ex:
- stop_err('Error running sort command\n' + str(ex))
+ # exit
+ sys.exit(0)
if __name__ == "__main__":
main()
diff -r 7fe3612562eea76387e88ab7d184fccc3160cb43 -r 7f28b9616f70d4c6515c6fd88061374a2b01a283 tools/filters/sorter.xml
--- a/tools/filters/sorter.xml
+++ b/tools/filters/sorter.xml
@@ -1,130 +1,61 @@
-<tool id="sort1" name="Sort" version="1.0.1">
- <description>data in ascending or descending order</description>
- <command interpreter="python">
- sorter.py
- --input=$input
- --out_file1=$out_file1
- --column=$column
- --style=$style
- --order=$order
- #for $col in $column_set:
- ${col.other_column}
- ${col.other_style}
- ${col.other_order}
- #end for
- </command>
- <inputs>
- <param format="tabular" name="input" type="data" label="Sort Dataset" />
- <param name="column" label="on column" type="data_column" data_ref="input" accept_default="true"/>
- <param name="style" type="select" label="with flavor">
- <option value="num">Numerical sort</option>
- <option value="alpha">Alphabetical sort</option>
- </param>
- <param name="order" type="select" label="everything in">
- <option value="DESC">Descending order</option>
- <option value="ASC">Ascending order</option>
- </param>
- <repeat name="column_set" title="Column selection">
- <param name="other_column" label="on column" type="data_column" data_ref="input" accept_default="true" />
- <param name="other_style" type="select" label="with flavor">
- <option value="num">Numerical sort</option>
- <option value="alpha">Alphabetical sort</option>
- </param>
- <param name="other_order" type="select" label="everything in">
- <option value="DESC">Descending order</option>
- <option value="ASC">Ascending order</option>
- </param>
- </repeat>
- </inputs>
- <outputs>
- <data format="input" name="out_file1" metadata_source="input"/>
- </outputs>
- <tests>
- <test>
- <param name="input" value="sort_in1.bed"/>
- <param name="column" value="1"/>
- <param name="style" value="num"/>
- <param name="order" value="ASC"/>
- <param name="other_column" value="3"/>
- <param name="other_style" value="num"/>
- <param name="other_order" value="ASC"/>
- <output name="out_file1" file="sort_out1.bed"/>
- </test>
- <test>
- <param name="input" value="sort_in1.bed"/>
- <param name="column" value="3"/>
- <param name="style" value="alpha"/>
- <param name="order" value="ASC"/>
- <param name="other_column" value="1"/>
- <param name="other_style" value="alpha"/>
- <param name="other_order" value="ASC"/>
- <output name="out_file1" file="sort_out2.bed"/>
- </test>
- </tests>
- <help>
+<tool id="sort1" name="Sort" version="1.0.2">
+ <description>data in ascending or descending order</description>
+ <command interpreter="python">
+ sorter.py
+ --input=$input
+ --output=$output
+ --key=$column,$column$style$order
+ #for $col in $column_set:
+ --key=${col.other_column},${col.other_column}${col.other_style}${col.other_order}
+ #end for
+ </command>
+ <inputs>
+ <param format="tabular" name="input" type="data" label="Sort Dataset" />
+ <param name="column" label="on column" type="data_column" data_ref="input" accept_default="true"/>
+ <param name="style" type="select" label="with flavor">
+ <option value="n">Numerical sort</option>
+ <option value="">Alphabetical sort</option>
+ </param>
+ <param name="order" type="select" label="everything in">
+ <option value="r">Descending order</option>
+ <option value="">Ascending order</option>
+ </param>
+ <repeat name="column_set" title="Column selection">
+ <param name="other_column" label="on column" type="data_column" data_ref="input" accept_default="true" />
+ <param name="other_style" type="select" label="with flavor">
+ <option value="n">Numerical sort</option>
+ <option value="">Alphabetical sort</option>
+ </param>
+ <param name="other_order" type="select" label="everything in">
+ <option value="r">Descending order</option>
+ <option value="">Ascending order</option>
+ </param>
+ </repeat>
+ </inputs>
+ <outputs>
+ <data format="input" name="output" metadata_source="input"/>
+ </outputs>
+ <tests>
+ <test>
+ <param name="input" value="sort_in1.bed"/>
+ <param name="column" value="1"/>
+ <param name="style" value=""/>
+ <param name="order" value=""/>
+ <param name="other_column" value="3"/>
+ <param name="other_style" value="n"/>
+ <param name="other_order" value="r"/>
+ <output name="output" file="sort_out1.bed"/>
+ </test>
+ <test>
+ <param name="input" value="sort_in1.bed"/>
+ <param name="column" value="1"/>
+ <param name="style" value=""/>
+ <param name="order" value=""/>
+ <param name="other_column" value="3"/>
+ <param name="other_style" value="n"/>
+ <param name="other_order" value=""/>
+ <output name="output" file="sort_out2.bed"/>
+ </test>
+ </tests>
-.. class:: infomark
-
-**TIP:** If your data is not TAB delimited, use *Text Manipulation->Convert*
-
------
-
-**Syntax**
-
-This tool sorts the dataset on any number of columns in either ascending or descending order.
-
-* Numerical sort orders numbers by their magnitude, ignores all characters besides numbers, and evaluates a string of numbers to the value they signify.
-* Alphabetical sort is a phonebook type sort based on the conventional order of letters in an alphabet. Each nth letter is compared with the nth letter of other words in the list, starting at the first letter of each word and advancing to the second, third, fourth, and so on, until the order is established. Therefore, in an alphabetical sort, 2 comes after 100 (1 < 2).
-
------
-
-**Examples**
-
-The list of numbers 4,17,3,5 collates to 3,4,5,17 by numerical sorting, while it collates to 17,3,4,5 by alphabetical sorting.
-
-Sorting the following::
-
- Q d 7 II jhu 45
- A kk 4 I h 111
- Pd p 1 ktY WS 113
- A g 10 H ZZ 856
- A edf 4 tw b 234
- BBB rt 10 H ZZ 100
- A rew 10 d b 1111
- C sd 19 YH aa 10
- Hah c 23 ver bb 467
- MN gtr 1 a X 32
- N j 9 a T 205
- BBB rrf 10 b Z 134
- odfr ws 6 Weg dew 201
- C f 3 WW SW 34
- A jhg 4 I b 345
- Pd gf 7 Gthe de 567
- rS hty 90 YY LOp 89
- A g 10 H h 43
- A g 4 I h 500
-
-on columns 1 (alpha), 3 (num), and 6 (num) in ascending order will yield::
-
- A kk 4 I h 111
- A edf 4 tw b 234
- A jhg 4 I b 345
- A g 4 I h 500
- A g 10 H h 43
- A g 10 H ZZ 856
- A rew 10 d b 1111
- BBB rt 10 H ZZ 100
- BBB rrf 10 b Z 134
- C f 3 WW SW 34
- C sd 19 YH aa 10
- Hah c 23 ver bb 467
- MN gtr 1 a X 32
- N j 9 a T 205
- odfr ws 6 Weg dew 201
- Pd p 1 ktY WS 113
- Pd gf 7 Gthe de 567
- Q d 7 II jhu 45
- rS hty 90 YY LOp 89
-
- </help></tool>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/d024cb15459d/
changeset: d024cb15459d
user: Kyle Ellrott
date: 2013-02-26 01:48:38
summary: Passing extra parameters from root index to tool index. This will allow for tools to pick up parameters from the request URL and map them with the request_param_translation, even when inside the root frame.
affected #: 2 files
diff -r 26d7b89d9f897a28876214c88faed215973e6fcd -r d024cb15459def8c539b3b894eeac94b6bd95ca3 lib/galaxy/webapps/galaxy/controllers/root.py
--- a/lib/galaxy/webapps/galaxy/controllers/root.py
+++ b/lib/galaxy/webapps/galaxy/controllers/root.py
@@ -22,7 +22,8 @@
return trans.fill_template( "root/index.mako",
tool_id=tool_id,
workflow_id=workflow_id,
- m_c=m_c, m_a=m_a )
+ m_c=m_c, m_a=m_a,
+ params=kwd )
## ---- Tool related -----------------------------------------------------
diff -r 26d7b89d9f897a28876214c88faed215973e6fcd -r d024cb15459def8c539b3b894eeac94b6bd95ca3 templates/webapps/galaxy/root/index.mako
--- a/templates/webapps/galaxy/root/index.mako
+++ b/templates/webapps/galaxy/root/index.mako
@@ -118,7 +118,7 @@
if trans.app.config.require_login and not trans.user:
center_url = h.url_for( controller='user', action='login' )
elif tool_id is not None:
- center_url = h.url_for( 'tool_runner', tool_id=tool_id, from_noframe=True )
+ center_url = h.url_for( 'tool_runner', tool_id=tool_id, from_noframe=True, **params )
elif workflow_id is not None:
center_url = h.url_for( controller='workflow', action='run', id=workflow_id )
elif m_c is not None:
https://bitbucket.org/galaxy/galaxy-central/commits/9fca1c2e2b5d/
changeset: 9fca1c2e2b5d
user: Kyle Ellrott
date: 2013-02-26 20:09:44
summary: For external tool requests, without a 'URL' parameter but with other parameters, still process the incoming parameters.
This will allow for tools that have 'request_param_translation' tables without the 'URL' parameter mapping to accept external requests and fill in their tool form page.
affected #: 1 file
diff -r d024cb15459def8c539b3b894eeac94b6bd95ca3 -r 9fca1c2e2b5db9853db093c241276c5431355b98 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1809,6 +1809,8 @@
if "runtool_btn" not in incoming and "URL" not in incoming:
if not self.display_interface:
return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
+ if len(incoming):
+ self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
return "tool_form.mako", dict( errors={}, tool_state=state, param_values={}, incoming={} )
# Process incoming data
if not( self.check_values ):
https://bitbucket.org/galaxy/galaxy-central/commits/7fe3612562ee/
changeset: 7fe3612562ee
user: dannon
date: 2013-03-08 16:35:38
summary: Merged in kellrott/galaxy-central (pull request #130)
Passing extra parameters from root index to tool index.
affected #: 3 files
diff -r c7bde6953174eabe5510c89d023ffd8232592f11 -r 7fe3612562eea76387e88ab7d184fccc3160cb43 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -1922,6 +1922,8 @@
if "runtool_btn" not in incoming and "URL" not in incoming:
if not self.display_interface:
return 'message.mako', dict( status='info', message="The interface for this tool cannot be displayed", refresh_frames=['everything'] )
+ if len(incoming):
+ self.update_state( trans, self.inputs_by_page[state.page], state.inputs, incoming, old_errors=old_errors or {} )
return "tool_form.mako", dict( errors={}, tool_state=state, param_values={}, incoming={} )
# Process incoming data
if not( self.check_values ):
diff -r c7bde6953174eabe5510c89d023ffd8232592f11 -r 7fe3612562eea76387e88ab7d184fccc3160cb43 lib/galaxy/webapps/galaxy/controllers/root.py
--- a/lib/galaxy/webapps/galaxy/controllers/root.py
+++ b/lib/galaxy/webapps/galaxy/controllers/root.py
@@ -22,7 +22,8 @@
return trans.fill_template( "root/index.mako",
tool_id=tool_id,
workflow_id=workflow_id,
- m_c=m_c, m_a=m_a )
+ m_c=m_c, m_a=m_a,
+ params=kwd )
## ---- Tool related -----------------------------------------------------
diff -r c7bde6953174eabe5510c89d023ffd8232592f11 -r 7fe3612562eea76387e88ab7d184fccc3160cb43 templates/webapps/galaxy/root/index.mako
--- a/templates/webapps/galaxy/root/index.mako
+++ b/templates/webapps/galaxy/root/index.mako
@@ -118,7 +118,7 @@
if trans.app.config.require_login and not trans.user:
center_url = h.url_for( controller='user', action='login' )
elif tool_id is not None:
- center_url = h.url_for( 'tool_runner', tool_id=tool_id, from_noframe=True )
+ center_url = h.url_for( 'tool_runner', tool_id=tool_id, from_noframe=True, **params )
elif workflow_id is not None:
center_url = h.url_for( controller='workflow', action='run', id=workflow_id )
elif m_c is not None:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: greg: Enhance the hours_between_check config setting to handle float values in addition to int values. This allows for functional tests to test the Galaxy update manager.
by commits-noreply@bitbucket.org 08 Mar '13
by commits-noreply@bitbucket.org 08 Mar '13
08 Mar '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/c7bde6953174/
changeset: c7bde6953174
user: greg
date: 2013-03-08 15:14:23
summary: Enhance the hours_between_check config setting to handle float values in addition to int values. This allows for functional tests to test the Galaxy update manager.
affected #: 2 files
diff -r e3ae0bbd800a68532fc51625642e0abc29e1b085 -r c7bde6953174eabe5510c89d023ffd8232592f11 lib/galaxy/config.py
--- a/lib/galaxy/config.py
+++ b/lib/galaxy/config.py
@@ -69,9 +69,14 @@
self.shed_tool_data_table_config = resolve_path( kwargs.get( 'shed_tool_data_table_config', 'shed_tool_data_table_conf.xml' ), self.root )
self.enable_tool_shed_check = string_as_bool( kwargs.get( 'enable_tool_shed_check', False ) )
try:
- self.hours_between_check = int( kwargs.get( 'hours_between_check', 12 ) )
- if self.hours_between_check < 1 or self.hours_between_check > 24:
- self.hours_between_check = 12
+ self.hours_between_check = kwargs.get( 'hours_between_check', 12 )
+ if isinstance( hours_between_check, float ):
+ # Float values are supported for functional tests.
+ if self.hours_between_check < 0.001 or self.hours_between_check > 24.0:
+ self.hours_between_check = 12.0
+ else:
+ if self.hours_between_check < 1 or self.hours_between_check > 24:
+ self.hours_between_check = 12
except:
self.hours_between_check = 12
self.update_integrated_tool_panel = kwargs.get( "update_integrated_tool_panel", True )
diff -r e3ae0bbd800a68532fc51625642e0abc29e1b085 -r c7bde6953174eabe5510c89d023ffd8232592f11 lib/tool_shed/galaxy_install/update_manager.py
--- a/lib/tool_shed/galaxy_install/update_manager.py
+++ b/lib/tool_shed/galaxy_install/update_manager.py
@@ -18,7 +18,7 @@
self.sleeper = Sleeper()
self.restarter = threading.Thread( target=self.__restarter )
self.restarter.start()
- self.seconds_to_sleep = app.config.hours_between_check * 3600
+ self.seconds_to_sleep = int( app.config.hours_between_check * 3600 )
def __restarter( self ):
log.info( 'Update manager restarter starting up...' )
while self.running:
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
commit/galaxy-central: carlfeberhard: browser tests: fix broken debug statement
by commits-noreply@bitbucket.org 06 Mar '13
by commits-noreply@bitbucket.org 06 Mar '13
06 Mar '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/e3ae0bbd800a/
changeset: e3ae0bbd800a
user: carlfeberhard
date: 2013-03-07 01:45:31
summary: browser tests: fix broken debug statement
affected #: 1 file
diff -r 68455758f27e572cd637fc2b014773f0dfa9002d -r e3ae0bbd800a68532fc51625642e0abc29e1b085 test/casperjs/modules/historypanel.js
--- a/test/casperjs/modules/historypanel.js
+++ b/test/casperjs/modules/historypanel.js
@@ -161,7 +161,7 @@
//precondition: historyItemWrapper's (hda dom elements) should have an id
// we could use the selector directly, but better if it errors before an attempted delete
var hdaId = spaceghost.getElementInfo( hdaSelector ).attributes.id;
- spaceghost.debug( 'hda id: ' + spaceghost.jsonStr( hdaId ) );
+ spaceghost.debug( 'hda id: ' + hdaId );
// get the delete icon and click it
//TODO: handle disabled delete icon?
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/a601d530ec95/
changeset: a601d530ec95
user: carlfeberhard
date: 2013-03-07 01:35:29
summary: api: add exception type to history_contents, index errors
affected #: 1 file
diff -r 7677b8c3ef43d06eda783f1c08ec521d2bd625e5 -r a601d530ec9577eace628940dd4a385aebb803f3 lib/galaxy/webapps/galaxy/api/history_contents.py
--- a/lib/galaxy/webapps/galaxy/api/history_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/history_contents.py
@@ -73,8 +73,8 @@
# don't fail entire list if hda err's, record and move on
# (making sure http recvr knows it's err'd)
trans.response.status = 500
- log.error( "Error in history API at listing contents " +
- "with history %s, hda %s: %s", history_id, encoded_hda_id, str( exc ) )
+ log.error( "Error in history API at listing contents with history %s, hda %s: (%s) %s",
+ history_id, encoded_hda_id, type( exc ), str( exc ) )
rval.append( self._exception_as_hda_dict( trans, encoded_hda_id, exc ) )
else:
https://bitbucket.org/galaxy/galaxy-central/commits/68455758f27e/
changeset: 68455758f27e
user: carlfeberhard
date: 2013-03-07 01:36:25
summary: objectstore: add messages to ObjectNot* exceptions raised
affected #: 1 file
diff -r a601d530ec9577eace628940dd4a385aebb803f3 -r 68455758f27e572cd637fc2b014773f0dfa9002d lib/galaxy/objectstore/__init__.py
--- a/lib/galaxy/objectstore/__init__.py
+++ b/lib/galaxy/objectstore/__init__.py
@@ -731,7 +731,8 @@
if self.exists(obj, **kwargs):
return bool(self.size(obj, **kwargs) > 0)
else:
- raise ObjectNotFound()
+ raise ObjectNotFound( 'objectstore.empty, object does not exist: %s, kwargs: %s'
+ %( str( obj ), str( kwargs ) ) )
def size(self, obj, **kwargs):
rel_path = self._construct_path(obj, **kwargs)
@@ -816,7 +817,8 @@
# even if it does not exist.
# if dir_only:
# return cache_path
- raise ObjectNotFound()
+ raise ObjectNotFound( 'objectstore.get_filename, no cache_path: %s, kwargs: %s'
+ %( str( obj ), str( kwargs ) ) )
# return cache_path # Until the upload tool does not explicitly create the dataset, return expected path
def update_from_file(self, obj, file_name=None, create=False, **kwargs):
@@ -841,7 +843,8 @@
# Update the file on S3
self._push_to_os(rel_path, source_file)
else:
- raise ObjectNotFound()
+ raise ObjectNotFound( 'objectstore.update_from_file, object does not exist: %s, kwargs: %s'
+ %( str( obj ), str( kwargs ) ) )
def get_object_url(self, obj, **kwargs):
if self.exists(obj, **kwargs):
@@ -947,7 +950,8 @@
try:
obj.object_store_id = random.choice(self.weighted_backend_ids)
except IndexError:
- raise ObjectInvalid()
+ raise ObjectInvalid( 'objectstore.create, could not generate obj.object_store_id: %s, kwargs: %s'
+ %( str( obj ), str( kwargs ) ) )
object_session( obj ).add( obj )
object_session( obj ).flush()
log.debug("Selected backend '%s' for creation of %s %s" % (obj.object_store_id, obj.__class__.__name__, obj.id))
@@ -984,7 +988,8 @@
if object_store_id is not None:
return self.backends[object_store_id].__getattribute__(method)(obj, **kwargs)
if default_is_exception:
- raise default()
+ raise default( 'objectstore, __call_method failed: %s on %s, kwargs: %s'
+ %( method, str( obj ), str( kwargs ) ) )
else:
return default
@@ -1073,4 +1078,3 @@
calling_format=calling_format,
path=config.os_conn_path)
return s3_conn
-
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
1
0