galaxy-commits
commit/galaxy-central: dan: Fixes for dictify to to_dict change.
by commits-noreply@bitbucket.org 30 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/0470feeb593f/
Changeset: 0470feeb593f
User: dan
Date: 2013-08-31 00:06:52
Summary: Fixes for dictify to to_dict change.
Affected #: 8 files
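(The templates below only pick up the call-site rename; a minimal, hypothetical sketch of the pattern, with ExampleModel standing in for the real model classes, not part of the commit:)

# Illustrative only -- ExampleModel is a stand-in, not Galaxy code.
# The serialization entry point was renamed from dictify() to to_dict(),
# so template and controller call sites change accordingly.
class ExampleModel(object):
    dict_collection_visible_keys = ('id', 'name')

    def to_dict(self, view='collection', value_mapper=None):
        keys = getattr(self, 'dict_%s_visible_keys' % view)
        return dict((key, getattr(self, key, None)) for key in keys)

# before:  hda_dict = hda.dictify()
# after:   hda_dict = hda.to_dict()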
diff -r c59cc42b279385f0d351d31f919d29bb7c012877 -r 0470feeb593f9797fa9fd19dfdac4751a7ca788b config/plugins/visualizations/scatterplot/templates/scatterplot.mako
--- a/config/plugins/visualizations/scatterplot/templates/scatterplot.mako
+++ b/config/plugins/visualizations/scatterplot/templates/scatterplot.mako
@@ -41,7 +41,7 @@
<script type="text/javascript">
$(function(){
- var hda = ${h.to_json_string( trans.security.encode_dict_ids( hda.dictify() ) )},
+ var hda = ${h.to_json_string( trans.security.encode_dict_ids( hda.to_dict() ) )},
querySettings = ${h.to_json_string( query_args )},
chartConfig = _.extend( querySettings, {
containerSelector : '#chart',
diff -r c59cc42b279385f0d351d31f919d29bb7c012877 -r 0470feeb593f9797fa9fd19dfdac4751a7ca788b templates/webapps/galaxy/base_panels.mako
--- a/templates/webapps/galaxy/base_panels.mako
+++ b/templates/webapps/galaxy/base_panels.mako
@@ -16,7 +16,7 @@
"""Bootstrapping user API JSON"""
#TODO: move into common location (poss. BaseController)
if trans.user:
- user_dict = trans.user.dictify( view='element', value_mapper={ 'id': trans.security.encode_id,
+ user_dict = trans.user.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id,
'total_disk_usage': float } )
user_dict['quota_percent'] = trans.app.quota_agent.get_percent( trans=trans )
else:
diff -r c59cc42b279385f0d351d31f919d29bb7c012877 -r 0470feeb593f9797fa9fd19dfdac4751a7ca788b templates/webapps/galaxy/dataset/display.mako
--- a/templates/webapps/galaxy/dataset/display.mako
+++ b/templates/webapps/galaxy/dataset/display.mako
@@ -20,7 +20,7 @@
require(['mvc/data'], function(data) {
data.createTabularDatasetChunkedView(
// Dataset config. TODO: encode id.
- _.extend( ${h.to_json_string( item.dictify() )},
+ _.extend( ${h.to_json_string( item.to_dict() )},
{
chunk_url: "${h.url_for( controller='/dataset', action='display',
dataset_id=trans.security.encode_id( item.id ))}",
diff -r c59cc42b279385f0d351d31f919d29bb7c012877 -r 0470feeb593f9797fa9fd19dfdac4751a7ca788b templates/webapps/galaxy/dataset/tabular_chunked.mako
--- a/templates/webapps/galaxy/dataset/tabular_chunked.mako
+++ b/templates/webapps/galaxy/dataset/tabular_chunked.mako
@@ -19,7 +19,7 @@
require(['mvc/data'], function(data) {
data.createTabularDatasetChunkedView(
- _.extend( ${h.to_json_string( trans.security.encode_dict_ids( dataset.dictify() ) )},
+ _.extend( ${h.to_json_string( trans.security.encode_dict_ids( dataset.to_dict() ) )},
{
url_viz: "${h.url_for( controller='/visualization')}",
chunk_url: "${h.url_for( controller='/dataset', action='display',
diff -r c59cc42b279385f0d351d31f919d29bb7c012877 -r 0470feeb593f9797fa9fd19dfdac4751a7ca788b templates/webapps/galaxy/galaxy.masthead.mako
--- a/templates/webapps/galaxy/galaxy.masthead.mako
+++ b/templates/webapps/galaxy/galaxy.masthead.mako
@@ -4,7 +4,7 @@
"""Bootstrapping user API JSON"""
#TODO: move into common location (poss. BaseController)
if trans.user:
- user_dict = trans.user.dictify( view='element', value_mapper={ 'id': trans.security.encode_id,
+ user_dict = trans.user.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id,
'total_disk_usage': float } )
user_dict['quota_percent'] = trans.app.quota_agent.get_percent( trans=trans )
else:
diff -r c59cc42b279385f0d351d31f919d29bb7c012877 -r 0470feeb593f9797fa9fd19dfdac4751a7ca788b templates/webapps/galaxy/root/tool_menu.mako
--- a/templates/webapps/galaxy/root/tool_menu.mako
+++ b/templates/webapps/galaxy/root/tool_menu.mako
@@ -25,12 +25,12 @@
hidden: false
}),
tools = new tools_mod.ToolCollection(
- ${ h.to_json_string( trans.app.toolbox.dictify( trans, in_panel=False ) ) }
+ ${ h.to_json_string( trans.app.toolbox.to_dict( trans, in_panel=False ) ) }
),
tool_panel = new tools_mod.ToolPanel({
tool_search: tool_search,
tools: tools,
- layout: ${h.to_json_string( trans.app.toolbox.dictify( trans ) )}
+ layout: ${h.to_json_string( trans.app.toolbox.to_dict( trans ) )}
}),
tool_panel_view = new tools_mod.ToolPanelView({ model: tool_panel });
diff -r c59cc42b279385f0d351d31f919d29bb7c012877 -r 0470feeb593f9797fa9fd19dfdac4751a7ca788b templates/webapps/galaxy/visualization/scatterplot.mako
--- a/templates/webapps/galaxy/visualization/scatterplot.mako
+++ b/templates/webapps/galaxy/visualization/scatterplot.mako
@@ -222,7 +222,7 @@
<script type="text/javascript">
$(function(){
- var hda = ${h.to_json_string( trans.security.encode_dict_ids( hda.dictify() ) )},
+ var hda = ${h.to_json_string( trans.security.encode_dict_ids( hda.to_dict() ) )},
querySettings = ${h.to_json_string( query_args )},
chartConfig = _.extend( querySettings, {
containerSelector : '#chart',
diff -r c59cc42b279385f0d351d31f919d29bb7c012877 -r 0470feeb593f9797fa9fd19dfdac4751a7ca788b templates/webapps/galaxy/visualization/v_fwork_test.mako
--- a/templates/webapps/galaxy/visualization/v_fwork_test.mako
+++ b/templates/webapps/galaxy/visualization/v_fwork_test.mako
@@ -19,7 +19,7 @@
<%def name="process_hda( hda )"><%
- hda_dict = hda.dictify()
+ hda_dict = hda.to_dict()
hda_dict[ 'id' ] = trans.security.encode_id( hda_dict[ 'id' ] )
hda_dict[ 'history_id' ] = trans.security.encode_id( hda_dict[ 'history_id' ] )
del hda_dict[ 'peek' ]
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
3 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/ce4c7a3fb0d8/
Changeset: ce4c7a3fb0d8
User: jgoecks
Date: 2013-08-30 20:09:18
Summary: Use super() to call dictify() rather than calling directly.
Affected #: 1 file
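(A minimal, hypothetical sketch of the pattern this changeset adopts; BaseSerializable and Workflow are illustrative names, not Galaxy classes:)

# Illustrative sketch of calling the parent serializer via super().
class BaseSerializable(object):
    def dictify(self, view='collection', value_mapper=None):
        return {'view': view}

class Workflow(BaseSerializable):
    def dictify(self, view='collection', value_mapper=None):
        # Calling the base implementation directly hard-codes one parent:
        #   rval = BaseSerializable.dictify(self, view=view, value_mapper=value_mapper)
        # Going through super() follows the MRO instead, so the subclass keeps
        # working if the base class is renamed or another mixin is slotted in.
        rval = super(Workflow, self).dictify(view=view, value_mapper=value_mapper)
        rval['tags'] = []
        return rval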
diff -r 9345e15ec4b7f81271a071f0703e89fff0c5b3ac -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -2342,7 +2342,7 @@
self.tags.append(new_swta)
def dictify( self, view='collection', value_mapper = None ):
- rval = DictifiableMixin.dictify(self, view=view, value_mapper = value_mapper)
+ rval = super( StoredWorkflow, self ).dictify(self, view=view, value_mapper = value_mapper)
tags_str_list = []
for tag in self.tags:
tag_str = tag.user_tname
@@ -3831,7 +3831,7 @@
return [ tool_version.tool_id for tool_version in self.get_versions( app ) ]
def dictify( self, view='element' ):
- rval = DictifiableMixin.dictify(self, view)
+ rval = super( ToolVersion, self ).dictify( self, view )
rval['tool_name'] = self.tool_id
for a in self.parent_tool_association:
rval['parent_tool_id'] = a.parent_id
https://bitbucket.org/galaxy/galaxy-central/commits/a4259a97c287/
Changeset: a4259a97c287
User: jgoecks
Date: 2013-08-30 20:17:23
Summary: DictifiableMixin cleanup: (a) remove 'Mixin' from name and (b) rename dictify to to_dict.
Affected #: 32 files
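(A simplified sketch of the renamed mixin as it is used across the diff below; the real implementation in lib/galaxy/model/item_attrs.py also applies value_mapper functions and recurses into nested items, and ExampleUser is a hypothetical stand-in for classes such as User:)

# Simplified, illustrative version of the renamed mixin.
class Dictifiable(object):
    dict_collection_visible_keys = ()
    dict_element_visible_keys = ()

    def to_dict(self, view='collection', value_mapper=None):
        # formerly DictifiableMixin.dictify()
        try:
            visible_keys = getattr(self, 'dict_%s_visible_keys' % view)
        except AttributeError:
            raise Exception('Unknown Dictifiable view: %s' % view)
        rval = {}
        for key in visible_keys:
            if hasattr(self, key):
                rval[key] = getattr(self, key)
        return rval

# Model classes declare which attributes each view exposes, e.g.:
class ExampleUser(Dictifiable):
    dict_collection_visible_keys = ('id', 'email')
    dict_element_visible_keys = ('id', 'email', 'username')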
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -23,7 +23,7 @@
import galaxy.datatypes.registry
import galaxy.security.passwords
from galaxy.datatypes.metadata import MetadataCollection
-from galaxy.model.item_attrs import DictifiableMixin, UsesAnnotations
+from galaxy.model.item_attrs import Dictifiable, UsesAnnotations
from galaxy.security import get_permitted_actions
from galaxy.util import is_multi_byte, nice_size, Params, restore_text, send_mail
from galaxy.util.bunch import Bunch
@@ -61,15 +61,15 @@
datatypes_registry = d_registry
-class User( object, DictifiableMixin ):
+class User( object, Dictifiable ):
use_pbkdf2 = True
"""
Data for a Galaxy user or admin and relations to their
histories, credentials, and roles.
"""
- # attributes that will be accessed and returned when calling dictify( view='collection' )
+ # attributes that will be accessed and returned when calling to_dict( view='collection' )
dict_collection_visible_keys = ( 'id', 'email' )
- # attributes that will be accessed and returned when calling dictify( view='element' )
+ # attributes that will be accessed and returned when calling to_dict( view='element' )
dict_element_visible_keys = ( 'id', 'email', 'username', 'total_disk_usage', 'nice_total_disk_usage' )
def __init__( self, email=None, password=None ):
@@ -157,7 +157,7 @@
return total
-class Job( object, DictifiableMixin ):
+class Job( object, Dictifiable ):
dict_collection_visible_keys = [ 'id' ]
dict_element_visible_keys = [ 'id' ]
@@ -363,8 +363,8 @@
dataset.blurb = 'deleted'
dataset.peek = 'Job deleted'
dataset.info = 'Job output deleted by user before job completed'
- def dictify( self, view='collection' ):
- rval = super( Job, self ).dictify( view=view )
+ def to_dict( self, view='collection' ):
+ rval = super( Job, self ).to_dict( view=view )
rval['tool_name'] = self.tool_id
param_dict = dict( [ ( p.name, p.value ) for p in self.parameters ] )
rval['params'] = param_dict
@@ -649,7 +649,7 @@
else:
return False
-class Group( object, DictifiableMixin ):
+class Group( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'name' )
dict_element_visible_keys = ( 'id', 'name' )
@@ -662,7 +662,7 @@
self.user = user
self.group = group
-class History( object, DictifiableMixin, UsesAnnotations ):
+class History( object, Dictifiable, UsesAnnotations ):
dict_collection_visible_keys = ( 'id', 'name', 'published', 'deleted' )
dict_element_visible_keys = ( 'id', 'name', 'published', 'deleted', 'genome_build', 'purged' )
@@ -780,10 +780,10 @@
history_name = unicode(history_name, 'utf-8')
return history_name
- def dictify( self, view='collection', value_mapper = None ):
+ def to_dict( self, view='collection', value_mapper = None ):
# Get basic value.
- rval = super( History, self ).dictify( view=view, value_mapper=value_mapper )
+ rval = super( History, self ).to_dict( view=view, value_mapper=value_mapper )
# Add tags.
tags_str_list = []
@@ -869,7 +869,7 @@
self.group = group
self.role = role
-class Role( object, DictifiableMixin ):
+class Role( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'name' )
dict_element_visible_keys = ( 'id', 'name', 'description', 'type' )
private_id = None
@@ -886,19 +886,19 @@
self.type = type
self.deleted = deleted
-class UserQuotaAssociation( object, DictifiableMixin ):
+class UserQuotaAssociation( object, Dictifiable ):
dict_element_visible_keys = ( 'user', )
def __init__( self, user, quota ):
self.user = user
self.quota = quota
-class GroupQuotaAssociation( object, DictifiableMixin ):
+class GroupQuotaAssociation( object, Dictifiable ):
dict_element_visible_keys = ( 'group', )
def __init__( self, group, quota ):
self.group = group
self.quota = quota
-class Quota( object, DictifiableMixin ):
+class Quota( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'name' )
dict_element_visible_keys = ( 'id', 'name', 'description', 'bytes', 'operation', 'display_amount', 'default', 'users', 'groups' )
valid_operations = ( '+', '-', '=' )
@@ -927,7 +927,7 @@
else:
return nice_size( self.bytes )
-class DefaultQuotaAssociation( Quota, DictifiableMixin ):
+class DefaultQuotaAssociation( Quota, Dictifiable ):
dict_element_visible_keys = ( 'type', )
types = Bunch(
UNREGISTERED = 'unregistered',
@@ -1508,7 +1508,7 @@
return msg
-class HistoryDatasetAssociation( DatasetInstance, DictifiableMixin, UsesAnnotations ):
+class HistoryDatasetAssociation( DatasetInstance, Dictifiable, UsesAnnotations ):
"""
Resource class that creates a relation between a dataset and a user history.
"""
@@ -1680,7 +1680,7 @@
rval += child.get_disk_usage( user )
return rval
- def dictify( self, view='collection' ):
+ def to_dict( self, view='collection' ):
"""
Return attributes of this HDA that are exposed using the API.
"""
@@ -1759,7 +1759,7 @@
self.subset = subset
self.location = location
-class Library( object, DictifiableMixin ):
+class Library( object, Dictifiable ):
permitted_actions = get_permitted_actions( filter='LIBRARY' )
dict_collection_visible_keys = ( 'id', 'name' )
dict_element_visible_keys = ( 'id', 'deleted', 'name', 'description', 'synopsis' )
@@ -1828,7 +1828,7 @@
name = unicode( name, 'utf-8' )
return name
-class LibraryFolder( object, DictifiableMixin ):
+class LibraryFolder( object, Dictifiable ):
dict_element_visible_keys = ( 'id', 'parent_id', 'name', 'description', 'item_count', 'genome_build' )
def __init__( self, name=None, description=None, item_count=0, order_id=None ):
self.name = name or "Unnamed folder"
@@ -1900,8 +1900,8 @@
if isinstance( name, str ):
name = unicode( name, 'utf-8' )
return name
- def dictify( self, view='collection' ):
- rval = super( LibraryFolder, self ).dictify( view=view )
+ def to_dict( self, view='collection' ):
+ rval = super( LibraryFolder, self ).to_dict( view=view )
info_association, inherited = self.get_info_association()
if info_association:
if inherited:
@@ -1966,7 +1966,7 @@
name = property( get_name, set_name )
def display_name( self ):
self.library_dataset_dataset_association.display_name()
- def dictify( self, view='collection' ):
+ def to_dict( self, view='collection' ):
# Since this class is a proxy to rather complex attributes we want to
# display in other objects, we can't use the simpler method used by
# other model classes.
@@ -2096,7 +2096,7 @@
if restrict:
return None, inherited
return self.library_dataset.folder.get_info_association( inherited=True )
- def dictify( self, view='collection' ):
+ def to_dict( self, view='collection' ):
# Since this class is a proxy to rather complex attributes we want to
# display in other objects, we can't use the simpler method used by
# other model classes.
@@ -2323,7 +2323,7 @@
self.id = None
self.user = None
-class StoredWorkflow( object, DictifiableMixin):
+class StoredWorkflow( object, Dictifiable):
dict_collection_visible_keys = ( 'id', 'name', 'published' )
dict_element_visible_keys = ( 'id', 'name', 'published' )
def __init__( self ):
@@ -2341,8 +2341,8 @@
new_swta.user = target_user
self.tags.append(new_swta)
- def dictify( self, view='collection', value_mapper = None ):
- rval = super( StoredWorkflow, self ).dictify(self, view=view, value_mapper = value_mapper)
+ def to_dict( self, view='collection', value_mapper = None ):
+ rval = super( StoredWorkflow, self ).to_dict(self, view=view, value_mapper = value_mapper)
tags_str_list = []
for tag in self.tags:
tag_str = tag.user_tname
@@ -2434,7 +2434,7 @@
return os.path.abspath( os.path.join( path, "metadata_%d.dat" % self.id ) )
-class FormDefinition( object, DictifiableMixin ):
+class FormDefinition( object, Dictifiable ):
# The following form_builder classes are supported by the FormDefinition class.
supported_field_types = [ AddressField, CheckboxField, PasswordField, SelectField, TextArea, TextField, WorkflowField, WorkflowMappingField, HistoryField ]
types = Bunch( REQUEST = 'Sequencing Request Form',
@@ -2562,7 +2562,7 @@
self.form_definition = form_def
self.content = content
-class Request( object, DictifiableMixin ):
+class Request( object, Dictifiable ):
states = Bunch( NEW = 'New',
SUBMITTED = 'In Progress',
REJECTED = 'Rejected',
@@ -2753,7 +2753,7 @@
def populate_actions( self, trans, item, param_dict=None ):
return self.get_external_service_type( trans ).actions.populate( self, item, param_dict=param_dict )
-class RequestType( object, DictifiableMixin ):
+class RequestType( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'name', 'desc' )
dict_element_visible_keys = ( 'id', 'name', 'desc', 'request_form_id', 'sample_form_id' )
rename_dataset_options = Bunch( NO = 'Do not rename',
@@ -2839,7 +2839,7 @@
self.request_type = request_type
self.role = role
-class Sample( object, DictifiableMixin ):
+class Sample( object, Dictifiable ):
# The following form_builder classes are supported by the Sample class.
supported_field_types = [ CheckboxField, SelectField, TextField, WorkflowField, WorkflowMappingField, HistoryField ]
bulk_operations = Bunch( CHANGE_STATE = 'Change state',
@@ -3169,7 +3169,7 @@
def __str__ ( self ):
return "Tag(id=%s, type=%i, parent_id=%s, name=%s)" % ( self.id, self.type, self.parent_id, self.name )
-class ItemTagAssociation ( object, DictifiableMixin ):
+class ItemTagAssociation ( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'user_tname', 'user_value' )
dict_element_visible_keys = dict_collection_visible_keys
@@ -3350,7 +3350,7 @@
self.error_message = error_message
def as_dict( self, value_mapper=None ):
- return self.dictify( view='element', value_mapper=value_mapper )
+ return self.to_dict( view='element', value_mapper=value_mapper )
@property
def can_install( self ):
@@ -3372,7 +3372,7 @@
def can_reinstall_or_activate( self ):
return self.deleted
- def dictify( self, view='collection', value_mapper=None ):
+ def to_dict( self, view='collection', value_mapper=None ):
if value_mapper is None:
value_mapper = {}
rval = {}
@@ -3772,7 +3772,7 @@
self.tool_shed_repository.name,
self.tool_shed_repository.installed_changeset_revision )
-class ToolVersion( object, DictifiableMixin ):
+class ToolVersion( object, Dictifiable ):
dict_element_visible_keys = ( 'id', 'tool_shed_repository' )
def __init__( self, id=None, create_time=None, tool_id=None, tool_shed_repository=None ):
self.id = id
@@ -3830,8 +3830,8 @@
return version_ids
return [ tool_version.tool_id for tool_version in self.get_versions( app ) ]
- def dictify( self, view='element' ):
- rval = super( ToolVersion, self ).dictify( self, view )
+ def to_dict( self, view='element' ):
+ rval = super( ToolVersion, self ).to_dict( self, view )
rval['tool_name'] = self.tool_id
for a in self.parent_tool_association:
rval['parent_tool_id'] = a.parent_id
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/model/item_attrs.py
--- a/lib/galaxy/model/item_attrs.py
+++ b/lib/galaxy/model/item_attrs.py
@@ -158,12 +158,12 @@
class_name = '%sAnnotationAssociation' % item.__class__.__name__
return getattr( galaxy.model, class_name, None )
-class DictifiableMixin:
+class Dictifiable:
""" Mixin that enables objects to be converted to dictionaries. This is useful
when for sharing objects across boundaries, such as the API, tool scripts,
and JavaScript code. """
- def dictify( self, view='collection', value_mapper=None ):
+ def to_dict( self, view='collection', value_mapper=None ):
"""
Return item dictionary.
"""
@@ -176,9 +176,9 @@
Recursive helper function to get item values.
"""
# FIXME: why use exception here? Why not look for key in value_mapper
- # first and then default to dictify?
+ # first and then default to to_dict?
try:
- return item.dictify( view=view, value_mapper=value_mapper )
+ return item.to_dict( view=view, value_mapper=value_mapper )
except:
if key in value_mapper:
return value_mapper.get( key )( item )
@@ -193,7 +193,7 @@
try:
visible_keys = self.__getattribute__( 'dict_' + view + '_visible_keys' )
except AttributeError:
- raise Exception( 'Unknown DictifiableMixin view: %s' % view )
+ raise Exception( 'Unknown Dictifiable view: %s' % view )
for key in visible_keys:
try:
item = self.__getattribute__( key )
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/model/search.py
--- a/lib/galaxy/model/search.py
+++ b/lib/galaxy/model/search.py
@@ -560,7 +560,7 @@
return self.view.get_results(True)
def item_to_api_value(self, item):
- r = item.dictify( view='element' )
+ r = item.to_dict( view='element' )
if self.query.field_list.count("*"):
return r
o = {}
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -59,7 +59,7 @@
from galaxy.util.template import fill_template
from galaxy.web import url_for
from galaxy.web.form_builder import SelectField
-from galaxy.model.item_attrs import DictifiableMixin
+from galaxy.model.item_attrs import Dictifiable
from tool_shed.util import shed_util_common
from .loader import load_tool, template_macro_params
@@ -93,16 +93,16 @@
class ToolNotFoundException( Exception ):
pass
-def dictify_helper( obj, kwargs ):
- """ Helper function that provides the appropriate kwargs to dictify an object. """
+def to_dict_helper( obj, kwargs ):
+ """ Helper function that provides the appropriate kwargs to to_dict an object. """
- # Label.dictify cannot have kwargs.
+ # Label.to_dict cannot have kwargs.
if isinstance( obj, ToolSectionLabel ):
kwargs = {}
- return obj.dictify( **kwargs )
+ return obj.to_dict( **kwargs )
-class ToolBox( object, DictifiableMixin ):
+class ToolBox( object, Dictifiable ):
"""Container for a collection of tools"""
def __init__( self, config_filenames, tool_root_dir, app ):
@@ -711,9 +711,9 @@
"""
return self.app.model.context
- def dictify( self, trans, in_panel=True, **kwds ):
+ def to_dict( self, trans, in_panel=True, **kwds ):
"""
- Dictify toolbox.
+ to_dict toolbox.
"""
context = Bunch( toolbox=self, trans=trans, **kwds )
@@ -736,11 +736,11 @@
link_details = True
)
for elt in panel_elts:
- rval.append( dictify_helper( elt, kwargs ) )
+ rval.append( to_dict_helper( elt, kwargs ) )
else:
tools = []
for id, tool in self.tools_by_id.items():
- tools.append( tool.dictify( trans, link_details=True ) )
+ tools.append( tool.to_dict( trans, link_details=True ) )
rval = tools
return rval
@@ -801,7 +801,7 @@
-class ToolSection( object, DictifiableMixin ):
+class ToolSection( object, Dictifiable ):
"""
A group of tools with similar type/purpose that will be displayed as a
group in the user interface.
@@ -824,22 +824,22 @@
copy.elems = self.elems.copy()
return copy
- def dictify( self, trans, link_details=False ):
+ def to_dict( self, trans, link_details=False ):
""" Return a dict that includes section's attributes. """
- section_dict = super( ToolSection, self ).dictify()
+ section_dict = super( ToolSection, self ).to_dict()
section_elts = []
kwargs = dict(
trans = trans,
link_details = link_details
)
for elt in self.elems.values():
- section_elts.append( dictify_helper( elt, kwargs ) )
+ section_elts.append( to_dict_helper( elt, kwargs ) )
section_dict[ 'elems' ] = section_elts
return section_dict
-class ToolSectionLabel( object, DictifiableMixin ):
+class ToolSectionLabel( object, Dictifiable ):
"""
A label for a set of tools that can be displayed above groups of tools
and sections in the user interface
@@ -898,7 +898,7 @@
self.rerun_remap_job_id = None
self.inputs = params_from_strings( tool.inputs, values, app, ignore_errors=True )
-class ToolOutput( object, DictifiableMixin ):
+class ToolOutput( object, Dictifiable ):
"""
Represents an output datasets produced by a tool. For backward
compatibility this behaves as if it were the tuple::
@@ -948,7 +948,7 @@
self.type = type
self.version = version
-class Tool( object, DictifiableMixin ):
+class Tool( object, Dictifiable ):
"""
Represents a computational tool that can be executed through Galaxy.
"""
@@ -2962,11 +2962,11 @@
self.sa_session.flush()
return primary_datasets
- def dictify( self, trans, link_details=False, io_details=False ):
+ def to_dict( self, trans, link_details=False, io_details=False ):
""" Returns dict of tool. """
# Basic information
- tool_dict = super( Tool, self ).dictify()
+ tool_dict = super( Tool, self ).to_dict()
# Add link details.
if link_details:
@@ -2983,8 +2983,8 @@
# Add input and output details.
if io_details:
- tool_dict[ 'inputs' ] = [ input.dictify( trans ) for input in self.inputs.values() ]
- tool_dict[ 'outputs' ] = [ output.dictify() for output in self.outputs.values() ]
+ tool_dict[ 'inputs' ] = [ input.to_dict( trans ) for input in self.inputs.values() ]
+ tool_dict[ 'outputs' ] = [ output.to_dict() for output in self.outputs.values() ]
return tool_dict
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/tools/parameters/basic.py
--- a/lib/galaxy/tools/parameters/basic.py
+++ b/lib/galaxy/tools/parameters/basic.py
@@ -12,12 +12,12 @@
import validation, dynamic_options
# For BaseURLToolParameter
from galaxy.web import url_for
-from galaxy.model.item_attrs import DictifiableMixin
+from galaxy.model.item_attrs import Dictifiable
import galaxy.model
log = logging.getLogger(__name__)
-class ToolParameter( object, DictifiableMixin ):
+class ToolParameter( object, Dictifiable ):
"""
Describes a parameter accepted by a tool. This is just a simple stub at the
moment but in the future should encapsulate more complex parameters (lists
@@ -170,10 +170,10 @@
for validator in self.validators:
validator.validate( value, history )
- def dictify( self, trans, view='collection', value_mapper=None ):
- """ Dictify tool parameter. This can be overridden by subclasses. """
+ def to_dict( self, trans, view='collection', value_mapper=None ):
+ """ to_dict tool parameter. This can be overridden by subclasses. """
- tool_dict = super( ToolParameter, self ).dictify()
+ tool_dict = super( ToolParameter, self ).to_dict()
tool_dict[ 'html' ] = urllib.quote( self.get_html( trans ) )
if hasattr( self, 'value' ):
tool_dict[ 'value' ] = self.value
@@ -872,8 +872,8 @@
else:
return []
- def dictify( self, trans, view='collection', value_mapper=None ):
- d = super( SelectToolParameter, self ).dictify( trans )
+ def to_dict( self, trans, view='collection', value_mapper=None ):
+ d = super( SelectToolParameter, self ).to_dict( trans )
# Get options, value.
options = self.get_options( trans, [] )
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/tools/parameters/grouping.py
--- a/lib/galaxy/tools/parameters/grouping.py
+++ b/lib/galaxy/tools/parameters/grouping.py
@@ -15,9 +15,9 @@
from galaxy.util import sanitize_for_filename
from galaxy.util.bunch import Bunch
from galaxy.util.expressions import ExpressionContext
-from galaxy.model.item_attrs import DictifiableMixin
+from galaxy.model.item_attrs import Dictifiable
-class Group( object, DictifiableMixin ):
+class Group( object, Dictifiable ):
dict_collection_visible_keys = ( 'name', 'type' )
@@ -48,9 +48,9 @@
"""
raise TypeError( "Not implemented" )
- def dictify( self, trans, view='collection', value_mapper=None ):
- # TODO: need to dictify conditions.
- group_dict = super( Group, self ).dictify( view=view, value_mapper=value_mapper )
+ def to_dict( self, trans, view='collection', value_mapper=None ):
+ # TODO: need to to_dict conditions.
+ group_dict = super( Group, self ).to_dict( view=view, value_mapper=value_mapper )
return group_dict
class Repeat( Group ):
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/visualization/genomes.py
--- a/lib/galaxy/visualization/genomes.py
+++ b/lib/galaxy/visualization/genomes.py
@@ -73,7 +73,7 @@
self.len_file = len_file
self.twobit_file = twobit_file
- def dictify( self, num=None, chrom=None, low=None ):
+ def to_dict( self, num=None, chrom=None, low=None ):
"""
Returns representation of self as a dictionary.
"""
@@ -289,7 +289,7 @@
# Set up return value or log exception if genome not found for key.
rval = None
if genome:
- rval = genome.dictify( num=num, chrom=chrom, low=low )
+ rval = genome.to_dict( num=num, chrom=chrom, low=low )
else:
log.exception( 'genome not found for key %s' % dbkey )
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/web/base/controller.py
--- a/lib/galaxy/web/base/controller.py
+++ b/lib/galaxy/web/base/controller.py
@@ -32,7 +32,7 @@
from galaxy.model.orm import eagerload, eagerload_all
from galaxy.security.validate_user_input import validate_publicname
from galaxy.util.sanitize_html import sanitize_html
-from galaxy.model.item_attrs import DictifiableMixin
+from galaxy.model.item_attrs import Dictifiable
from galaxy.datatypes.interval import ChromatinInteractions
from galaxy.datatypes.data import Text
@@ -422,7 +422,7 @@
def get_history_dict( self, trans, history, hda_dictionaries=None ):
"""Returns history data in the form of a dictionary.
"""
- history_dict = history.dictify( view='element', value_mapper={ 'id':trans.security.encode_id })
+ history_dict = history.to_dict( view='element', value_mapper={ 'id':trans.security.encode_id })
history_dict[ 'nice_size' ] = history.get_disk_size( nice_size=True )
history_dict[ 'annotation' ] = history.get_item_annotation_str( trans.sa_session, trans.user, history )
@@ -583,7 +583,7 @@
"""
#precondition: the user's access to this hda has already been checked
#TODO:?? postcondition: all ids are encoded (is this really what we want at this level?)
- hda_dict = hda.dictify( view='element' )
+ hda_dict = hda.to_dict( view='element' )
hda_dict[ 'api_type' ] = "file"
# Add additional attributes that depend on trans can hence must be added here rather than at the model level.
@@ -594,7 +594,7 @@
# ---- return here if deleted AND purged OR can't access
purged = ( hda.purged or hda.dataset.purged )
if ( hda.deleted and purged ):
- #TODO: dictify should really go AFTER this - only summary data
+ #TODO: to_dict should really go AFTER this - only summary data
return trans.security.encode_dict_ids( hda_dict )
if trans.user_is_admin() or trans.app.config.expose_dataset_path:
@@ -920,7 +920,7 @@
return query
return query.all()
- #TODO: move into model (dictify)
+ #TODO: move into model (to_dict)
def get_visualization_summary_dict( self, visualization ):
"""
Return a set of summary attributes for a visualization in dictionary form.
@@ -949,7 +949,7 @@
'user_id' : visualization.user.id,
'dbkey' : visualization.dbkey,
'slug' : visualization.slug,
- # dictify only the latest revision (allow older to be fetched elsewhere)
+ # to_dict only the latest revision (allow older to be fetched elsewhere)
'latest_revision' : self.get_visualization_revision_dict( visualization.latest_revision ),
'revisions' : [ r.id for r in visualization.revisions ],
}
@@ -1129,7 +1129,7 @@
return None
# Get tool definition and add input values from job.
- tool_dict = tool.dictify( trans, io_details=True )
+ tool_dict = tool.to_dict( trans, io_details=True )
inputs_dict = tool_dict[ 'inputs' ]
tool_param_values = dict( [ ( p.name, p.value ) for p in job.parameters ] )
tool_param_values = tool.params_from_strings( tool_param_values, trans.app, ignore_errors=True )
@@ -1139,8 +1139,8 @@
name = t_input[ 'name' ]
if name in tool_param_values:
value = tool_param_values[ name ]
- if isinstance( value, DictifiableMixin ):
- value = value.dictify()
+ if isinstance( value, Dictifiable ):
+ value = value.to_dict()
t_input[ 'value' ] = value
return tool_dict
@@ -1172,7 +1172,7 @@
source='data' )
return {
"track_type": dataset.datatype.track_type,
- "dataset": trans.security.encode_dict_ids( dataset.dictify() ),
+ "dataset": trans.security.encode_dict_ids( dataset.to_dict() ),
"name": track_dict['name'],
"prefs": prefs,
"mode": track_dict.get( 'mode', 'Auto' ),
@@ -1252,7 +1252,7 @@
return {
"track_type": dataset.datatype.track_type,
"name": dataset.name,
- "dataset": trans.security.encode_dict_ids( dataset.dictify() ),
+ "dataset": trans.security.encode_dict_ids( dataset.to_dict() ),
"prefs": {},
"filters": { 'filters' : track_data_provider.get_filters() },
"tool": self.get_tool_def( trans, dataset ),
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/datasets.py
--- a/lib/galaxy/webapps/galaxy/api/datasets.py
+++ b/lib/galaxy/webapps/galaxy/api/datasets.py
@@ -60,7 +60,7 @@
rval[ 'display_types' ] = self.get_old_display_applications( trans, dataset )
rval[ 'display_apps' ] = self.get_display_apps( trans, dataset )
else:
- rval = dataset.dictify()
+ rval = dataset.to_dict()
except Exception, e:
rval = "Error in dataset API at listing contents: " + str( e )
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/folders.py
--- a/lib/galaxy/webapps/galaxy/api/folders.py
+++ b/lib/galaxy/webapps/galaxy/api/folders.py
@@ -35,7 +35,7 @@
# check_ownership=false since we are only displaying it.
content = self.get_library_folder( trans, id, check_ownership=False,
check_accessible=True )
- return self.encode_all_ids( trans, content.dictify( view='element' ) )
+ return self.encode_all_ids( trans, content.to_dict( view='element' ) )
@web.expose_api
def create( self, trans, payload, **kwd ):
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/forms.py
--- a/lib/galaxy/webapps/galaxy/api/forms.py
+++ b/lib/galaxy/webapps/galaxy/api/forms.py
@@ -23,7 +23,7 @@
query = trans.sa_session.query( trans.app.model.FormDefinition )#.filter( trans.app.model.FormDefinition.table.c.deleted == False )
rval = []
for form_definition in query:
- item = form_definition.dictify( value_mapper={ 'id': trans.security.encode_id, 'form_definition_current_id': trans.security.encode_id } )
+ item = form_definition.to_dict( value_mapper={ 'id': trans.security.encode_id, 'form_definition_current_id': trans.security.encode_id } )
item['url'] = url_for( 'form', id=trans.security.encode_id( form_definition.id ) )
rval.append( item )
return rval
@@ -47,7 +47,7 @@
if not form_definition or not trans.user_is_admin():
trans.response.status = 400
return "Invalid form definition id ( %s ) specified." % str( form_definition_id )
- item = form_definition.dictify( view='element', value_mapper={ 'id': trans.security.encode_id, 'form_definition_current_id': trans.security.encode_id } )
+ item = form_definition.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id, 'form_definition_current_id': trans.security.encode_id } )
item['url'] = url_for( 'form', id=form_definition_id )
return item
@@ -69,6 +69,6 @@
trans.sa_session.add( form_definition )
trans.sa_session.flush()
encoded_id = trans.security.encode_id( form_definition.id )
- item = form_definition.dictify( view='element', value_mapper={ 'id': trans.security.encode_id, 'form_definition_current_id': trans.security.encode_id } )
+ item = form_definition.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id, 'form_definition_current_id': trans.security.encode_id } )
item['url'] = url_for( 'form', id=encoded_id )
return [ item ]
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/groups.py
--- a/lib/galaxy/webapps/galaxy/api/groups.py
+++ b/lib/galaxy/webapps/galaxy/api/groups.py
@@ -21,7 +21,7 @@
rval = []
for group in trans.sa_session.query( trans.app.model.Group ).filter( trans.app.model.Group.table.c.deleted == False ):
if trans.user_is_admin():
- item = group.dictify( value_mapper={ 'id': trans.security.encode_id } )
+ item = group.to_dict( value_mapper={ 'id': trans.security.encode_id } )
encoded_id = trans.security.encode_id( group.id )
item['url'] = url_for( 'group', id=encoded_id )
rval.append( item )
@@ -65,7 +65,7 @@
"""
trans.sa_session.flush()
encoded_id = trans.security.encode_id( group.id )
- item = group.dictify( view='element', value_mapper={ 'id': trans.security.encode_id } )
+ item = group.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id } )
item['url'] = url_for( 'group', id=encoded_id )
return [ item ]
@@ -89,7 +89,7 @@
if not group:
trans.response.status = 400
return "Invalid group id ( %s ) specified." % str( group_id )
- item = group.dictify( view='element', value_mapper={ 'id': trans.security.encode_id } )
+ item = group.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id } )
item['url'] = url_for( 'group', id=group_id )
item['users_url'] = url_for( 'group_users', group_id=group_id )
item['roles_url'] = url_for( 'group_roles', group_id=group_id )
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/histories.py
--- a/lib/galaxy/webapps/galaxy/api/histories.py
+++ b/lib/galaxy/webapps/galaxy/api/histories.py
@@ -46,14 +46,14 @@
.order_by( desc( trans.app.model.History.table.c.update_time ) )
.all() )
for history in query:
- item = history.dictify(value_mapper={'id':trans.security.encode_id})
+ item = history.to_dict(value_mapper={'id':trans.security.encode_id})
item['url'] = url_for( 'history', id=trans.security.encode_id( history.id ) )
rval.append( item )
elif trans.galaxy_session.current_history:
#No user, this must be session authentication with an anonymous user.
history = trans.galaxy_session.current_history
- item = history.dictify(value_mapper={'id':trans.security.encode_id})
+ item = history.to_dict(value_mapper={'id':trans.security.encode_id})
item['url'] = url_for( 'history', id=trans.security.encode_id( history.id ) )
rval.append(item)
@@ -139,7 +139,7 @@
trans.sa_session.add( new_history )
trans.sa_session.flush()
- item = new_history.dictify(view='element', value_mapper={'id':trans.security.encode_id})
+ item = new_history.to_dict(view='element', value_mapper={'id':trans.security.encode_id})
item['url'] = url_for( 'history', id=item['id'] )
#TODO: copy own history
@@ -254,7 +254,7 @@
:param id: the encoded id of the history to undelete
:type payload: dict
:param payload: a dictionary containing any or all the
- fields in :func:`galaxy.model.History.dictify` and/or the following:
+ fields in :func:`galaxy.model.History.to_dict` and/or the following:
* annotation: an annotation for the history
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/history_contents.py
--- a/lib/galaxy/webapps/galaxy/api/history_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/history_contents.py
@@ -190,7 +190,7 @@
hda = ld.library_dataset_dataset_association.to_history_dataset_association( history, add_to_history=True )
trans.sa_session.flush()
- return hda.dictify()
+ return hda.to_dict()
else:
# TODO: implement other "upload" methods here.
@@ -210,7 +210,7 @@
:param id: the encoded id of the history to undelete
:type payload: dict
:param payload: a dictionary containing any or all the
- fields in :func:`galaxy.model.HistoryDatasetAssociation.dictify`
+ fields in :func:`galaxy.model.HistoryDatasetAssociation.to_dict`
and/or the following:
* annotation: an annotation for the HDA
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/item_tags.py
--- a/lib/galaxy/webapps/galaxy/api/item_tags.py
+++ b/lib/galaxy/webapps/galaxy/api/item_tags.py
@@ -49,7 +49,7 @@
return 'OK'
def _api_value( self, tag, trans, view='element' ):
- return tag.dictify( view=view, value_mapper={ 'id': trans.security.encode_id } )
+ return tag.to_dict( view=view, value_mapper={ 'id': trans.security.encode_id } )
class HistoryContentTagsController( BaseItemTagsController ):
controller_name = "history_content_tags"
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/libraries.py
--- a/lib/galaxy/webapps/galaxy/api/libraries.py
+++ b/lib/galaxy/webapps/galaxy/api/libraries.py
@@ -49,7 +49,7 @@
trans.model.Library.table.c.id.in_( accessible_restricted_library_ids ) ) )
rval = []
for library in query:
- item = library.dictify()
+ item = library.to_dict()
item['url'] = url_for( route, id=trans.security.encode_id( library.id ) )
item['id'] = trans.security.encode_id( item['id'] )
rval.append( item )
@@ -87,7 +87,7 @@
library = None
if not library or not ( trans.user_is_admin() or trans.app.security_agent.can_access_library( trans.get_current_user_roles(), library ) ):
raise HTTPBadRequest( detail='Invalid library id ( %s ) specified.' % id )
- item = library.dictify( view='element' )
+ item = library.to_dict( view='element' )
#item['contents_url'] = url_for( 'contents', library_id=library_id )
item['contents_url'] = url_for( 'library_contents', library_id=library_id )
return item
@@ -162,4 +162,4 @@
library.deleted = True
trans.sa_session.add( library )
trans.sa_session.flush()
- return library.dictify( view='element', value_mapper={ 'id' : trans.security.encode_id } )
+ return library.to_dict( view='element', value_mapper={ 'id' : trans.security.encode_id } )
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/library_contents.py
--- a/lib/galaxy/webapps/galaxy/api/library_contents.py
+++ b/lib/galaxy/webapps/galaxy/api/library_contents.py
@@ -105,7 +105,7 @@
:rtype: dict
:returns: detailed library item information
.. seealso::
- :func:`galaxy.model.LibraryDataset.dictify` and
+ :func:`galaxy.model.LibraryDataset.to_dict` and
:attr:`galaxy.model.LibraryFolder.dict_element_visible_keys`
"""
class_name, content_id = self.__decode_library_content_id( trans, id )
@@ -113,7 +113,7 @@
content = self.get_library_folder( trans, content_id, check_ownership=False, check_accessible=True )
else:
content = self.get_library_dataset( trans, content_id, check_ownership=False, check_accessible=True )
- return self.encode_all_ids( trans, content.dictify( view='element' ) )
+ return self.encode_all_ids( trans, content.to_dict( view='element' ) )
@web.expose_api
def create( self, trans, library_id, payload, **kwd ):
@@ -266,7 +266,7 @@
return { 'error' : 'user has no permission to add to library folder (%s)' %( folder_id ) }
ldda = self.copy_hda_to_library_folder( trans, hda, folder, ldda_message=ldda_message )
- ldda_dict = ldda.dictify()
+ ldda_dict = ldda.to_dict()
rval = trans.security.encode_dict_ids( ldda_dict )
except Exception, exc:
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/permissions.py
--- a/lib/galaxy/webapps/galaxy/api/permissions.py
+++ b/lib/galaxy/webapps/galaxy/api/permissions.py
@@ -47,6 +47,6 @@
trans.app.security_agent.copy_library_permissions( trans, library, library.root_folder )
message = "Permissions updated for library '%s'." % library.name
- item = library.dictify( view='element' )
+ item = library.to_dict( view='element' )
return item
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/quotas.py
--- a/lib/galaxy/webapps/galaxy/api/quotas.py
+++ b/lib/galaxy/webapps/galaxy/api/quotas.py
@@ -34,7 +34,7 @@
route = 'quota'
query = query.filter( trans.app.model.Quota.table.c.deleted == False )
for quota in query:
- item = quota.dictify( value_mapper={ 'id': trans.security.encode_id } )
+ item = quota.to_dict( value_mapper={ 'id': trans.security.encode_id } )
encoded_id = trans.security.encode_id( quota.id )
item['url'] = url_for( route, id=encoded_id )
rval.append( item )
@@ -49,7 +49,7 @@
Displays information about a quota.
"""
quota = self.get_quota( trans, id, deleted=util.string_as_bool( deleted ) )
- return quota.dictify( view='element', value_mapper={ 'id': trans.security.encode_id } )
+ return quota.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id } )
@web.expose_api
@web.require_admin
@@ -67,7 +67,7 @@
quota, message = self._create_quota( params )
except ActionInputError, e:
raise HTTPBadRequest( detail=str( e ) )
- item = quota.dictify( value_mapper={ 'id': trans.security.encode_id } )
+ item = quota.to_dict( value_mapper={ 'id': trans.security.encode_id } )
item['url'] = url_for( 'quota', id=trans.security.encode_id( quota.id ) )
item['message'] = message
return item
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/request_types.py
--- a/lib/galaxy/webapps/galaxy/api/request_types.py
+++ b/lib/galaxy/webapps/galaxy/api/request_types.py
@@ -19,7 +19,7 @@
"""
rval = []
for request_type in trans.app.security_agent.get_accessible_request_types( trans, trans.user ):
- item = request_type.dictify( value_mapper={ 'id': trans.security.encode_id, 'request_form_id': trans.security.encode_id, 'sample_form_id': trans.security.encode_id } )
+ item = request_type.to_dict( value_mapper={ 'id': trans.security.encode_id, 'request_form_id': trans.security.encode_id, 'sample_form_id': trans.security.encode_id } )
encoded_id = trans.security.encode_id( request_type.id )
item['url'] = url_for( 'request_type', id=encoded_id )
rval.append( item )
@@ -47,7 +47,7 @@
if not trans.app.security_agent.can_access_request_type( trans.user.all_roles(), request_type ):
trans.response.status = 400
return "No permission to access request_type ( %s )." % str( request_type_id )
- item = request_type.dictify( view='element', value_mapper={ 'id': trans.security.encode_id, 'request_form_id': trans.security.encode_id, 'sample_form_id': trans.security.encode_id } )
+ item = request_type.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id, 'request_form_id': trans.security.encode_id, 'sample_form_id': trans.security.encode_id } )
item['url'] = url_for( 'request_type', id=request_type_id )
return item
@@ -97,6 +97,6 @@
trans.sa_session.add( request_type )
trans.sa_session.flush()
encoded_id = trans.security.encode_id( request_type.id )
- item = request_type.dictify( view='element', value_mapper={ 'id': trans.security.encode_id, 'request_form_id': trans.security.encode_id, 'sample_form_id': trans.security.encode_id } )
+ item = request_type.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id, 'request_form_id': trans.security.encode_id, 'sample_form_id': trans.security.encode_id } )
item['url'] = url_for( 'request_type', id=encoded_id )
return [ item ]
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/requests.py
--- a/lib/galaxy/webapps/galaxy/api/requests.py
+++ b/lib/galaxy/webapps/galaxy/api/requests.py
@@ -30,7 +30,7 @@
.all()
rval = []
for request in query:
- item = request.dictify()
+ item = request.to_dict()
item['url'] = url_for( 'requests', id=trans.security.encode_id( request.id ) )
item['id'] = trans.security.encode_id( item['id'] )
if trans.user_is_admin():
@@ -55,7 +55,7 @@
if not request or not ( trans.user_is_admin() or request.user.id == trans.user.id ):
trans.response.status = 400
return "Invalid request id ( %s ) specified." % str( request_id )
- item = request.dictify()
+ item = request.to_dict()
item['url'] = url_for( 'requests', id=trans.security.encode_id( request.id ) )
item['id'] = trans.security.encode_id( item['id'] )
item['user'] = request.user.email
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/roles.py
--- a/lib/galaxy/webapps/galaxy/api/roles.py
+++ b/lib/galaxy/webapps/galaxy/api/roles.py
@@ -18,7 +18,7 @@
rval = []
for role in trans.sa_session.query( trans.app.model.Role ).filter( trans.app.model.Role.table.c.deleted == False ):
if trans.user_is_admin() or trans.app.security_agent.ok_to_display( trans.user, role ):
- item = role.dictify( value_mapper={ 'id': trans.security.encode_id } )
+ item = role.to_dict( value_mapper={ 'id': trans.security.encode_id } )
encoded_id = trans.security.encode_id( role.id )
item['url'] = url_for( 'role', id=encoded_id )
rval.append( item )
@@ -43,7 +43,7 @@
if not role or not (trans.user_is_admin() or trans.app.security_agent.ok_to_display( trans.user, role )):
trans.response.status = 400
return "Invalid role id ( %s ) specified." % str( role_id )
- item = role.dictify( view='element', value_mapper={ 'id': trans.security.encode_id } )
+ item = role.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id } )
item['url'] = url_for( 'role', id=role_id )
return item
@@ -81,6 +81,6 @@
trans.app.security_agent.associate_group_role( group, role )
trans.sa_session.flush()
encoded_id = trans.security.encode_id( role.id )
- item = role.dictify( view='element', value_mapper={ 'id': trans.security.encode_id } )
+ item = role.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id } )
item['url'] = url_for( 'role', id=encoded_id )
return [ item ]
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/samples.py
--- a/lib/galaxy/webapps/galaxy/api/samples.py
+++ b/lib/galaxy/webapps/galaxy/api/samples.py
@@ -35,7 +35,7 @@
return "Invalid request id ( %s ) specified." % str( request_id )
rval = []
for sample in request.samples:
- item = sample.dictify()
+ item = sample.to_dict()
item['url'] = url_for( 'samples',
request_id=trans.security.encode_id( request_id ),
id=trans.security.encode_id( sample.id ) )
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py
--- a/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py
+++ b/lib/galaxy/webapps/galaxy/api/tool_shed_repositories.py
@@ -45,7 +45,7 @@
.order_by( trans.app.model.ToolShedRepository.table.c.name ) \
.all()
for tool_shed_repository in query:
- tool_shed_repository_dict = tool_shed_repository.dictify( value_mapper=default_tool_shed_repository_value_mapper( trans, tool_shed_repository ) )
+ tool_shed_repository_dict = tool_shed_repository.to_dict( value_mapper=default_tool_shed_repository_value_mapper( trans, tool_shed_repository ) )
tool_shed_repository_dict[ 'url' ] = web.url_for( controller='tool_shed_repositories',
action='show',
id=trans.security.encode_id( tool_shed_repository.id ) )
@@ -402,7 +402,7 @@
repair_dict = repository_util.repair_tool_shed_repository( trans,
repository,
encoding_util.tool_shed_encode( repo_info_dict ) )
- repository_dict = repository.dictify( value_mapper=default_tool_shed_repository_value_mapper( trans, repository ) )
+ repository_dict = repository.to_dict( value_mapper=default_tool_shed_repository_value_mapper( trans, repository ) )
repository_dict[ 'url' ] = web.url_for( controller='tool_shed_repositories',
action='show',
id=trans.security.encode_id( repository.id ) )
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/tools.py
--- a/lib/galaxy/webapps/galaxy/api/tools.py
+++ b/lib/galaxy/webapps/galaxy/api/tools.py
@@ -32,7 +32,7 @@
# Create return value.
try:
- return self.app.toolbox.dictify( trans, in_panel=in_panel, trackster=trackster )
+ return self.app.toolbox.to_dict( trans, in_panel=in_panel, trackster=trackster )
except Exception, exc:
log.error( 'could not convert toolbox to dictionary: %s', str( exc ), exc_info=True )
trans.response.status = 500
@@ -45,7 +45,7 @@
Returns tool information, including parameters and inputs.
"""
try:
- return self.app.toolbox.tools_by_id[ id ].dictify( trans, for_display=True )
+ return self.app.toolbox.tools_by_id[ id ].to_dict( trans, for_display=True )
except Exception, exc:
log.error( 'could not convert tool (%s) to dictionary: %s', id, str( exc ), exc_info=True )
trans.response.status = 500
@@ -105,7 +105,7 @@
outputs = rval[ "outputs" ]
#TODO:?? poss. only return ids?
for output in output_datasets:
- output_dict = output.dictify()
+ output_dict = output.to_dict()
outputs.append( trans.security.encode_dict_ids( output_dict ) )
return rval
@@ -411,7 +411,7 @@
if joda.name == output_name:
output_dataset = joda.dataset
- dataset_dict = output_dataset.dictify()
+ dataset_dict = output_dataset.to_dict()
dataset_dict[ 'id' ] = trans.security.encode_id( dataset_dict[ 'id' ] )
dataset_dict[ 'track_config' ] = self.get_new_track_config( trans, output_dataset );
return dataset_dict
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/users.py
--- a/lib/galaxy/webapps/galaxy/api/users.py
+++ b/lib/galaxy/webapps/galaxy/api/users.py
@@ -33,13 +33,13 @@
query = query.filter( trans.app.model.User.table.c.deleted == False )
# special case: user can see only their own user
if not trans.user_is_admin():
- item = trans.user.dictify( value_mapper={ 'id': trans.security.encode_id } )
+ item = trans.user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
item['url'] = url_for( route, id=item['id'] )
item['quota_percent'] = trans.app.quota_agent.get_percent( trans=trans )
return [item]
for user in query:
- item = user.dictify( value_mapper={ 'id': trans.security.encode_id } )
+ item = user.to_dict( value_mapper={ 'id': trans.security.encode_id } )
#TODO: move into api_values
item['quota_percent'] = trans.app.quota_agent.get_percent( trans=trans )
item['url'] = url_for( route, id=item['id'] )
@@ -77,7 +77,7 @@
raise
else:
raise HTTPBadRequest( detail='Invalid user id ( %s ) specified' % id )
- item = user.dictify( view='element', value_mapper={ 'id': trans.security.encode_id,
+ item = user.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id,
'total_disk_usage': float } )
#TODO: move into api_values (needs trans, tho - can we do that with api_keys/@property??)
#TODO: works with other users (from admin)??
@@ -94,7 +94,7 @@
raise HTTPNotImplemented( detail='User creation is not allowed in this Galaxy instance' )
if trans.app.config.use_remote_user and trans.user_is_admin():
user = trans.get_or_create_remote_user(remote_user_email=payload['remote_user_email'])
- item = user.dictify( view='element', value_mapper={ 'id': trans.security.encode_id,
+ item = user.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id,
'total_disk_usage': float } )
else:
raise HTTPNotImplemented()
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/visualizations.py
--- a/lib/galaxy/webapps/galaxy/api/visualizations.py
+++ b/lib/galaxy/webapps/galaxy/api/visualizations.py
@@ -212,7 +212,7 @@
# this allows PUT'ing an entire model back to the server without attribute errors on uneditable attrs
valid_but_uneditable_keys = (
'id', 'model_class'
- #TODO: fill out when we create dictify, get_dict, whatevs
+ #TODO: fill out when we create to_dict, get_dict, whatevs
)
#TODO: deleted
#TODO: importable
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/api/workflows.py
--- a/lib/galaxy/webapps/galaxy/api/workflows.py
+++ b/lib/galaxy/webapps/galaxy/api/workflows.py
@@ -31,7 +31,7 @@
for wf in trans.sa_session.query(trans.app.model.StoredWorkflow).filter_by(
user=trans.user, deleted=False).order_by(
desc(trans.app.model.StoredWorkflow.table.c.update_time)).all():
- item = wf.dictify(value_mapper={'id':trans.security.encode_id})
+ item = wf.to_dict(value_mapper={'id':trans.security.encode_id})
encoded_id = trans.security.encode_id(wf.id)
item['url'] = url_for('workflow', id=encoded_id)
rval.append(item)
@@ -39,7 +39,7 @@
user=trans.user ).join( 'stored_workflow' ).filter(
trans.app.model.StoredWorkflow.deleted == False ).order_by(
desc( trans.app.model.StoredWorkflow.update_time ) ).all():
- item = wf_sa.stored_workflow.dictify(value_mapper={'id':trans.security.encode_id})
+ item = wf_sa.stored_workflow.to_dict(value_mapper={'id':trans.security.encode_id})
encoded_id = trans.security.encode_id(wf_sa.stored_workflow.id)
item['url'] = url_for('workflow', id=encoded_id)
rval.append(item)
@@ -67,7 +67,7 @@
except:
trans.response.status = 400
return "That workflow does not exist."
- item = stored_workflow.dictify(view='element', value_mapper={'id':trans.security.encode_id})
+ item = stored_workflow.to_dict(view='element', value_mapper={'id':trans.security.encode_id})
item['url'] = url_for('workflow', id=workflow_id)
latest_workflow = stored_workflow.latest_workflow
inputs = {}
@@ -329,7 +329,7 @@
# return list
rval= [];
- item = workflow.dictify(value_mapper={'id':trans.security.encode_id})
+ item = workflow.to_dict(value_mapper={'id':trans.security.encode_id})
item['url'] = url_for('workflow', id=encoded_id)
rval.append(item);
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/galaxy/controllers/visualization.py
--- a/lib/galaxy/webapps/galaxy/controllers/visualization.py
+++ b/lib/galaxy/webapps/galaxy/controllers/visualization.py
@@ -873,8 +873,8 @@
# Add tool, dataset attributes to config based on id.
tool = trans.app.toolbox.get_tool( viz_config[ 'tool_id' ] )
- viz_config[ 'tool' ] = tool.dictify( trans, io_details=True )
- viz_config[ 'dataset' ] = trans.security.encode_dict_ids( dataset.dictify() )
+ viz_config[ 'tool' ] = tool.to_dict( trans, io_details=True )
+ viz_config[ 'dataset' ] = trans.security.encode_dict_ids( dataset.to_dict() )
return trans.fill_template_mako( "visualization/sweepster.mako", config=viz_config )
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/tool_shed/api/repositories.py
--- a/lib/galaxy/webapps/tool_shed/api/repositories.py
+++ b/lib/galaxy/webapps/tool_shed/api/repositories.py
@@ -113,7 +113,7 @@
# Get the repository information.
repository = suc.get_repository_by_name_and_owner( trans.app, name, owner )
encoded_repository_id = trans.security.encode_id( repository.id )
- repository_dict = repository.dictify( view='element', value_mapper=default_repository_value_mapper( trans, repository ) )
+ repository_dict = repository.to_dict( view='element', value_mapper=default_repository_value_mapper( trans, repository ) )
repository_dict[ 'url' ] = web.url_for( controller='repositories',
action='show',
id=encoded_repository_id )
@@ -129,7 +129,7 @@
changeset_revision = new_changeset_revision
if repository_metadata:
encoded_repository_metadata_id = trans.security.encode_id( repository_metadata.id )
- repository_metadata_dict = repository_metadata.dictify( view='collection',
+ repository_metadata_dict = repository_metadata.to_dict( view='collection',
value_mapper=default_repository_metadata_value_mapper( trans, repository_metadata ) )
repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',
action='show',
@@ -164,7 +164,7 @@
.order_by( trans.app.model.Repository.table.c.name ) \
.all()
for repository in query:
- repository_dict = repository.dictify( view='collection', value_mapper=default_repository_value_mapper( trans, repository ) )
+ repository_dict = repository.to_dict( view='collection', value_mapper=default_repository_value_mapper( trans, repository ) )
repository_dict[ 'url' ] = web.url_for( controller='repositories',
action='show',
id=trans.security.encode_id( repository.id ) )
@@ -187,7 +187,7 @@
# Example URL: http://localhost:9009/api/repositories/f9cad7b01a472135
try:
repository = suc.get_repository_in_tool_shed( trans, id )
- repository_dict = repository.dictify( view='element', value_mapper=default_repository_value_mapper( trans, repository ) )
+ repository_dict = repository.to_dict( view='element', value_mapper=default_repository_value_mapper( trans, repository ) )
repository_dict[ 'url' ] = web.url_for( controller='repositories',
action='show',
id=trans.security.encode_id( repository.id ) )
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/tool_shed/api/repository_revisions.py
--- a/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
+++ b/lib/galaxy/webapps/tool_shed/api/repository_revisions.py
@@ -127,7 +127,7 @@
.order_by( trans.app.model.RepositoryMetadata.table.c.repository_id ) \
.all()
for repository_metadata in query:
- repository_metadata_dict = repository_metadata.dictify( view='collection',
+ repository_metadata_dict = repository_metadata.to_dict( view='collection',
value_mapper=default_value_mapper( trans, repository_metadata ) )
repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',
action='show',
diff -r ce4c7a3fb0d847633db4e77d6e90cc94c7d55aa3 -r a4259a97c287927d6a377092bc649579a88cb433 lib/galaxy/webapps/tool_shed/model/__init__.py
--- a/lib/galaxy/webapps/tool_shed/model/__init__.py
+++ b/lib/galaxy/webapps/tool_shed/model/__init__.py
@@ -4,7 +4,7 @@
from galaxy import util
from galaxy.util.bunch import Bunch
from galaxy.util.hash_util import new_secure_hash
-from galaxy.model.item_attrs import DictifiableMixin
+from galaxy.model.item_attrs import Dictifiable
import tool_shed.repository_types.util as rt_util
from galaxy import eggs
@@ -19,7 +19,7 @@
pass
-class User( object, DictifiableMixin ):
+class User( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'email' )
dict_element_visible_keys = ( 'id', 'email', 'username' )
@@ -61,7 +61,7 @@
return 0
-class Group( object, DictifiableMixin ):
+class Group( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'name' )
dict_element_visible_keys = ( 'id', 'name' )
@@ -70,7 +70,7 @@
self.deleted = False
-class Role( object, DictifiableMixin ):
+class Role( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'name' )
dict_element_visible_keys = ( 'id', 'name', 'description', 'type' )
private_id = None
@@ -130,7 +130,7 @@
self.prev_session_id = prev_session_id
-class Repository( object, DictifiableMixin ):
+class Repository( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'name', 'type', 'description', 'user_id', 'private', 'deleted', 'times_downloaded', 'deprecated' )
dict_element_visible_keys = ( 'id', 'name', 'type', 'description', 'long_description', 'user_id', 'private', 'deleted', 'times_downloaded',
'deprecated' )
@@ -155,7 +155,7 @@
self.deprecated = deprecated
def as_dict( self, value_mapper=None ):
- return self.dictify( view='element', value_mapper=value_mapper )
+ return self.to_dict( view='element', value_mapper=value_mapper )
def can_change_type( self, app ):
# Allow changing the type only if the repository has no contents, has never been installed, or has never been changed from
@@ -175,7 +175,7 @@
return True
return False
- def dictify( self, view='collection', value_mapper=None ):
+ def to_dict( self, view='collection', value_mapper=None ):
if value_mapper is None:
value_mapper = {}
rval = {}
@@ -244,7 +244,7 @@
fp.close()
-class RepositoryMetadata( object, DictifiableMixin ):
+class RepositoryMetadata( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'malicious', 'downloadable', 'has_repository_dependencies', 'includes_datatypes',
'includes_tools', 'includes_tool_dependencies', 'includes_tools_for_display_in_tool_panel', 'includes_workflows' )
dict_element_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'malicious', 'downloadable', 'tools_functionally_correct', 'do_not_test',
@@ -284,9 +284,9 @@
return False
def as_dict( self, value_mapper=None ):
- return self.dictify( view='element', value_mapper=value_mapper )
+ return self.to_dict( view='element', value_mapper=value_mapper )
- def dictify( self, view='collection', value_mapper=None ):
+ def to_dict( self, view='collection', value_mapper=None ):
if value_mapper is None:
value_mapper = {}
rval = {}
@@ -304,7 +304,7 @@
return rval
-class SkipToolTest( object, DictifiableMixin ):
+class SkipToolTest( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'repository_metadata_id', 'initial_changeset_revision' )
dict_element_visible_keys = ( 'id', 'repository_metadata_id', 'initial_changeset_revision', 'comment' )
@@ -315,9 +315,9 @@
self.comment = comment
def as_dict( self, value_mapper=None ):
- return self.dictify( view='element', value_mapper=value_mapper )
+ return self.to_dict( view='element', value_mapper=value_mapper )
- def dictify( self, view='collection', value_mapper=None ):
+ def to_dict( self, view='collection', value_mapper=None ):
if value_mapper is None:
value_mapper = {}
rval = {}
@@ -335,7 +335,7 @@
return rval
-class RepositoryReview( object, DictifiableMixin ):
+class RepositoryReview( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'user_id', 'rating', 'deleted' )
dict_element_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'user_id', 'rating', 'deleted' )
approved_states = Bunch( NO='no', YES='yes' )
@@ -347,7 +347,7 @@
self.rating = rating
self.deleted = deleted
-class ComponentReview( object, DictifiableMixin ):
+class ComponentReview( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'repository_review_id', 'component_id', 'private', 'approved', 'rating', 'deleted' )
dict_element_visible_keys = ( 'id', 'repository_review_id', 'component_id', 'private', 'approved', 'rating', 'deleted' )
approved_states = Bunch( NO='no', YES='yes', NA='not_applicable' )
@@ -389,7 +389,7 @@
self.repository = repository
-class Category( object, DictifiableMixin ):
+class Category( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'name', 'description', 'deleted' )
dict_element_visible_keys = ( 'id', 'name', 'description', 'deleted' )
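
The rename is mechanical: callers keep the same view and value_mapper arguments and only change the method name. As a point of reference, here is a minimal sketch of the serialization pattern the callers above rely on; the class, the encode_id stand-in, and the key names are illustrative, not the actual Galaxy implementation:

    # Minimal, hypothetical sketch of Dictifiable-style serialization: to_dict()
    # selects keys per view and applies optional per-key value mappers (e.g. id encoding).
    class Dictifiable( object ):

        def to_dict( self, view='collection', value_mapper=None ):
            if value_mapper is None:
                value_mapper = {}
            rval = {}
            try:
                visible_keys = getattr( self, 'dict_%s_visible_keys' % view )
            except AttributeError:
                raise Exception( 'Unknown API view: %s' % view )
            for key in visible_keys:
                value = getattr( self, key, None )
                # value_mapper maps a key to a callable applied to that key's value.
                rval[ key ] = value_mapper[ key ]( value ) if key in value_mapper else value
            return rval


    class ExampleUser( Dictifiable ):
        dict_collection_visible_keys = ( 'id', 'email' )
        dict_element_visible_keys = ( 'id', 'email', 'username' )

        def __init__( self, id, email, username ):
            self.id, self.email, self.username = id, email, username


    encode_id = lambda value: 'encoded-%s' % value  # stand-in for an id-encoding helper
    user = ExampleUser( 42, 'a@example.org', 'alice' )
    print( user.to_dict( view='element', value_mapper={ 'id': encode_id } ) )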
https://bitbucket.org/galaxy/galaxy-central/commits/c59cc42b2793/
Changeset: c59cc42b2793
User: jgoecks
Date: 2013-08-30 20:23:42
Summary: Remove unneeded to_dict code/methods.
Affected #: 1 file
diff -r a4259a97c287927d6a377092bc649579a88cb433 -r c59cc42b279385f0d351d31f919d29bb7c012877 lib/galaxy/webapps/tool_shed/model/__init__.py
--- a/lib/galaxy/webapps/tool_shed/model/__init__.py
+++ b/lib/galaxy/webapps/tool_shed/model/__init__.py
@@ -176,20 +176,7 @@
return False
def to_dict( self, view='collection', value_mapper=None ):
- if value_mapper is None:
- value_mapper = {}
- rval = {}
- try:
- visible_keys = self.__getattribute__( 'dict_' + view + '_visible_keys' )
- except AttributeError:
- raise Exception( 'Unknown API view: %s' % view )
- for key in visible_keys:
- try:
- rval[ key ] = self.__getattribute__( key )
- if key in value_mapper:
- rval[ key ] = value_mapper.get( key, rval[ key ] )
- except AttributeError:
- rval[ key ] = None
+ rval = super( Repository, self ).to_dict( view=view, value_mapper=value_mapper )
if 'user_id' in rval:
rval[ 'owner' ] = self.user.username
return rval
@@ -286,23 +273,6 @@
def as_dict( self, value_mapper=None ):
return self.to_dict( view='element', value_mapper=value_mapper )
- def to_dict( self, view='collection', value_mapper=None ):
- if value_mapper is None:
- value_mapper = {}
- rval = {}
- try:
- visible_keys = self.__getattribute__( 'dict_' + view + '_visible_keys' )
- except AttributeError:
- raise Exception( 'Unknown API view: %s' % view )
- for key in visible_keys:
- try:
- rval[ key ] = self.__getattribute__( key )
- if key in value_mapper:
- rval[ key ] = value_mapper.get( key, rval[ key ] )
- except AttributeError:
- rval[ key ] = None
- return rval
-
class SkipToolTest( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'repository_metadata_id', 'initial_changeset_revision' )
@@ -317,23 +287,6 @@
def as_dict( self, value_mapper=None ):
return self.to_dict( view='element', value_mapper=value_mapper )
- def to_dict( self, view='collection', value_mapper=None ):
- if value_mapper is None:
- value_mapper = {}
- rval = {}
- try:
- visible_keys = self.__getattribute__( 'dict_' + view + '_visible_keys' )
- except AttributeError:
- raise Exception( 'Unknown API view: %s' % view )
- for key in visible_keys:
- try:
- rval[ key ] = self.__getattribute__( key )
- if key in value_mapper:
- rval[ key ] = value_mapper.get( key, rval[ key ] )
- except AttributeError:
- rval[ key ] = None
- return rval
-
class RepositoryReview( object, Dictifiable ):
dict_collection_visible_keys = ( 'id', 'repository_id', 'changeset_revision', 'user_id', 'rating', 'deleted' )
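
The duplicated bodies can be dropped because each class already mixes in Dictifiable; Repository keeps a thin override only to add the derived owner key. A hedged sketch of that pattern, with the minimal mixin repeated so the snippet runs on its own (class and attribute names are abbreviated for illustration):

    class Dictifiable( object ):
        # Same minimal mixin as sketched earlier in this message, repeated here
        # so the snippet is self-contained.
        def to_dict( self, view='collection', value_mapper=None ):
            value_mapper = value_mapper or {}
            keys = getattr( self, 'dict_%s_visible_keys' % view )
            return dict( ( key, value_mapper.get( key, lambda v: v )( getattr( self, key, None ) ) )
                         for key in keys )


    class ExampleRepository( Dictifiable ):
        dict_collection_visible_keys = ( 'id', 'name', 'user_id' )
        dict_element_visible_keys = ( 'id', 'name', 'user_id' )

        def __init__( self, id, name, owner_username ):
            self.id, self.name, self.user_id = id, name, 7
            self.owner_username = owner_username

        def to_dict( self, view='collection', value_mapper=None ):
            # Let the mixin build the base dictionary, then add derived fields,
            # mirroring the super(...).to_dict(...) call introduced in this changeset.
            rval = super( ExampleRepository, self ).to_dict( view=view, value_mapper=value_mapper )
            if 'user_id' in rval:
                rval[ 'owner' ] = self.owner_username
            return rval


    print( ExampleRepository( 1, 'example_repo', 'alice' ).to_dict() )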
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: greg: Fix for zero-based comparison in the tool shed.
by commits-noreply@bitbucket.org 30 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/9345e15ec4b7/
Changeset: 9345e15ec4b7
User: greg
Date: 2013-08-30 19:08:22
Summary: Fix for zero-based comparison in the tool shed.
Affected #: 1 file
diff -r 55bbaa8f5017e32dbe7968514a4f87b941ed2a0d -r 9345e15ec4b7f81271a071f0703e89fff0c5b3ac lib/galaxy/webapps/tool_shed/controllers/repository.py
--- a/lib/galaxy/webapps/tool_shed/controllers/repository.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/repository.py
@@ -2695,7 +2695,7 @@
tool_shed_status_dict[ 'revision_upgrade' ] = 'False'
break
if metadata_changeset_revision == changeset_revision:
- if num_metadata_revisions - index > 0:
+ if num_metadata_revisions - index > 1:
tool_shed_status_dict[ 'revision_upgrade' ] = 'True'
else:
tool_shed_status_dict[ 'revision_upgrade' ] = 'False'
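
For reference, the reason the comparison must be against 1 rather than 0: ordered_metadata_changeset_revisions is zero-indexed, so num_metadata_revisions - index counts the matched revision itself plus everything after it and is therefore at least 1 for any match; a newer installable revision exists only when the difference exceeds 1. A small hypothetical illustration:

    # Hypothetical revision ids; only the relative order matters here.
    ordered_metadata_changeset_revisions = [ 'aaa111', 'bbb222', 'ccc333' ]
    num_metadata_revisions = len( ordered_metadata_changeset_revisions )  # 3

    for index, revision in enumerate( ordered_metadata_changeset_revisions ):
        remaining = num_metadata_revisions - index
        # 'ccc333' (the tip): index == 2, remaining == 1 -> no upgrade available.
        # 'bbb222': index == 1, remaining == 2 -> a newer revision exists.
        print( '%s upgrade available: %s' % ( revision, remaining > 1 ) )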
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: dannon: Re-apply changes that got reverted in 10492:8f6f926f912e
by commits-noreply@bitbucket.org 30 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/55bbaa8f5017/
Changeset: 55bbaa8f5017
User: dannon
Date: 2013-08-30 18:06:10
Summary: Re-apply changes that got reverted in 10492:8f6f926f912e
Affected #: 2 files
diff -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 -r 55bbaa8f5017e32dbe7968514a4f87b941ed2a0d lib/galaxy/webapps/tool_shed/controllers/repository.py
--- a/lib/galaxy/webapps/tool_shed/controllers/repository.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/repository.py
@@ -2664,7 +2664,7 @@
if repository:
repo_dir = repository.repo_path( trans.app )
repo = hg.repository( suc.get_configured_ui(), repo_dir )
- tool_shed_status_dict = {}
+ tool_shed_status_dict = {}
# Handle repository deprecation.
tool_shed_status_dict[ 'repository_deprecated' ] = str( repository.deprecated )
# Handle latest installable revision.
@@ -2680,7 +2680,7 @@
if changeset_revision == repository.tip( trans.app ):
tool_shed_status_dict[ 'revision_update' ] = 'False'
else:
- repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans,
+ repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans,
trans.security.encode_id( repository.id ),
changeset_revision )
if repository_metadata:
diff -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 -r 55bbaa8f5017e32dbe7968514a4f87b941ed2a0d lib/tool_shed/util/shed_util_common.py
--- a/lib/tool_shed/util/shed_util_common.py
+++ b/lib/tool_shed/util/shed_util_common.py
@@ -31,7 +31,7 @@
eggs.require( 'markupsafe' )
import markupsafe
-
+
log = logging.getLogger( __name__ )
CHUNK_SIZE = 2**20 # 1Mb
@@ -209,7 +209,7 @@
return tool_shed_url.split( ':' )[ 0 ]
return tool_shed_url.rstrip( '/' )
-def clone_repository( repository_clone_url, repository_file_dir, ctx_rev ):
+def clone_repository( repository_clone_url, repository_file_dir, ctx_rev ):
"""Clone the repository up to the specified changeset_revision. No subsequent revisions will be present in the cloned repository."""
try:
commands.clone( get_configured_ui(),
@@ -263,7 +263,7 @@
# was later uninstalled, this value should be received as the value of that change set to which the repository had been updated just prior
# to it being uninstalled.
current_changeset_revision = installed_changeset_revision
- sa_session = app.model.context.current
+ sa_session = app.model.context.current
tool_shed = get_tool_shed_from_clone_url( repository_clone_url )
if not owner:
owner = get_repository_owner_from_clone_url( repository_clone_url )
@@ -351,7 +351,7 @@
new_elem = XmlET.SubElement( elem, key )
new_elem.text = value
return elem
-
+
def generate_repository_info_elem_from_repository( tool_shed_repository, parent_elem=None, **kwd ):
return generate_repository_info_elem( tool_shed_repository.tool_shed,
tool_shed_repository.name,
@@ -394,7 +394,7 @@
def generate_tool_guid( repository_clone_url, tool ):
"""
Generate a guid for the installed tool. It is critical that this guid matches the guid for
- the tool in the Galaxy tool shed from which it is being installed. The form of the guid is
+ the tool in the Galaxy tool shed from which it is being installed. The form of the guid is
<tool shed host>/repos/<repository owner>/<repository name>/<tool id>/<tool version>
"""
tmp_url = clean_repository_clone_url( repository_clone_url )
@@ -417,7 +417,7 @@
tool_config = tool_dict[ 'tool_config' ]
file_name = strip_path( tool_config )
guids_and_configs[ guid ] = file_name
- # Parse the shed_tool_conf file in which all of this repository's tools are defined and generate the tool_panel_dict.
+ # Parse the shed_tool_conf file in which all of this repository's tools are defined and generate the tool_panel_dict.
tree, error_message = xml_util.parse_xml( shed_tool_conf )
if tree is None:
return tool_panel_dict
@@ -535,7 +535,7 @@
Send a request to the tool shed to retrieve the ctx_rev for a repository defined by the combination of a name, owner and changeset
revision.
"""
- url = url_join( tool_shed_url,
+ url = url_join( tool_shed_url,
'repository/get_ctx_rev?name=%s&owner=%s&changeset_revision=%s' % ( name, owner, changeset_revision ) )
ctx_rev = common_util.tool_shed_get( app, tool_shed_url, url )
return ctx_rev
@@ -1385,7 +1385,7 @@
def reset_previously_installed_repository( trans, repository ):
"""
- Reset the atrributes of a tool_shed_repository that was previsouly installed. The repository will be in some state other than with a
+ Reset the atrributes of a tool_shed_repository that was previsouly installed. The repository will be in some state other than with a
status of INSTALLED, so all atributes will be set to the default (NEW( state. This will enable the repository to be freshly installed.
"""
repository.deleted = False
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
30 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/8f6f926f912e/
Changeset: 8f6f926f912e
User: greg
Date: 2013-08-30 17:47:27
Summary: Enhance the update_available feature for installed tool shed repositories so that any type of status can be retrieved from the tool shed for each repository. The Installed tool shed repositories grid now displays whether revision updates are available, whether revision upgrades are available, whether the installed revision is the latest installable revision, and whether the repository has been deprecated in the tool shed. This change set includes the 0116 db table migration script; in writing it, it was discovered that it is no longer possible to drop a column from a database table using a migration script if the database is sqlite.
Affected #: 12 files
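
For orientation before the diff: the new tool_shed_status column stores the dictionary returned by the tool shed's status_for_installed_repository controller (added further down in this changeset), and the Galaxy-side properties read individual flags from it with galaxy.util.asbool. A hedged sketch of the assumed shape and of how one flag is consumed (the asbool stand-in below is illustrative, not the real galaxy.util helper):

    # Assumed shape of the JSON stored in tool_shed_repository.tool_shed_status;
    # the keys follow the status_for_installed_repository controller in this changeset.
    tool_shed_status = {
        'repository_deprecated': 'False',
        'latest_installable_revision': 'True',
        'revision_update': 'True',
        'revision_upgrade': 'False',
    }

    def asbool( value ):
        # Stand-in for galaxy.util.asbool, which treats 'True'/'true'/'1'/... as True.
        return str( value ).strip().lower() in ( 'true', 'yes', 'on', '1' )

    # Mirrors properties such as ToolShedRepository.revision_update_available.
    revision_update_available = asbool( tool_shed_status.get( 'revision_update', False ) )
    print( revision_update_available )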
diff -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 lib/galaxy/model/__init__.py
--- a/lib/galaxy/model/__init__.py
+++ b/lib/galaxy/model/__init__.py
@@ -3306,11 +3306,12 @@
class APIKeys( object ):
pass
+
class ToolShedRepository( object ):
dict_collection_visible_keys = ( 'id', 'tool_shed', 'name', 'owner', 'installed_changeset_revision', 'changeset_revision', 'ctx_rev', 'includes_datatypes',
- 'update_available', 'deleted', 'uninstalled', 'dist_to_shed', 'status', 'error_message' )
+ 'tool_shed_status', 'deleted', 'uninstalled', 'dist_to_shed', 'status', 'error_message' )
dict_element_visible_keys = ( 'id', 'tool_shed', 'name', 'owner', 'installed_changeset_revision', 'changeset_revision', 'ctx_rev', 'includes_datatypes',
- 'update_available', 'deleted', 'uninstalled', 'dist_to_shed', 'status', 'error_message' )
+ 'tool_shed_status', 'deleted', 'uninstalled', 'dist_to_shed', 'status', 'error_message' )
installation_status = Bunch( NEW='New',
CLONING='Cloning',
SETTING_TOOL_VERSIONS='Setting tool versions',
@@ -3326,8 +3327,9 @@
WARNING = 'queued',
ERROR = 'error',
UNINSTALLED = 'deleted_new' )
+
def __init__( self, id=None, create_time=None, tool_shed=None, name=None, description=None, owner=None, installed_changeset_revision=None,
- changeset_revision=None, ctx_rev=None, metadata=None, includes_datatypes=False, update_available=False, deleted=False,
+ changeset_revision=None, ctx_rev=None, metadata=None, includes_datatypes=False, tool_shed_status=None, deleted=False,
uninstalled=False, dist_to_shed=False, status=None, error_message=None ):
self.id = id
self.create_time = create_time
@@ -3340,38 +3342,72 @@
self.ctx_rev = ctx_rev
self.metadata = metadata
self.includes_datatypes = includes_datatypes
- self.update_available = update_available
+ self.tool_shed_status = tool_shed_status
self.deleted = deleted
self.uninstalled = uninstalled
self.dist_to_shed = dist_to_shed
self.status = status
self.error_message = error_message
+
def as_dict( self, value_mapper=None ):
return self.dictify( view='element', value_mapper=value_mapper )
- def repo_files_directory( self, app ):
- repo_path = self.repo_path( app )
- if repo_path:
- return os.path.join( repo_path, self.name )
- return None
- def repo_path( self, app ):
- tool_shed_url = self.tool_shed
- if tool_shed_url.find( ':' ) > 0:
- # Eliminate the port, if any, since it will result in an invalid directory name.
- tool_shed_url = tool_shed_url.split( ':' )[ 0 ]
- tool_shed = tool_shed_url.rstrip( '/' )
- for index, shed_tool_conf_dict in enumerate( app.toolbox.shed_tool_confs ):
- tool_path = shed_tool_conf_dict[ 'tool_path' ]
- relative_path = os.path.join( tool_path, tool_shed, 'repos', self.owner, self.name, self.installed_changeset_revision )
- if os.path.exists( relative_path ):
- return relative_path
- return None
+
@property
- def tool_shed_path_name( self ):
- tool_shed_url = self.tool_shed
- if tool_shed_url.find( ':' ) > 0:
- # Eliminate the port, if any, since it will result in an invalid directory name.
- tool_shed_url = tool_shed_url.split( ':' )[ 0 ]
- return tool_shed_url.rstrip( '/' )
+ def can_install( self ):
+ return self.status == self.installation_status.NEW
+
+ @property
+ def can_reset_metadata( self ):
+ return self.status == self.installation_status.INSTALLED
+
+ @property
+ def can_uninstall( self ):
+ return self.status != self.installation_status.UNINSTALLED
+
+ @property
+ def can_deactivate( self ):
+ return self.status not in [ self.installation_status.DEACTIVATED, self.installation_status.UNINSTALLED ]
+
+ @property
+ def can_reinstall_or_activate( self ):
+ return self.deleted
+
+ def dictify( self, view='collection', value_mapper=None ):
+ if value_mapper is None:
+ value_mapper = {}
+ rval = {}
+ try:
+ visible_keys = self.__getattribute__( 'dict_' + view + '_visible_keys' )
+ except AttributeError:
+ raise Exception( 'Unknown API view: %s' % view )
+ for key in visible_keys:
+ try:
+ rval[ key ] = self.__getattribute__( key )
+ if key in value_mapper:
+ rval[ key ] = value_mapper.get( key, rval[ key ] )
+ except AttributeError:
+ rval[ key ] = None
+ return rval
+
+ def get_shed_config_filename( self ):
+ shed_config_filename = None
+ if self.metadata:
+ shed_config_filename = self.metadata.get( 'shed_config_filename', shed_config_filename )
+ return shed_config_filename
+
+ def get_shed_config_dict( self, app, default=None ):
+ """
+ Return the in-memory version of the shed_tool_conf file, which is stored in the config_elems entry
+ in the shed_tool_conf_dict.
+ """
+ if not self.shed_config_filename:
+ self.guess_shed_config( app, default=default )
+ if self.shed_config_filename:
+ for shed_tool_conf_dict in app.toolbox.shed_tool_confs:
+ if self.shed_config_filename == shed_tool_conf_dict[ 'config_filename' ]:
+ return shed_tool_conf_dict
+ return default
+
def get_tool_relative_path( self, app ):
shed_conf_dict = self.get_shed_config_dict( app )
tool_path = None
@@ -3380,14 +3416,7 @@
tool_path = shed_conf_dict[ 'tool_path' ]
relative_path = os.path.join( self.tool_shed_path_name, 'repos', self.owner, self.name, self.installed_changeset_revision )
return tool_path, relative_path
- def get_shed_config_filename( self ):
- shed_config_filename = None
- if self.metadata:
- shed_config_filename = self.metadata.get( 'shed_config_filename', shed_config_filename )
- return shed_config_filename
- def set_shed_config_filename( self, value ):
- self.metadata[ 'shed_config_filename' ] = value
- shed_config_filename = property( get_shed_config_filename, set_shed_config_filename )
+
def guess_shed_config( self, app, default=None ):
tool_ids = []
metadata = self.metadata or {}
@@ -3423,59 +3452,163 @@
self.shed_config_filename = shed_tool_conf_dict[ 'config_filename' ]
return shed_tool_conf_dict
return default
- def get_shed_config_dict( self, app, default=None ):
- """
- Return the in-memory version of the shed_tool_conf file, which is stored in the config_elems entry
- in the shed_tool_conf_dict.
- """
- if not self.shed_config_filename:
- self.guess_shed_config( app, default=default )
- if self.shed_config_filename:
- for shed_tool_conf_dict in app.toolbox.shed_tool_confs:
- if self.shed_config_filename == shed_tool_conf_dict[ 'config_filename' ]:
- return shed_tool_conf_dict
- return default
- def dictify( self, view='collection', value_mapper=None ):
- if value_mapper is None:
- value_mapper = {}
- rval = {}
- try:
- visible_keys = self.__getattribute__( 'dict_' + view + '_visible_keys' )
- except AttributeError:
- raise Exception( 'Unknown API view: %s' % view )
- for key in visible_keys:
- try:
- rval[ key ] = self.__getattribute__( key )
- if key in value_mapper:
- rval[ key ] = value_mapper.get( key, rval[ key ] )
- except AttributeError:
- rval[ key ] = None
- return rval
- @property
- def can_install( self ):
- return self.status == self.installation_status.NEW
- @property
- def can_reset_metadata( self ):
- return self.status == self.installation_status.INSTALLED
- @property
- def can_uninstall( self ):
- return self.status != self.installation_status.UNINSTALLED
- @property
- def can_deactivate( self ):
- return self.status not in [ self.installation_status.DEACTIVATED, self.installation_status.UNINSTALLED ]
- @property
- def can_reinstall_or_activate( self ):
- return self.deleted
+
@property
def has_readme_files( self ):
if self.metadata:
return 'readme_files' in self.metadata
return False
+
@property
def has_repository_dependencies( self ):
if self.metadata:
return 'repository_dependencies' in self.metadata
return False
+
+ @property
+ def in_error_state( self ):
+ return self.status == self.installation_status.ERROR
+
+ @property
+ def includes_data_managers( self ):
+ if self.metadata:
+ return bool( len( self.metadata.get( 'data_manager', {} ).get( 'data_managers', {} ) ) )
+ return False
+
+ @property
+ def includes_tools( self ):
+ if self.metadata:
+ return 'tools' in self.metadata
+ return False
+
+ @property
+ def includes_tools_for_display_in_tool_panel( self ):
+ if self.includes_tools:
+ tool_dicts = self.metadata[ 'tools' ]
+ for tool_dict in tool_dicts:
+ if tool_dict.get( 'add_to_tool_panel', True ):
+ return True
+ return False
+
+ @property
+ def includes_tool_dependencies( self ):
+ if self.metadata:
+ return 'tool_dependencies' in self.metadata
+ return False
+
+ @property
+ def includes_workflows( self ):
+ if self.metadata:
+ return 'workflows' in self.metadata
+ return False
+
+ @property
+ def installed_repository_dependencies( self ):
+ """Return the repository's repository dependencies that are currently installed."""
+ installed_required_repositories = []
+ for required_repository in self.repository_dependencies:
+ if required_repository.status == self.installation_status.INSTALLED:
+ installed_required_repositories.append( required_repository )
+ return installed_required_repositories
+
+ @property
+ def installed_tool_dependencies( self ):
+ """Return the repository's tool dependencies that are currently installed."""
+ installed_dependencies = []
+ for tool_dependency in self.tool_dependencies:
+ if tool_dependency.status in [ ToolDependency.installation_status.INSTALLED, ToolDependency.installation_status.ERROR ]:
+ installed_dependencies.append( tool_dependency )
+ return installed_dependencies
+
+ @property
+ def is_deprecated_in_tool_shed( self ):
+ if self.tool_shed_status:
+ return galaxy.util.asbool( self.tool_shed_status.get( 'repository_deprecated', False ) )
+ return False
+
+ @property
+ def is_latest_installable_revision( self ):
+ if self.tool_shed_status:
+ return galaxy.util.asbool( self.tool_shed_status.get( 'latest_installable_revision', False ) )
+ return False
+
+ @property
+ def missing_repository_dependencies( self ):
+ """Return the repository's repository dependencies that are not currently installed, and may not ever have been installed."""
+ missing_required_repositories = []
+ for required_repository in self.repository_dependencies:
+ if required_repository.status not in [ self.installation_status.INSTALLED ]:
+ missing_required_repositories.append( required_repository )
+ return missing_required_repositories
+
+ @property
+ def missing_tool_dependencies( self ):
+ """Return the repository's tool dependencies that are not currently installed, and may not ever have been installed."""
+ missing_dependencies = []
+ for tool_dependency in self.tool_dependencies:
+ if tool_dependency.status not in [ ToolDependency.installation_status.INSTALLED ]:
+ missing_dependencies.append( tool_dependency )
+ return missing_dependencies
+
+ def repo_files_directory( self, app ):
+ repo_path = self.repo_path( app )
+ if repo_path:
+ return os.path.join( repo_path, self.name )
+ return None
+
+ def repo_path( self, app ):
+ tool_shed_url = self.tool_shed
+ if tool_shed_url.find( ':' ) > 0:
+ # Eliminate the port, if any, since it will result in an invalid directory name.
+ tool_shed_url = tool_shed_url.split( ':' )[ 0 ]
+ tool_shed = tool_shed_url.rstrip( '/' )
+ for index, shed_tool_conf_dict in enumerate( app.toolbox.shed_tool_confs ):
+ tool_path = shed_tool_conf_dict[ 'tool_path' ]
+ relative_path = os.path.join( tool_path, tool_shed, 'repos', self.owner, self.name, self.installed_changeset_revision )
+ if os.path.exists( relative_path ):
+ return relative_path
+ return None
+
+ @property
+ def repository_dependencies( self ):
+ required_repositories = []
+ for rrda in self.required_repositories:
+ repository_dependency = rrda.repository_dependency
+ required_repository = repository_dependency.repository
+ if required_repository:
+ required_repositories.append( required_repository )
+ return required_repositories
+
+ @property
+ def repository_dependencies_being_installed( self ):
+ """Return the repository's repository dependencies that are currently being installed."""
+ required_repositories_being_installed = []
+ for required_repository in self.repository_dependencies:
+ if required_repository.status == self.installation_status.INSTALLING:
+ required_repositories_being_installed.append( required_repository )
+ return required_repositories_being_installed
+
+ @property
+ def repository_dependencies_missing_or_being_installed( self ):
+ """Return the repository's repository dependencies that are either missing or currently being installed."""
+ required_repositories_missing_or_being_installed = []
+ for required_repository in self.repository_dependencies:
+ if required_repository.status in [ self.installation_status.ERROR,
+ self.installation_status.INSTALLING,
+ self.installation_status.NEVER_INSTALLED,
+ self.installation_status.UNINSTALLED ]:
+ required_repositories_missing_or_being_installed.append( required_repository )
+ return required_repositories_missing_or_being_installed
+
+ @property
+ def repository_dependencies_with_installation_errors( self ):
+ """Return the repository's repository dependencies that have installation errors."""
+ required_repositories_with_installation_errors = []
+ for required_repository in self.repository_dependencies:
+ if required_repository.status == self.installation_status.ERROR:
+ required_repositories_with_installation_errors.append( required_repository )
+ return required_repositories_with_installation_errors
+
@property
def requires_prior_installation_of( self ):
"""
@@ -3501,113 +3634,22 @@
if prior_installation_required:
required_rd_tups_that_must_be_installed.append( ( tool_shed, name, owner, changeset_revision, prior_installation_required ) )
return required_rd_tups_that_must_be_installed
+
@property
- def includes_data_managers( self ):
- if self.metadata:
- return bool( len( self.metadata.get( 'data_manager', {} ).get( 'data_managers', {} ) ) )
+ def revision_update_available( self ):
+ # This method should be named update_available, but since it is no longer possible to drop a table column using migration scripts
+ # with the sqlite database (see ~/galaxy/model/migrate/versions/0016_drop_update_available_col_add_tool_shed_status_col.py), we
+ # have to name it in such a way that it will not conflict with the eliminated tool_shed_repository.update_available column (which
+ # cannot be eliminated if using the sqlite database).
+ if self.tool_shed_status:
+ return galaxy.util.asbool( self.tool_shed_status.get( 'revision_update', False ) )
return False
- @property
- def includes_tools( self ):
- if self.metadata:
- return 'tools' in self.metadata
- return False
- @property
- def includes_tools_for_display_in_tool_panel( self ):
- if self.includes_tools:
- tool_dicts = self.metadata[ 'tools' ]
- for tool_dict in tool_dicts:
- if tool_dict.get( 'add_to_tool_panel', True ):
- return True
- return False
- @property
- def includes_tool_dependencies( self ):
- if self.metadata:
- return 'tool_dependencies' in self.metadata
- return False
- @property
- def includes_workflows( self ):
- if self.metadata:
- return 'workflows' in self.metadata
- return False
- @property
- def in_error_state( self ):
- return self.status == self.installation_status.ERROR
- @property
- def repository_dependencies( self ):
- required_repositories = []
- for rrda in self.required_repositories:
- repository_dependency = rrda.repository_dependency
- required_repository = repository_dependency.repository
- if required_repository:
- required_repositories.append( required_repository )
- return required_repositories
- @property
- def installed_repository_dependencies( self ):
- """Return the repository's repository dependencies that are currently installed."""
- installed_required_repositories = []
- for required_repository in self.repository_dependencies:
- if required_repository.status == self.installation_status.INSTALLED:
- installed_required_repositories.append( required_repository )
- return installed_required_repositories
- @property
- def missing_repository_dependencies( self ):
- """Return the repository's repository dependencies that are not currently installed, and may not ever have been installed."""
- missing_required_repositories = []
- for required_repository in self.repository_dependencies:
- if required_repository.status not in [ self.installation_status.INSTALLED ]:
- missing_required_repositories.append( required_repository )
- return missing_required_repositories
- @property
- def repository_dependencies_being_installed( self ):
- """Return the repository's repository dependencies that are currently being installed."""
- required_repositories_being_installed = []
- for required_repository in self.repository_dependencies:
- if required_repository.status == self.installation_status.INSTALLING:
- required_repositories_being_installed.append( required_repository )
- return required_repositories_being_installed
- @property
- def repository_dependencies_missing_or_being_installed( self ):
- """Return the repository's repository dependencies that are either missing or currently being installed."""
- required_repositories_missing_or_being_installed = []
- for required_repository in self.repository_dependencies:
- if required_repository.status in [ self.installation_status.ERROR,
- self.installation_status.INSTALLING,
- self.installation_status.NEVER_INSTALLED,
- self.installation_status.UNINSTALLED ]:
- required_repositories_missing_or_being_installed.append( required_repository )
- return required_repositories_missing_or_being_installed
- @property
- def repository_dependencies_with_installation_errors( self ):
- """Return the repository's repository dependencies that have installation errors."""
- required_repositories_with_installation_errors = []
- for required_repository in self.repository_dependencies:
- if required_repository.status == self.installation_status.ERROR:
- required_repositories_with_installation_errors.append( required_repository )
- return required_repositories_with_installation_errors
- @property
- def uninstalled_repository_dependencies( self ):
- """Return the repository's repository dependencies that have been uninstalled."""
- uninstalled_required_repositories = []
- for required_repository in self.repository_dependencies:
- if required_repository.status == self.installation_status.UNINSTALLED:
- uninstalled_required_repositories.append( required_repository )
- return uninstalled_required_repositories
- @property
- def installed_tool_dependencies( self ):
- """Return the repository's tool dependencies that are currently installed."""
- installed_dependencies = []
- for tool_dependency in self.tool_dependencies:
- if tool_dependency.status in [ ToolDependency.installation_status.INSTALLED, ToolDependency.installation_status.ERROR ]:
- installed_dependencies.append( tool_dependency )
- return installed_dependencies
- @property
- def missing_tool_dependencies( self ):
- """Return the repository's tool dependencies that are not currently installed, and may not ever have been installed."""
- missing_dependencies = []
- for tool_dependency in self.tool_dependencies:
- if tool_dependency.status not in [ ToolDependency.installation_status.INSTALLED ]:
- missing_dependencies.append( tool_dependency )
- return missing_dependencies
+
+ def set_shed_config_filename( self, value ):
+ self.metadata[ 'shed_config_filename' ] = value
+
+ shed_config_filename = property( get_shed_config_filename, set_shed_config_filename )
+
@property
def tool_dependencies_being_installed( self ):
dependencies_being_installed = []
@@ -3615,6 +3657,7 @@
if tool_dependency.status == ToolDependency.installation_status.INSTALLING:
dependencies_being_installed.append( tool_dependency )
return dependencies_being_installed
+
@property
def tool_dependencies_missing_or_being_installed( self ):
dependencies_missing_or_being_installed = []
@@ -3625,6 +3668,7 @@
ToolDependency.installation_status.UNINSTALLED ]:
dependencies_missing_or_being_installed.append( tool_dependency )
return dependencies_missing_or_being_installed
+
@property
def tool_dependencies_with_installation_errors( self ):
dependencies_with_installation_errors = []
@@ -3632,6 +3676,24 @@
if tool_dependency.status == ToolDependency.installation_status.ERROR:
dependencies_with_installation_errors.append( tool_dependency )
return dependencies_with_installation_errors
+
+ @property
+ def tool_shed_path_name( self ):
+ tool_shed_url = self.tool_shed
+ if tool_shed_url.find( ':' ) > 0:
+ # Eliminate the port, if any, since it will result in an invalid directory name.
+ tool_shed_url = tool_shed_url.split( ':' )[ 0 ]
+ return tool_shed_url.rstrip( '/' )
+
+ @property
+ def uninstalled_repository_dependencies( self ):
+ """Return the repository's repository dependencies that have been uninstalled."""
+ uninstalled_required_repositories = []
+ for required_repository in self.repository_dependencies:
+ if required_repository.status == self.installation_status.UNINSTALLED:
+ uninstalled_required_repositories.append( required_repository )
+ return uninstalled_required_repositories
+
@property
def uninstalled_tool_dependencies( self ):
"""Return the repository's tool dependencies that have been uninstalled."""
@@ -3641,11 +3703,22 @@
uninstalled_tool_dependencies.append( tool_dependency )
return uninstalled_tool_dependencies
+ @property
+ def upgrade_available( self ):
+ if self.tool_shed_status:
+ if self.is_deprecated_in_tool_shed:
+ # Only allow revision upgrades if the repository is not deprecated in the tool shed.
+ return False
+ return galaxy.util.asbool( self.tool_shed_status.get( 'revision_upgrade', False ) )
+ return False
+
+
class RepositoryRepositoryDependencyAssociation( object ):
def __init__( self, tool_shed_repository_id=None, repository_dependency_id=None ):
self.tool_shed_repository_id = tool_shed_repository_id
self.repository_dependency_id = repository_dependency_id
+
class RepositoryDependency( object ):
def __init__( self, tool_shed_repository_id=None ):
self.tool_shed_repository_id = tool_shed_repository_id
diff -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 lib/galaxy/model/mapping.py
--- a/lib/galaxy/model/mapping.py
+++ b/lib/galaxy/model/mapping.py
@@ -405,7 +405,7 @@
Column( "ctx_rev", TrimmedString( 10 ) ),
Column( "metadata", JSONType, nullable=True ),
Column( "includes_datatypes", Boolean, index=True, default=False ),
- Column( "update_available", Boolean, default=False ),
+ Column( "tool_shed_status", JSONType, nullable=True ),
Column( "deleted", Boolean, index=True, default=False ),
Column( "uninstalled", Boolean, default=False ),
Column( "dist_to_shed", Boolean, default=False ),
diff -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 lib/galaxy/model/migrate/versions/0116_drop_update_available_col_add_tool_shed_status_col.py
--- /dev/null
+++ b/lib/galaxy/model/migrate/versions/0116_drop_update_available_col_add_tool_shed_status_col.py
@@ -0,0 +1,77 @@
+"""
+Migration script to drop the update_available Boolean column and replace it with the tool_shed_status JSONType column in the tool_shed_repository table.
+"""
+
+from sqlalchemy import *
+from sqlalchemy.orm import *
+from migrate import *
+from migrate.changeset import *
+import sys, logging
+from galaxy.model.custom_types import *
+from sqlalchemy.exc import *
+import datetime
+now = datetime.datetime.utcnow
+
+log = logging.getLogger( __name__ )
+log.setLevel( logging.DEBUG )
+handler = logging.StreamHandler( sys.stdout )
+format = "%(name)s %(levelname)s %(asctime)s %(message)s"
+formatter = logging.Formatter( format )
+handler.setFormatter( formatter )
+log.addHandler( handler )
+
+metadata = MetaData()
+
+def default_false( migrate_engine ):
+ if migrate_engine.name == 'mysql' or migrate_engine.name == 'sqlite':
+ return "0"
+ elif migrate_engine.name in [ 'postgresql', 'postgres' ]:
+ return "false"
+
+def upgrade( migrate_engine ):
+ metadata.bind = migrate_engine
+ print __doc__
+ metadata.reflect()
+ try:
+ ToolShedRepository_table = Table( "tool_shed_repository", metadata, autoload=True )
+ except NoSuchTableError:
+ ToolShedRepository_table = None
+ log.debug( "Failed loading table tool_shed_repository" )
+ if ToolShedRepository_table is not None:
+ # For some unknown reason it is no longer possible to drop a column in a migration script if using the sqlite database.
+ if migrate_engine.name != 'sqlite':
+ try:
+ col = ToolShedRepository_table.c.update_available
+ col.drop()
+ except Exception, e:
+ print "Dropping column update_available from the tool_shed_repository table failed: %s" % str( e )
+ c = Column( "tool_shed_status", JSONType, nullable=True )
+ try:
+ c.create( ToolShedRepository_table )
+ assert c is ToolShedRepository_table.c.tool_shed_status
+ except Exception, e:
+ print "Adding tool_shed_status column to the tool_shed_repository table failed: %s" % str( e )
+
+def downgrade( migrate_engine ):
+ metadata.bind = migrate_engine
+ metadata.reflect()
+ try:
+ ToolShedRepository_table = Table( "tool_shed_repository", metadata, autoload=True )
+ except NoSuchTableError:
+ ToolShedRepository_table = None
+ log.debug( "Failed loading table tool_shed_repository" )
+ if ToolShedRepository_table is not None:
+ # For some unknown reason it is no longer possible to drop a column in a migration script if using the sqlite database.
+ if migrate_engine.name != 'sqlite':
+ try:
+ col = ToolShedRepository_table.c.tool_shed_status
+ col.drop()
+ except Exception, e:
+ print "Dropping column tool_shed_status from the tool_shed_repository table failed: %s" % str( e )
+ c = Column( "update_available", Boolean, default=False )
+ try:
+ c.create( ToolShedRepository_table )
+ assert c is ToolShedRepository_table.c.update_available
+ migrate_engine.execute( "UPDATE tool_shed_repository SET update_available=%s" % default_false( migrate_engine ) )
+ except Exception, e:
+ print "Adding column update_available to the tool_shed_repository table failed: %s" % str( e )
diff -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 lib/galaxy/web/framework/helpers/grids.py
--- a/lib/galaxy/web/framework/helpers/grids.py
+++ b/lib/galaxy/web/framework/helpers/grids.py
@@ -39,6 +39,7 @@
cur_filter_pref_name = ".filter"
cur_sort_key_pref_name = ".sort_key"
pass_through_operations = {}
+ legend = None
def __init__( self ):
# Determine if any multiple row operations are defined
self.has_multiple_item_operations = False
diff -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
--- a/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
+++ b/lib/galaxy/webapps/galaxy/controllers/admin_toolshed.py
@@ -94,6 +94,10 @@
return trans.response.send_redirect( web.url_for( controller='admin_toolshed',
action='check_for_updates',
**kwd ) )
+ if operation == "update tool shed status":
+ return trans.response.send_redirect( web.url_for( controller='admin_toolshed',
+ action='update_tool_shed_status_for_installed_repository',
+ **kwd ) )
if operation == "reset to install":
kwd[ 'reset_repository' ] = True
return trans.response.send_redirect( web.url_for( controller='admin_toolshed',
@@ -146,8 +150,6 @@
return trans.response.send_redirect( web.url_for( controller='admin_toolshed',
action='deactivate_or_uninstall_repository',
**kwd ) )
- if 'message' not in kwd or not kwd[ 'message' ]:
- kwd[ 'message' ] = 'Names of repositories for which updates are available are highlighted in yellow.'
return self.installed_repository_grid( trans, **kwd )
@web.expose
@@ -1627,10 +1629,15 @@
updating_installed_repository=True,
persist=True )
repository.metadata = metadata_dict
- # Update the repository changeset_revision in the database.
+ # Update the repository.changeset_revision column in the database.
repository.changeset_revision = latest_changeset_revision
repository.ctx_rev = latest_ctx_rev
- repository.update_available = False
+ # Update the repository.tool_shed_status column in the database.
+ tool_shed_status_dict = suc.get_tool_shed_status_for_installed_repository( trans.app, repository )
+ if tool_shed_status_dict:
+ repository.tool_shed_status = tool_shed_status_dict
+ else:
+ repository.tool_shed_status = None
trans.sa_session.add( repository )
trans.sa_session.flush()
if 'tools' in metadata_dict:
@@ -1678,6 +1685,46 @@
@web.expose
@web.require_admin
+ def update_tool_shed_status_for_installed_repository( self, trans, all_installed_repositories=False, **kwd ):
+ message = kwd.get( 'message', '' )
+ status = kwd.get( 'status', 'done' )
+ if all_installed_repositories:
+ success_count = 0
+ repository_names_not_updated = []
+ updated_count = 0
+ for repository in trans.sa_session.query( trans.model.ToolShedRepository ) \
+ .filter( trans.model.ToolShedRepository.table.c.deleted == False ):
+ ok, updated = suc.check_or_update_tool_shed_status_for_installed_repository( trans, repository )
+ if ok:
+ success_count += 1
+ else:
+ repository_names_not_updated.append( '<b>%s</b>' % str( repository.name ) )
+ if updated:
+ updated_count += 1
+ message = "Checked the status in the tool shed for %d repositories. " % success_count
+ message += "Updated the tool shed status for %d repositories. " % updated_count
+ if repository_names_not_updated:
+ message += "Unable to retrieve status from the tool shed for the following repositories:\n"
+ message += ", ".join( repository_names_not_updated )
+ else:
+ repository_id = kwd.get( 'id', None )
+ repository = suc.get_tool_shed_repository_by_id( trans, repository_id )
+ ok, updated = suc.check_or_update_tool_shed_status_for_installed_repository( trans, repository )
+ if ok:
+ if updated:
+ message = "The tool shed status for repository <b>%s</b> has been updated." % str( repository.name )
+ else:
+ message = "The status has not changed in the tool shed for repository <b>%s</b>." % str( repository.name )
+ else:
+ message = "Unable to retrieve status from the tool shed for repository <b>%s</b>." % str( repository.name )
+ status = 'error'
+ return trans.response.send_redirect( web.url_for( controller='admin_toolshed',
+ action='browse_repositories',
+ message=message,
+ status=status ) )
+
+ @web.expose
+ @web.require_admin
def view_tool_metadata( self, trans, repository_id, tool_id, **kwd ):
message = kwd.get( 'message', '' )
status = kwd.get( 'status', 'done' )
diff -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 lib/galaxy/webapps/tool_shed/controllers/repository.py
--- a/lib/galaxy/webapps/tool_shed/controllers/repository.py
+++ b/lib/galaxy/webapps/tool_shed/controllers/repository.py
@@ -2651,6 +2651,59 @@
owner=owner ) )
@web.expose
+ def status_for_installed_repository( self, trans, **kwd ):
+ """
+ Handle a request from a local Galaxy instance, returning a dictionary with boolean values for whether there are updates available
+ for the repository revision, newer installable revisions available, the revision is the latest installable revision, or if the repository
+ is deprecated.
+ """
+ name = kwd.get( 'name', None )
+ owner = kwd.get( 'owner', None )
+ changeset_revision = kwd.get( 'changeset_revision', None )
+ repository = suc.get_repository_by_name_and_owner( trans.app, name, owner )
+ if repository:
+ repo_dir = repository.repo_path( trans.app )
+ repo = hg.repository( suc.get_configured_ui(), repo_dir )
+ tool_shed_status_dict = {}
+ # Handle repository deprecation.
+ tool_shed_status_dict[ 'repository_deprecated' ] = str( repository.deprecated )
+ # Handle latest installable revision.
+ if changeset_revision == repository.tip( trans.app ):
+ tool_shed_status_dict[ 'latest_installable_revision' ] = 'True'
+ else:
+ next_installable_revision = suc.get_next_downloadable_changeset_revision( repository, repo, changeset_revision )
+ if next_installable_revision:
+ tool_shed_status_dict[ 'latest_installable_revision' ] = 'False'
+ else:
+ tool_shed_status_dict[ 'latest_installable_revision' ] = 'True'
+ # Handle revision updates.
+ if changeset_revision == repository.tip( trans.app ):
+ tool_shed_status_dict[ 'revision_update' ] = 'False'
+ else:
+ repository_metadata = suc.get_repository_metadata_by_changeset_revision( trans,
+ trans.security.encode_id( repository.id ),
+ changeset_revision )
+ if repository_metadata:
+ tool_shed_status_dict[ 'revision_update' ] = 'False'
+ else:
+ tool_shed_status_dict[ 'revision_update' ] = 'True'
+ # Handle revision upgrades.
+ ordered_metadata_changeset_revisions = suc.get_ordered_metadata_changeset_revisions( repository, repo, downloadable=True )
+ num_metadata_revisions = len( ordered_metadata_changeset_revisions )
+ for index, metadata_changeset_revision in enumerate( ordered_metadata_changeset_revisions ):
+ if index == num_metadata_revisions:
+ tool_shed_status_dict[ 'revision_upgrade' ] = 'False'
+ break
+ if metadata_changeset_revision == changeset_revision:
+ if num_metadata_revisions - index > 0:
+ tool_shed_status_dict[ 'revision_upgrade' ] = 'True'
+ else:
+ tool_shed_status_dict[ 'revision_upgrade' ] = 'False'
+ break
+ return encoding_util.tool_shed_encode( tool_shed_status_dict )
+ return encoding_util.tool_shed_encode( {} )
+
+ @web.expose
def updated_changeset_revisions( self, trans, **kwd ):
"""
Handle a request from a local Galaxy instance to retrieve the list of changeset revisions to which an installed repository can be updated. This
diff -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 lib/tool_shed/galaxy_install/grids/admin_toolshed_grids.py
--- a/lib/tool_shed/galaxy_install/grids/admin_toolshed_grids.py
+++ b/lib/tool_shed/galaxy_install/grids/admin_toolshed_grids.py
@@ -3,38 +3,90 @@
from galaxy import model, util
from galaxy.web.framework.helpers import iff, grids
from galaxy.model.orm import or_
+import tool_shed.util.shed_util_common as suc
from tool_shed.util import tool_dependency_util
log = logging.getLogger( __name__ )
+def generate_deprecated_repository_img_str( include_mouse_over=False ):
+ if include_mouse_over:
+ deprecated_tip_str = 'class="icon-button tooltip" title="This repository is deprecated in the Tool Shed"'
+ else:
+ deprecated_tip_str = ''
+ return '<img src="/static/images/icon_error_sml.gif" %s/>' % deprecated_tip_str
+
+def generate_latest_revision_img_str( include_mouse_over=False ):
+ if include_mouse_over:
+ latest_revision_tip_str = 'class="icon-button tooltip" title="This is the latest installable revision of this repository"'
+ else:
+ latest_revision_tip_str = ''
+ return '<img src="/static/june_2007_style/blue/ok_small.png" %s/>' % latest_revision_tip_str
+
+def generate_revision_updates_img_str( include_mouse_over=False ):
+ if include_mouse_over:
+ revision_updates_tip_str = 'class="icon-button tooltip" title="Updates are available in the Tool Shed for this revision"'
+ else:
+ revision_updates_tip_str = ''
+ return '<img src="/static/images/icon_warning_sml.gif" %s/>' % revision_updates_tip_str
+
+def generate_revision_upgrades_img_str( include_mouse_over=False ):
+ if include_mouse_over:
+ revision_upgrades_tip_str = 'class="icon-button tooltip" title="A newer installable revision is available for this repository"'
+ else:
+ revision_upgrades_tip_str = ''
+ return '<img src="/static/images/up.gif" %s/>' % revision_upgrades_tip_str
+
+def generate_unknown_img_str( include_mouse_over=False ):
+ if include_mouse_over:
+ unknown_tip_str = 'class="icon-button tooltip" title="Unable to get information from the Tool Shed"'
+ else:
+ unknown_tip_str = ''
+ return '<img src="/static/june_2007_style/blue/question-octagon-frame.png" %s/>' % unknown_tip_str
+
class InstalledRepositoryGrid( grids.Grid ):
+ class ToolShedStatusColumn( grids.TextColumn ):
+
+ def get_value( self, trans, grid, tool_shed_repository ):
+ if tool_shed_repository.tool_shed_status:
+ tool_shed_status_str = ''
+ if tool_shed_repository.is_deprecated_in_tool_shed:
+ tool_shed_status_str += generate_deprecated_repository_img_str( include_mouse_over=True )
+ if tool_shed_repository.is_latest_installable_revision:
+ tool_shed_status_str += generate_latest_revision_img_str( include_mouse_over=True )
+ if tool_shed_repository.revision_update_available:
+ tool_shed_status_str += generate_revision_updates_img_str( include_mouse_over=True )
+ if tool_shed_repository.upgrade_available:
+ tool_shed_status_str += generate_revision_upgrades_img_str( include_mouse_over=True )
+ else:
+ tool_shed_status_str = generate_unknown_img_str( include_mouse_over=True )
+ return tool_shed_status_str
+
+
class NameColumn( grids.TextColumn ):
def get_value( self, trans, grid, tool_shed_repository ):
- if tool_shed_repository.update_available:
- return '<div class="count-box state-color-running">%s</div>' % tool_shed_repository.name
- return tool_shed_repository.name
+ return str( tool_shed_repository.name )
class DescriptionColumn( grids.TextColumn ):
def get_value( self, trans, grid, tool_shed_repository ):
- return tool_shed_repository.description
+ return util.unicodify( tool_shed_repository.description )
class OwnerColumn( grids.TextColumn ):
def get_value( self, trans, grid, tool_shed_repository ):
- return tool_shed_repository.owner
+ return str( tool_shed_repository.owner )
class RevisionColumn( grids.TextColumn ):
def get_value( self, trans, grid, tool_shed_repository ):
- return tool_shed_repository.changeset_revision
+ return str( tool_shed_repository.changeset_revision )
class StatusColumn( grids.TextColumn ):
@@ -91,6 +143,8 @@
template='/admin/tool_shed_repository/grid.mako'
default_sort_key = "name"
columns = [
+ ToolShedStatusColumn( "",
+ attach_popup=False ),
NameColumn( "Name",
key="name",
link=( lambda item: iff( item.status in [ model.ToolShedRepository.installation_status.CLONING ],
@@ -114,10 +168,18 @@
key="free-text-search",
visible=False,
filterable="standard" ) )
- global_actions = []
- operations = [ grids.GridOperation( "Get updates",
+ global_actions = [
+ grids.GridAction( "Update tool shed status",
+ dict( controller='admin_toolshed', action='update_tool_shed_status_for_installed_repository', all_installed_repositories=True ) )
+ ]
+ operations = [ grids.GridOperation( "Update tool shed status",
allow_multiple=False,
- condition=( lambda item: not item.deleted and item.status not in \
+ condition=( lambda item: not item.deleted ),
+ async_compatible=False,
+ url_args=dict( controller='admin_toolshed', action='browse_repositories', operation='update tool shed status' ) ),
+ grids.GridOperation( "Get updates",
+ allow_multiple=False,
+ condition=( lambda item: not item.deleted and item.revision_update_available and item.status not in \
[ model.ToolShedRepository.installation_status.ERROR, model.ToolShedRepository.installation_status.NEW ] ),
async_compatible=False,
url_args=dict( controller='admin_toolshed', action='browse_repositories', operation='get updates' ) ),
@@ -151,6 +213,15 @@
def build_initial_query( self, trans, **kwd ):
return trans.sa_session.query( self.model_class )
+ @property
+ def legend( self ):
+ legend_str = '%s Updates are available in the Tool Shed for this revision<br/>' % generate_revision_updates_img_str()
+ legend_str += '%s A newer installable revision is available for this repository<br/>' % generate_revision_upgrades_img_str()
+ legend_str += '%s This is the latest installable revision of this repository<br/>' % generate_latest_revision_img_str()
+ legend_str += '%s This repository is deprecated in the Tool Shed<br/>' % generate_deprecated_repository_img_str()
+ legend_str += '%s Unable to get information from the Tool Shed<br/>' % generate_unknown_img_str()
+ return legend_str
+
class RepositoryInstallationGrid( grids.Grid ):
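For orientation, the new ToolShedStatusColumn composes its cell content from the small generate_*_img_str helpers above, so each installed repository row shows zero or more status icons. A minimal sketch of the markup those helpers emit, assuming Galaxy's lib directory is on sys.path (not part of the changeset):

    # Sketch only: the helpers are plain module-level functions, so they can be
    # exercised outside the grid.
    from tool_shed.galaxy_install.grids.admin_toolshed_grids import (
        generate_latest_revision_img_str,
        generate_revision_updates_img_str,
    )

    # With include_mouse_over=True the icon carries the tooltip class and title.
    print generate_latest_revision_img_str( include_mouse_over=True )
    # <img src="/static/june_2007_style/blue/ok_small.png" class="icon-button tooltip" title="This is the latest installable revision of this repository"/>

    # Without it (as used by the grid legend), only the bare image is emitted.
    print generate_revision_updates_img_str()
    # <img src="/static/images/icon_warning_sml.gif" />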
diff -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 lib/tool_shed/galaxy_install/tool_dependencies/td_common_util.py
--- a/lib/tool_shed/galaxy_install/tool_dependencies/td_common_util.py
+++ b/lib/tool_shed/galaxy_install/tool_dependencies/td_common_util.py
@@ -14,8 +14,15 @@
def clean_tool_shed_url( base_url ):
if base_url:
- protocol, base = base_url.split( '://' )
- return base.rstrip( '/' )
+ if base_url.find( '://' ) > -1:
+ try:
+ protocol, base = base_url.split( '://' )
+ except ValueError, e:
+ # The received base_url must be an invalid url.
+ log.debug( "Returning unchanged invalid base_url from td_common_util.clean_tool_shed_url: %s" % str( base_url ) )
+ return base_url
+ return base.rstrip( '/' )
+ return base_url.rstrip( '/' )
return base_url
def create_env_var_dict( elem, tool_dependency_install_dir=None, tool_shed_repository_install_dir=None ):
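The reworked clean_tool_shed_url no longer assumes the incoming value contains a protocol: URLs without '://' are simply stripped of a trailing slash, and values that fail to split cleanly are logged and returned unchanged. A short sketch of the resulting behavior, assuming Galaxy's lib directory is on sys.path:

    from tool_shed.galaxy_install.tool_dependencies.td_common_util import clean_tool_shed_url

    print clean_tool_shed_url( 'http://toolshed.g2.bx.psu.edu/' )  # toolshed.g2.bx.psu.edu
    print clean_tool_shed_url( 'toolshed.g2.bx.psu.edu/' )         # toolshed.g2.bx.psu.edu
    print clean_tool_shed_url( 'http://bad://url' )                # returned unchanged (logged as invalid)
    print clean_tool_shed_url( None )                              # None (falsy values pass straight through)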
diff -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 lib/tool_shed/galaxy_install/update_manager.py
--- a/lib/tool_shed/galaxy_install/update_manager.py
+++ b/lib/tool_shed/galaxy_install/update_manager.py
@@ -26,29 +26,24 @@
def __restarter( self ):
log.info( 'Update manager restarter starting up...' )
while self.running:
- flush_needed = False
+ # Make a call to the tool shed for each installed repository to get the latest status information in the tool shed for the
+ # repository. This information includes items like newer installable repository revisions, current revision updates, whether
+ # the repository revision is the latest installable revision, and whether the repository has been deprecated in the tool shed.
for repository in self.sa_session.query( self.app.model.ToolShedRepository ) \
- .filter( and_( self.app.model.ToolShedRepository.table.c.update_available == False,
- self.app.model.ToolShedRepository.table.c.deleted == False ) ):
- if self.check_for_update( repository ):
- repository.update_available = True
- self.sa_session.add( repository )
- flush_needed = True
- if flush_needed:
- self.sa_session.flush()
+ .filter( self.app.model.ToolShedRepository.table.c.deleted == False ):
+ tool_shed_status_dict = suc.get_tool_shed_status_for_installed_repository( self.app, repository )
+ if tool_shed_status_dict:
+ if tool_shed_status_dict != repository.tool_shed_status:
+ repository.tool_shed_status = tool_shed_status_dict
+ self.sa_session.flush()
+ else:
+ # The received tool_shed_status_dict is an empty dictionary, so coerce to None.
+ tool_shed_status_dict = None
+ if tool_shed_status_dict != repository.tool_shed_status:
+ repository.tool_shed_status = tool_shed_status_dict
+ self.sa_session.flush()
self.sleeper.sleep( self.seconds_to_sleep )
- log.info( 'Transfer job restarter shutting down...' )
-
- def check_for_update( self, repository ):
- tool_shed_url = suc.get_url_from_tool_shed( self.app, repository.tool_shed )
- url = '%s/repository/check_for_updates?name=%s&owner=%s&changeset_revision=%s&from_update_manager=True' % \
- ( tool_shed_url, repository.name, repository.owner, repository.changeset_revision )
- try:
- text = common_util.tool_shed_get( self.app, tool_shed_url, url )
- except Exception, e:
- # The required tool shed may be unavailable.
- text = 'False'
- return string_as_bool( text )
+ log.info( 'Update manager restarter shutting down...' )
def shutdown( self ):
self.running = False
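The restarter now stores a whole tool shed status dictionary per repository instead of flipping a single update_available flag, and it flushes only when the stored value actually changes. A condensed sketch of that flush logic, assuming app, sa_session, and repository behave like the Galaxy application, SQLAlchemy session, and ToolShedRepository objects used above:

    import tool_shed.util.shed_util_common as suc

    def refresh_repository_status( app, sa_session, repository ):
        # Ask the tool shed for the repository's current status information.
        tool_shed_status_dict = suc.get_tool_shed_status_for_installed_repository( app, repository )
        if not tool_shed_status_dict:
            # An empty dictionary means the tool shed could not be reached; store None.
            tool_shed_status_dict = None
        # Persist only when the status actually changed since the last poll.
        if tool_shed_status_dict != repository.tool_shed_status:
            repository.tool_shed_status = tool_shed_status_dict
            sa_session.flush()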
diff -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 lib/tool_shed/util/repository_dependency_util.py
--- a/lib/tool_shed/util/repository_dependency_util.py
+++ b/lib/tool_shed/util/repository_dependency_util.py
@@ -182,7 +182,7 @@
debug_msg = "Resetting tool_shed_repository '%s' for installation.\n" % str( installed_tool_shed_repository.name )
debug_msg += "The current state of the tool_shed_repository is:\n"
debug_msg += "deleted: %s\n" % str( installed_tool_shed_repository.deleted )
- debug_msg += "update_available: %s\n" % str( installed_tool_shed_repository.update_available )
+ debug_msg += "tool_shed_status: %s\n" % str( installed_tool_shed_repository.tool_shed_status )
debug_msg += "uninstalled: %s\n" % str( installed_tool_shed_repository.uninstalled )
debug_msg += "status: %s\n" % str( installed_tool_shed_repository.status )
debug_msg += "error_message: %s\n" % str( installed_tool_shed_repository.error_message )
diff -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 lib/tool_shed/util/shed_util_common.py
--- a/lib/tool_shed/util/shed_util_common.py
+++ b/lib/tool_shed/util/shed_util_common.py
@@ -17,6 +17,7 @@
from galaxy.model.orm import or_
import sqlalchemy.orm.exc
from tool_shed.util import common_util
+from tool_shed.util import encoding_util
from tool_shed.util import xml_util
from xml.etree import ElementTree as XmlET
from galaxy import eggs
@@ -30,7 +31,7 @@
eggs.require( 'markupsafe' )
import markupsafe
-
+
log = logging.getLogger( __name__ )
CHUNK_SIZE = 2**20 # 1Mb
@@ -171,6 +172,20 @@
return True
return False
+def check_or_update_tool_shed_status_for_installed_repository( trans, repository ):
+ updated = False
+ tool_shed_status_dict = get_tool_shed_status_for_installed_repository( trans.app, repository )
+ if tool_shed_status_dict:
+ ok = True
+ if tool_shed_status_dict != repository.tool_shed_status:
+ repository.tool_shed_status = tool_shed_status_dict
+ trans.sa_session.add( repository )
+ trans.sa_session.flush()
+ updated = True
+ else:
+ ok = False
+ return ok, updated
+
def clean_repository_clone_url( repository_clone_url ):
"""Return a URL that can be used to clone a tool shed repository, eliminating the protocol and user if either exists."""
if repository_clone_url.find( '@' ) > 0:
@@ -194,7 +209,7 @@
return tool_shed_url.split( ':' )[ 0 ]
return tool_shed_url.rstrip( '/' )
-def clone_repository( repository_clone_url, repository_file_dir, ctx_rev ):
+def clone_repository( repository_clone_url, repository_file_dir, ctx_rev ):
"""Clone the repository up to the specified changeset_revision. No subsequent revisions will be present in the cloned repository."""
try:
commands.clone( get_configured_ui(),
@@ -248,7 +263,7 @@
# was later uninstalled, this value should be received as the value of that change set to which the repository had been updated just prior
# to it being uninstalled.
current_changeset_revision = installed_changeset_revision
- sa_session = app.model.context.current
+ sa_session = app.model.context.current
tool_shed = get_tool_shed_from_clone_url( repository_clone_url )
if not owner:
owner = get_repository_owner_from_clone_url( repository_clone_url )
@@ -322,7 +337,6 @@
elem = XmlET.Element( 'tool_shed_repository' )
else:
elem = XmlET.SubElement( parent_elem, 'tool_shed_repository' )
-
tool_shed_elem = XmlET.SubElement( elem, 'tool_shed' )
tool_shed_elem.text = tool_shed
repository_name_elem = XmlET.SubElement( elem, 'repository_name' )
@@ -337,10 +351,14 @@
new_elem = XmlET.SubElement( elem, key )
new_elem.text = value
return elem
-
+
def generate_repository_info_elem_from_repository( tool_shed_repository, parent_elem=None, **kwd ):
- return generate_repository_info_elem( tool_shed_repository.tool_shed, tool_shed_repository.name, tool_shed_repository.installed_changeset_revision, tool_shed_repository.owner, parent_elem=parent_elem, **kwd )
-
+ return generate_repository_info_elem( tool_shed_repository.tool_shed,
+ tool_shed_repository.name,
+ tool_shed_repository.installed_changeset_revision,
+ tool_shed_repository.owner,
+ parent_elem=parent_elem,
+ **kwd )
def generate_sharable_link_for_repository_in_tool_shed( trans, repository, changeset_revision=None ):
"""Generate the URL for sharing a repository that is in the tool shed."""
@@ -376,7 +394,7 @@
def generate_tool_guid( repository_clone_url, tool ):
"""
Generate a guid for the installed tool. It is critical that this guid matches the guid for
- the tool in the Galaxy tool shed from which it is being installed. The form of the guid is
+ the tool in the Galaxy tool shed from which it is being installed. The form of the guid is
<tool shed host>/repos/<repository owner>/<repository name>/<tool id>/<tool version>
"""
tmp_url = clean_repository_clone_url( repository_clone_url )
@@ -399,7 +417,7 @@
tool_config = tool_dict[ 'tool_config' ]
file_name = strip_path( tool_config )
guids_and_configs[ guid ] = file_name
- # Parse the shed_tool_conf file in which all of this repository's tools are defined and generate the tool_panel_dict.
+ # Parse the shed_tool_conf file in which all of this repository's tools are defined and generate the tool_panel_dict.
tree, error_message = xml_util.parse_xml( shed_tool_conf )
if tree is None:
return tool_panel_dict
@@ -517,7 +535,7 @@
Send a request to the tool shed to retrieve the ctx_rev for a repository defined by the combination of a name, owner and changeset
revision.
"""
- url = url_join( tool_shed_url,
+ url = url_join( tool_shed_url,
'repository/get_ctx_rev?name=%s&owner=%s&changeset_revision=%s' % ( name, owner, changeset_revision ) )
ctx_rev = common_util.tool_shed_get( app, tool_shed_url, url )
return ctx_rev
@@ -1105,6 +1123,10 @@
return shed_tool_conf_dict[ 'tool_path' ]
return None
+def get_tool_shed_from_clone_url( repository_clone_url ):
+ tmp_url = clean_repository_clone_url( repository_clone_url )
+ return tmp_url.split( '/repos/' )[ 0 ].rstrip( '/' )
+
def get_tool_shed_repository_by_id( trans, repository_id ):
"""Return a tool shed repository database record defined by the id."""
# This method is used only in Galaxy, not the tool shed.
@@ -1140,9 +1162,23 @@
app.model.ToolShedRepository.table.c.installed_changeset_revision == installed_changeset_revision ) ) \
.first()
-def get_tool_shed_from_clone_url( repository_clone_url ):
- tmp_url = clean_repository_clone_url( repository_clone_url )
- return tmp_url.split( '/repos/' )[ 0 ].rstrip( '/' )
+def get_tool_shed_status_for_installed_repository( app, repository ):
+ """
+ Send a request to the tool shed to retrieve information about newer installable repository revisions, current revision updates,
+ whether the repository revision is the latest downloadable revision, and whether the repository has been deprecated in the tool shed.
+ The received repository is a ToolShedRepository object from Galaxy.
+ """
+ tool_shed_url = get_url_from_tool_shed( app, repository.tool_shed )
+ url = url_join( tool_shed_url,
+ 'repository/status_for_installed_repository?name=%s&owner=%s&changeset_revision=%s' % \
+ ( repository.name, repository.owner, repository.changeset_revision ) )
+ try:
+ encoded_tool_shed_status_dict = common_util.tool_shed_get( app, tool_shed_url, url )
+ tool_shed_status_dict = encoding_util.tool_shed_decode( encoded_tool_shed_status_dict )
+ except Exception, e:
+        log.exception( "Error attempting to get tool shed status for installed repository %s: %s" % ( str( repository.name ), str( e ) ) )
+ return {}
+ return tool_shed_status_dict
def get_url_from_tool_shed( app, tool_shed ):
"""
@@ -1349,11 +1385,11 @@
def reset_previously_installed_repository( trans, repository ):
"""
- Reset the atrributes of a tool_shed_repository that was previsouly installed. The repository will be in some state other than with a
- status of INSTALLED, so all atributes will be set to the default NEW state. This will enable the repository to be freshly installed.
+    Reset the attributes of a tool_shed_repository that was previously installed.  The repository will be in some state other than with a
+    status of INSTALLED, so all attributes will be set to the default NEW state.  This will enable the repository to be freshly installed.
"""
repository.deleted = False
- repository.update_available = False
+ repository.tool_shed_status = None
repository.uninstalled = False
repository.status = trans.model.ToolShedRepository.installation_status.NEW
repository.error_message = None
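The dictionary returned by get_tool_shed_status_for_installed_repository is what both the update manager and the new grid column compare against repository.tool_shed_status. A hedged usage sketch follows; app and repository stand in for the Galaxy application object and an installed ToolShedRepository, and the dictionary's keys are whatever the tool shed's status_for_installed_repository controller encodes (the commit does not enumerate them):

    import tool_shed.util.shed_util_common as suc

    # Usage sketch only: app and repository are placeholders for real Galaxy objects.
    tool_shed_status_dict = suc.get_tool_shed_status_for_installed_repository( app, repository )
    if not tool_shed_status_dict:
        # An empty dict signals that the tool shed could not be contacted.
        print 'Unable to get information from the Tool Shed'
    else:
        for key, value in tool_shed_status_dict.items():
            print '%s: %s' % ( key, value )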
diff -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 -r 8f6f926f912e2aea3f11905498fd5f04f857ec97 templates/grid_base.mako
--- a/templates/grid_base.mako
+++ b/templates/grid_base.mako
@@ -454,5 +454,12 @@
</td></tr>
%endif
+ %if grid.legend:
+ <tr>
+ <td colspan="100">
+ ${grid.legend}
+ </td>
+ </tr>
+ %endif
</%def>
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: dannon: Strip trailing whitespace (and windows line endings) from all python files in lib
by commits-noreply@bitbucket.org 29 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/be3b0358acc9/
Changeset: be3b0358acc9
User: dannon
Date: 2013-08-30 05:39:52
Summary: Strip trailing whitespace (and windows line endings) from all python files in lib
Affected #: 256 files
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/app.py
--- a/lib/galaxy/app.py
+++ b/lib/galaxy/app.py
@@ -186,6 +186,6 @@
def configure_fluent_log( self ):
if self.config.fluent_log:
from galaxy.util.log.fluent_log import FluentTraceLogger
- self.trace_logger = FluentTraceLogger( 'galaxy', self.config.fluent_host, self.config.fluent_port )
+ self.trace_logger = FluentTraceLogger( 'galaxy', self.config.fluent_host, self.config.fluent_port )
else:
self.trace_logger = None
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/config.py
--- a/lib/galaxy/config.py
+++ b/lib/galaxy/config.py
@@ -282,7 +282,7 @@
self.biostar_url = kwargs.get( 'biostar_url', None )
self.biostar_key_name = kwargs.get( 'biostar_key_name', None )
self.biostar_key = kwargs.get( 'biostar_key', None )
- # Experimental: This will not be enabled by default and will hide
+ # Experimental: This will not be enabled by default and will hide
# nonproduction code.
# The api_folders refers to whether the API exposes the /folders section.
self.api_folders = string_as_bool( kwargs.get( 'api_folders', False ) )
@@ -302,7 +302,7 @@
@property
def sentry_dsn_public( self ):
"""
- Sentry URL with private key removed for use in client side scripts,
+ Sentry URL with private key removed for use in client side scripts,
sentry server will need to be configured to accept events
"""
if self.sentry_dsn:
@@ -436,8 +436,8 @@
"""
# Get root logger
root = logging.getLogger()
- # PasteScript will have already configured the logger if the
- # 'loggers' section was found in the config file, otherwise we do
+ # PasteScript will have already configured the logger if the
+ # 'loggers' section was found in the config file, otherwise we do
# some simple setup using the 'log_*' values from the config.
if not config.global_conf_parser.has_section( "loggers" ):
format = config.get( "log_format", "%(name)s %(levelname)s %(asctime)s %(message)s" )
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/assembly.py
--- a/lib/galaxy/datatypes/assembly.py
+++ b/lib/galaxy/datatypes/assembly.py
@@ -168,7 +168,7 @@
def regenerate_primary_file(self,dataset):
"""
- cannot do this until we are setting metadata
+ cannot do this until we are setting metadata
"""
log.debug( "Velvet log info %s" % 'JJ regenerate_primary_file')
gen_msg = ''
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/binary.py
--- a/lib/galaxy/datatypes/binary.py
+++ b/lib/galaxy/datatypes/binary.py
@@ -203,7 +203,7 @@
stderr = open( stderr_name ).read().strip()
if stderr:
if exit_code != 0:
- shutil.rmtree( tmp_dir) #clean up
+ shutil.rmtree( tmp_dir) #clean up
raise Exception, "Error Grooming BAM file contents: %s" % stderr
else:
print stderr
@@ -231,7 +231,7 @@
stderr = open( stderr_name ).read().strip()
if stderr:
if exit_code != 0:
- os.unlink( stderr_name ) #clean up
+ os.unlink( stderr_name ) #clean up
raise Exception, "Error Setting BAM Metadata: %s" % stderr
else:
print stderr
@@ -240,7 +240,7 @@
os.unlink( stderr_name )
def sniff( self, filename ):
# BAM is compressed in the BGZF format, and must not be uncompressed in Galaxy.
- # The first 4 bytes of any bam file is 'BAM\1', and the file is binary.
+ # The first 4 bytes of any bam file is 'BAM\1', and the file is binary.
try:
header = gzip.open( filename ).read(4)
if binascii.b2a_hex( header ) == binascii.hexlify( 'BAM\1' ):
@@ -250,7 +250,7 @@
return False
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
- dataset.peek = "Binary bam alignments file"
+ dataset.peek = "Binary bam alignments file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
@@ -278,7 +278,7 @@
samtools_source = dataproviders.dataset.SamtoolsDataProvider( dataset )
settings[ 'comment_char' ] = '@'
return dataproviders.line.RegexLineDataProvider( samtools_source, **settings )
-
+
@dataproviders.decorators.dataprovider_factory( 'column', dataproviders.column.ColumnarDataProvider.settings )
def column_dataprovider( self, dataset, **settings ):
samtools_source = dataproviders.dataset.SamtoolsDataProvider( dataset )
@@ -352,7 +352,7 @@
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
- dataset.peek = "Binary h5 file"
+ dataset.peek = "Binary h5 file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
@@ -372,7 +372,7 @@
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
- dataset.peek = "Binary scf sequence file"
+ dataset.peek = "Binary scf sequence file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
@@ -404,7 +404,7 @@
return False
def set_peek( self, dataset, is_multi_byte=False ):
if not dataset.dataset.purged:
- dataset.peek = "Binary sff file"
+ dataset.peek = "Binary sff file"
dataset.blurb = data.nice_size( dataset.get_size() )
else:
dataset.peek = 'file does not exist'
@@ -451,7 +451,7 @@
return dataset.peek
except:
return "Binary UCSC %s file (%s)" % ( self._name, data.nice_size( dataset.get_size() ) )
-
+
Binary.register_sniffable_binary_format("bigwig", "bigwig", BigWig)
@@ -470,9 +470,9 @@
class TwoBit (Binary):
"""Class describing a TwoBit format nucleotide file"""
-
+
file_ext = "twobit"
-
+
def sniff(self, filename):
try:
# All twobit files start with a 16-byte header. If the file is smaller than 16 bytes, it's obviously not a valid twobit file.
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/bedgraph_to_array_tree_converter.py
--- a/lib/galaxy/datatypes/converters/bedgraph_to_array_tree_converter.py
+++ b/lib/galaxy/datatypes/converters/bedgraph_to_array_tree_converter.py
@@ -23,13 +23,13 @@
if not line:
raise StopIteration()
if line.isspace():
- continue
+ continue
if line[0] == "#":
continue
if line[0].isalpha():
if line.startswith( "track" ) or line.startswith( "browser" ):
continue
-
+
feature = line.strip().split()
chrom = feature[0]
chrom_start = int(feature[1])
@@ -37,19 +37,19 @@
score = float(feature[3])
return chrom, chrom_start, chrom_end, None, score
def main():
-
+
input_fname = sys.argv[1]
out_fname = sys.argv[2]
-
+
reader = BedGraphReader( open( input_fname ) )
-
+
# Fill array from reader
d = array_tree_dict_from_reader( reader, {}, block_size = BLOCK_SIZE )
-
+
for array_tree in d.itervalues():
array_tree.root.build_summary()
-
+
FileArrayTreeDict.dict_to_file( d, open( out_fname, "w" ) )
-if __name__ == "__main__":
+if __name__ == "__main__":
main()
\ No newline at end of file
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/bgzip.py
--- a/lib/galaxy/datatypes/converters/bgzip.py
+++ b/lib/galaxy/datatypes/converters/bgzip.py
@@ -19,13 +19,13 @@
parser.add_option( '-P', '--preset', dest='preset' )
(options, args) = parser.parse_args()
input_fname, output_fname = args
-
+
tmpfile = tempfile.NamedTemporaryFile()
sort_params = None
-
+
if options.chrom_col and options.start_col and options.end_col:
- sort_params = ["sort",
- "-k%(i)s,%(i)s" % { 'i': options.chrom_col },
+ sort_params = ["sort",
+ "-k%(i)s,%(i)s" % { 'i': options.chrom_col },
"-k%(i)i,%(i)in" % { 'i': options.start_col },
"-k%(i)i,%(i)in" % { 'i': options.end_col }
]
@@ -40,9 +40,8 @@
after_sort = subprocess.Popen(sort_params, stdin=grepped.stdout, stderr=subprocess.PIPE, stdout=tmpfile )
grepped.stdout.close()
output, err = after_sort.communicate()
-
+
ctabix.tabix_compress(tmpfile.name, output_fname, force=True)
-
-if __name__ == "__main__":
+
+if __name__ == "__main__":
main()
-
\ No newline at end of file
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/fasta_to_len.py
--- a/lib/galaxy/datatypes/converters/fasta_to_len.py
+++ b/lib/galaxy/datatypes/converters/fasta_to_len.py
@@ -10,7 +10,7 @@
assert sys.version_info[:2] >= ( 2, 4 )
def compute_fasta_length( fasta_file, out_file, keep_first_char, keep_first_word=False ):
-
+
infile = fasta_file
out = open( out_file, 'w')
keep_first_char = int( keep_first_char )
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/fastq_to_fqtoc.py
--- a/lib/galaxy/datatypes/converters/fastq_to_fqtoc.py
+++ b/lib/galaxy/datatypes/converters/fastq_to_fqtoc.py
@@ -38,11 +38,11 @@
chunk_end = in_file.tell()
out_file.write('{"start":"%s","end":"%s","sequences":"%s"},' % (chunk_begin, chunk_end, sequences))
chunk_begin = chunk_end
-
+
chunk_end = in_file.tell()
out_file.write('{"start":"%s","end":"%s","sequences":"%s"}' % (chunk_begin, chunk_end, (current_line % lines_per_chunk) / 4))
out_file.write(']}\n')
-
-if __name__ == "__main__":
+
+if __name__ == "__main__":
main()
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/fastqsolexa_to_fasta_converter.py
--- a/lib/galaxy/datatypes/converters/fastqsolexa_to_fasta_converter.py
+++ b/lib/galaxy/datatypes/converters/fastqsolexa_to_fasta_converter.py
@@ -7,7 +7,7 @@
1st line: @title_of_seq
2nd line: nucleotides
3rd line: +title_of_qualityscore (might be skipped)
-4th line: quality scores
+4th line: quality scores
(in three forms: a. digits, b. ASCII codes, the first char as the coding base, c. ASCII codes without the first char.)
Usage:
@@ -52,4 +52,4 @@
outfile.close()
-if __name__ == "__main__": __main__()
\ No newline at end of file
+if __name__ == "__main__": __main__()
\ No newline at end of file
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/fastqsolexa_to_qual_converter.py
--- a/lib/galaxy/datatypes/converters/fastqsolexa_to_qual_converter.py
+++ b/lib/galaxy/datatypes/converters/fastqsolexa_to_qual_converter.py
@@ -7,7 +7,7 @@
1st line: @title_of_seq
2nd line: nucleotides
3rd line: +title_of_qualityscore (might be skipped)
-4th line: quality scores
+4th line: quality scores
(in three forms: a. digits, b. ASCII codes, the first char as the coding base, c. ASCII codes without the first char.)
Usage:
@@ -30,7 +30,7 @@
seq_title_startswith = ''
default_coding_value = 64
fastq_block_lines = 0
-
+
for i, line in enumerate( file( infile_name ) ):
line = line.rstrip()
if not line or line.startswith( '#' ):
@@ -52,7 +52,7 @@
if not qual_title_startswith:
qual_title_startswith = line_startswith
if line_startswith != qual_title_startswith:
- stop_err( 'Invalid fastqsolexa format at line %d: %s.' % ( i + 1, line ) )
+ stop_err( 'Invalid fastqsolexa format at line %d: %s.' % ( i + 1, line ) )
quality_title = line[1:]
if quality_title and read_title != quality_title:
stop_err( 'Invalid fastqsolexa format at line %d: sequence title "%s" differes from score title "%s".' % ( i + 1, read_title, quality_title ) )
@@ -67,15 +67,15 @@
# peek: ascii or digits?
val = line.split()[0]
- try:
+ try:
check = int( val )
fastq_integer = True
except:
fastq_integer = False
-
+
if fastq_integer: # digits
qual = line
- else:
+ else:
# ascii
quality_score_length = len( line )
if quality_score_length == read_length + 1:
@@ -89,8 +89,7 @@
score = ord( char ) - quality_score_startswith # 64
qual = "%s%s " % ( qual, str( score ) )
outfile_score.write( '%s\n' % qual )
-
+
outfile_score.close()
-if __name__ == "__main__": __main__()
-
\ No newline at end of file
+if __name__ == "__main__": __main__()
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/gff_to_interval_index_converter.py
--- a/lib/galaxy/datatypes/converters/gff_to_interval_index_converter.py
+++ b/lib/galaxy/datatypes/converters/gff_to_interval_index_converter.py
@@ -18,23 +18,22 @@
def main():
# Arguments
input_fname, out_fname = sys.argv[1:]
-
+
# Do conversion.
index = Indexes()
offset = 0
reader_wrapper = GFFReaderWrapper( fileinput.FileInput( input_fname ), fix_strand=True )
- for feature in list( reader_wrapper ):
+ for feature in list( reader_wrapper ):
# Add feature; index expects BED coordinates.
if isinstance( feature, GenomicInterval ):
convert_gff_coords_to_bed( feature )
index.add( feature.chrom, feature.start, feature.end, offset )
-
+
# Always increment offset, even if feature is not an interval and hence
# not included in the index.
offset += feature.raw_size
index.write( open(out_fname, "w") )
-
-if __name__ == "__main__":
+
+if __name__ == "__main__":
main()
-
\ No newline at end of file
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/interval_to_bed_converter.py
--- a/lib/galaxy/datatypes/converters/interval_to_bed_converter.py
+++ b/lib/galaxy/datatypes/converters/interval_to_bed_converter.py
@@ -1,62 +1,62 @@
-#!/usr/bin/env python
-#Dan Blankenberg
-
-import sys
-from galaxy import eggs
-import pkg_resources; pkg_resources.require( "bx-python" )
-import bx.intervals.io
-
-assert sys.version_info[:2] >= ( 2, 4 )
-
-def stop_err( msg ):
- sys.stderr.write( msg )
- sys.exit()
-
-def __main__():
- output_name = sys.argv[1]
- input_name = sys.argv[2]
- try:
- chromCol = int( sys.argv[3] ) - 1
- except:
- stop_err( "'%s' is an invalid chrom column, correct the column settings before attempting to convert the data format." % str( sys.argv[3] ) )
- try:
- startCol = int( sys.argv[4] ) - 1
- except:
- stop_err( "'%s' is an invalid start column, correct the column settings before attempting to convert the data format." % str( sys.argv[4] ) )
- try:
- endCol = int( sys.argv[5] ) - 1
- except:
- stop_err( "'%s' is an invalid end column, correct the column settings before attempting to convert the data format." % str( sys.argv[5] ) )
- try:
- strandCol = int( sys.argv[6] ) - 1
- except:
- strandCol = -1
- try:
- nameCol = int( sys.argv[7] ) - 1
- except:
- nameCol = -1
- skipped_lines = 0
- first_skipped_line = 0
- out = open( output_name,'w' )
- count = 0
- for count, region in enumerate( bx.intervals.io.NiceReaderWrapper( open( input_name, 'r' ), chrom_col=chromCol, start_col=startCol, end_col=endCol, strand_col=strandCol, fix_strand=True, return_header=False, return_comments=False ) ):
- try:
- if nameCol >= 0:
- name = region.fields[nameCol]
- else:
- raise IndexError
- except:
- name = "region_%i" % count
- try:
-
- out.write( "%s\t%i\t%i\t%s\t%i\t%s\n" % ( region.chrom, region.start, region.end, name, 0, region.strand ) )
- except:
- skipped_lines += 1
- if not first_skipped_line:
- first_skipped_line = count + 1
- out.close()
- print "%i regions converted to BED." % ( count + 1 - skipped_lines )
- if skipped_lines > 0:
- print "Skipped %d blank or invalid lines starting with line # %d." % ( skipped_lines, first_skipped_line )
-
-if __name__ == "__main__": __main__()
+#!/usr/bin/env python
+#Dan Blankenberg
+
+import sys
+from galaxy import eggs
+import pkg_resources; pkg_resources.require( "bx-python" )
+import bx.intervals.io
+
+assert sys.version_info[:2] >= ( 2, 4 )
+
+def stop_err( msg ):
+ sys.stderr.write( msg )
+ sys.exit()
+
+def __main__():
+ output_name = sys.argv[1]
+ input_name = sys.argv[2]
+ try:
+ chromCol = int( sys.argv[3] ) - 1
+ except:
+ stop_err( "'%s' is an invalid chrom column, correct the column settings before attempting to convert the data format." % str( sys.argv[3] ) )
+ try:
+ startCol = int( sys.argv[4] ) - 1
+ except:
+ stop_err( "'%s' is an invalid start column, correct the column settings before attempting to convert the data format." % str( sys.argv[4] ) )
+ try:
+ endCol = int( sys.argv[5] ) - 1
+ except:
+ stop_err( "'%s' is an invalid end column, correct the column settings before attempting to convert the data format." % str( sys.argv[5] ) )
+ try:
+ strandCol = int( sys.argv[6] ) - 1
+ except:
+ strandCol = -1
+ try:
+ nameCol = int( sys.argv[7] ) - 1
+ except:
+ nameCol = -1
+ skipped_lines = 0
+ first_skipped_line = 0
+ out = open( output_name,'w' )
+ count = 0
+ for count, region in enumerate( bx.intervals.io.NiceReaderWrapper( open( input_name, 'r' ), chrom_col=chromCol, start_col=startCol, end_col=endCol, strand_col=strandCol, fix_strand=True, return_header=False, return_comments=False ) ):
+ try:
+ if nameCol >= 0:
+ name = region.fields[nameCol]
+ else:
+ raise IndexError
+ except:
+ name = "region_%i" % count
+ try:
+
+ out.write( "%s\t%i\t%i\t%s\t%i\t%s\n" % ( region.chrom, region.start, region.end, name, 0, region.strand ) )
+ except:
+ skipped_lines += 1
+ if not first_skipped_line:
+ first_skipped_line = count + 1
+ out.close()
+ print "%i regions converted to BED." % ( count + 1 - skipped_lines )
+ if skipped_lines > 0:
+ print "Skipped %d blank or invalid lines starting with line # %d." % ( skipped_lines, first_skipped_line )
+
+if __name__ == "__main__": __main__()
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/interval_to_bedstrict_converter.py
--- a/lib/galaxy/datatypes/converters/interval_to_bedstrict_converter.py
+++ b/lib/galaxy/datatypes/converters/interval_to_bedstrict_converter.py
@@ -64,7 +64,7 @@
force_num_columns = int( sys.argv[9] )
except:
force_num_columns = None
-
+
skipped_lines = 0
first_skipped_line = None
out = open( output_name,'w' )
@@ -88,32 +88,32 @@
break
#name (fields[3]) can be anything, no verification needed
if len( fields ) > 4:
- float( fields[4] ) #score - A score between 0 and 1000. If the track line useScore attribute is set to 1 for this annotation data set, the score value will determine the level of gray in which this feature is displayed (higher numbers = darker gray).
+ float( fields[4] ) #score - A score between 0 and 1000. If the track line useScore attribute is set to 1 for this annotation data set, the score value will determine the level of gray in which this feature is displayed (higher numbers = darker gray).
if len( fields ) > 5:
- assert fields[5] in [ '+', '-' ], 'Invalid strand' #strand - Defines the strand - either '+' or '-'.
+ assert fields[5] in [ '+', '-' ], 'Invalid strand' #strand - Defines the strand - either '+' or '-'.
if len( fields ) > 6:
- int( fields[6] ) #thickStart - The starting position at which the feature is drawn thickly (for example, the start codon in gene displays).
+ int( fields[6] ) #thickStart - The starting position at which the feature is drawn thickly (for example, the start codon in gene displays).
if len( fields ) > 7:
- int( fields[7] ) #thickEnd - The ending position at which the feature is drawn thickly (for example, the stop codon in gene displays).
- if len( fields ) > 8:
+ int( fields[7] ) #thickEnd - The ending position at which the feature is drawn thickly (for example, the stop codon in gene displays).
+ if len( fields ) > 8:
if fields[8] != '0': #itemRgb - An RGB value of the form R,G,B (e.g. 255,0,0). If the track line itemRgb attribute is set to "On", this RBG value will determine the display color of the data contained in this BED line. NOTE: It is recommended that a simple color scheme (eight colors or less) be used with this attribute to avoid overwhelming the color resources of the Genome Browser and your Internet browser.
fields2 = fields[8].split( ',' )
assert len( fields2 ) == 3, 'RGB value must be 0 or have length of 3'
for field in fields2:
int( field ) #rgb values are integers
if len( fields ) > 9:
- int( fields[9] ) #blockCount - The number of blocks (exons) in the BED line.
+ int( fields[9] ) #blockCount - The number of blocks (exons) in the BED line.
if len( fields ) > 10:
- if fields[10] != ',': #blockSizes - A comma-separated list of the block sizes. The number of items in this list should correspond to blockCount.
+ if fields[10] != ',': #blockSizes - A comma-separated list of the block sizes. The number of items in this list should correspond to blockCount.
fields2 = fields[10].rstrip( "," ).split( "," ) #remove trailing comma and split on comma
- for field in fields2:
+ for field in fields2:
int( field )
if len( fields ) > 11:
- if fields[11] != ',': #blockStarts - A comma-separated list of block starts. All of the blockStart positions should be calculated relative to chromStart. The number of items in this list should correspond to blockCount.
+ if fields[11] != ',': #blockStarts - A comma-separated list of block starts. All of the blockStart positions should be calculated relative to chromStart. The number of items in this list should correspond to blockCount.
fields2 = fields[11].rstrip( "," ).split( "," ) #remove trailing comma and split on comma
for field in fields2:
int( field )
- except:
+ except:
strict_bed = False
break
if force_num_columns is not None and len( fields ) != force_num_columns:
@@ -122,7 +122,7 @@
else:
strict_bed = False
out.close()
-
+
if not strict_bed:
skipped_lines = 0
first_skipped_line = None
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/interval_to_coverage.py
--- a/lib/galaxy/datatypes/converters/interval_to_coverage.py
+++ b/lib/galaxy/datatypes/converters/interval_to_coverage.py
@@ -50,12 +50,12 @@
forward = forward_covs[partition]
reverse = reverse_covs[partition]
if forward+reverse > 0:
- coverage.write(chrom=chrom, position=xrange(partitions[partition],partitions[partition+1]),
+ coverage.write(chrom=chrom, position=xrange(partitions[partition],partitions[partition+1]),
forward=forward, reverse=reverse)
partitions = []
forward_covs = []
reverse_covs = []
-
+
start_index = bisect(partitions, record.start)
forward = int(record.strand == "+")
reverse = int(record.strand == "-")
@@ -74,43 +74,43 @@
partitions.insert(end_index, record.end)
forward_covs.insert(end_index, forward_covs[end_index-1] - forward )
reverse_covs.insert(end_index, reverse_covs[end_index-1] - reverse )
-
+
if partitions:
for partition in xrange(0, start_index):
forward = forward_covs[partition]
reverse = reverse_covs[partition]
if forward+reverse > 0:
- coverage.write(chrom=chrom, position=xrange(partitions[partition],partitions[partition+1]),
+ coverage.write(chrom=chrom, position=xrange(partitions[partition],partitions[partition+1]),
forward=forward, reverse=reverse)
partitions = partitions[start_index:]
forward_covs = forward_covs[start_index:]
reverse_covs = reverse_covs[start_index:]
-
+
lastchrom = chrom
-
+
# Finish the last chromosome
if partitions:
for partition in xrange(0, len(partitions)-1):
forward = forward_covs[partition]
reverse = reverse_covs[partition]
if forward+reverse > 0:
- coverage.write(chrom=chrom, position=xrange(partitions[partition],partitions[partition+1]),
+ coverage.write(chrom=chrom, position=xrange(partitions[partition],partitions[partition+1]),
forward=forward, reverse=reverse)
-
+
class CoverageWriter( object ):
def __init__( self, out_stream=None, chromCol=0, positionCol=1, forwardCol=2, reverseCol=3 ):
self.out_stream = out_stream
self.reverseCol = reverseCol
self.nlines = 0
- positions = {str(chromCol):'%(chrom)s',
- str(positionCol):'%(position)d',
- str(forwardCol):'%(forward)d',
+ positions = {str(chromCol):'%(chrom)s',
+ str(positionCol):'%(position)d',
+ str(forwardCol):'%(forward)d',
str(reverseCol):'%(reverse)d'}
- if reverseCol < 0:
+ if reverseCol < 0:
self.template = "%(0)s\t%(1)s\t%(2)s\n" % positions
else:
self.template = "%(0)s\t%(1)s\t%(2)s\t%(3)s\n" % positions
-
+
def write(self, **kwargs ):
if self.reverseCol < 0: kwargs['forward'] += kwargs['reverse']
posgen = kwargs['position']
@@ -121,12 +121,12 @@
def close(self):
self.out_stream.flush()
self.out_stream.close()
-
+
if __name__ == "__main__":
options, args = doc_optparse.parse( __doc__ )
try:
chr_col_1, start_col_1, end_col_1, strand_col_1 = [int(x)-1 for x in options.cols1.split(',')]
- chr_col_2, position_col_2, forward_col_2, reverse_col_2 = [int(x)-1 for x in options.cols2.split(',')]
+ chr_col_2, position_col_2, forward_col_2, reverse_col_2 = [int(x)-1 for x in options.cols2.split(',')]
in_fname, out_fname = args
except:
doc_optparse.exception()
@@ -141,7 +141,7 @@
chromCol = chr_col_2, positionCol = position_col_2,
forwardCol = forward_col_2, reverseCol = reverse_col_2, )
temp_file.seek(0)
- interval = io.NiceReaderWrapper( temp_file,
+ interval = io.NiceReaderWrapper( temp_file,
chrom_col=chr_col_1,
start_col=start_col_1,
end_col=end_col_1,
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/interval_to_fli.py
--- a/lib/galaxy/datatypes/converters/interval_to_fli.py
+++ b/lib/galaxy/datatypes/converters/interval_to_fli.py
@@ -78,13 +78,13 @@
if len( fields ) < 4:
continue
- # Process line
+ # Process line
name_loc_dict[ fields[3] ] = {
'contig': fields[0],
'start': int( fields[1] ),
'end': int ( fields[2] )
}
-
+
# Create sorted list of entries.
out = open( out_fname, 'w' )
max_len = 0
@@ -95,7 +95,7 @@
if len( entry ) > max_len:
max_len = len( entry )
entries.append( entry )
-
+
# Write padded entries.
out.write( str( max_len + 1 ).ljust( max_len ) + '\n' )
for entry in entries:
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/interval_to_tabix_converter.py
--- a/lib/galaxy/datatypes/converters/interval_to_tabix_converter.py
+++ b/lib/galaxy/datatypes/converters/interval_to_tabix_converter.py
@@ -20,20 +20,19 @@
parser.add_option( '-P', '--preset', dest='preset' )
(options, args) = parser.parse_args()
input_fname, index_fname, out_fname = args
-
+
# Create index.
if options.preset:
# Preset type.
- ctabix.tabix_index(filename=index_fname, preset=options.preset, keep_original=True,
+ ctabix.tabix_index(filename=index_fname, preset=options.preset, keep_original=True,
already_compressed=True, index_filename=out_fname)
else:
# For interval files; column indices are 0-based.
- ctabix.tabix_index(filename=index_fname, seq_col=(options.chrom_col - 1),
- start_col=(options.start_col - 1), end_col=(options.end_col - 1),
+ ctabix.tabix_index(filename=index_fname, seq_col=(options.chrom_col - 1),
+ start_col=(options.start_col - 1), end_col=(options.end_col - 1),
keep_original=True, already_compressed=True, index_filename=out_fname)
if os.path.getsize(index_fname) == 0:
sys.stderr.write("The converted tabix index file is empty, meaning the input data is invalid.")
-
-if __name__ == "__main__":
+
+if __name__ == "__main__":
main()
-
\ No newline at end of file
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/lped_to_fped_converter.py
--- a/lib/galaxy/datatypes/converters/lped_to_fped_converter.py
+++ b/lib/galaxy/datatypes/converters/lped_to_fped_converter.py
@@ -1,110 +1,110 @@
-# for rgenetics - lped to fbat
-# recode to numeric fbat version
-# much slower so best to always
-# use numeric alleles internally
-
-import sys,os,time
-
-
-prog = os.path.split(sys.argv[0])[-1]
-myversion = 'Oct 10 2009'
-
-galhtmlprefix = """<?xml version="1.0" encoding="utf-8" ?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
-<head>
-<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
-<meta name="generator" content="Galaxy %s tool output - see http://g2.trac.bx.psu.edu/" />
-<title></title>
-<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
-</head>
-<body>
-<div class="document">
-"""
-
-def timenow():
- """return current time as a string
- """
- return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
-
-
-def rgConv(inpedfilepath,outhtmlname,outfilepath):
- """convert linkage ped/map to fbat"""
- recode={'A':'1','C':'2','G':'3','T':'4','N':'0','0':'0','1':'1','2':'2','3':'3','4':'4'}
- basename = os.path.split(inpedfilepath)[-1] # get basename
- inmap = '%s.map' % inpedfilepath
- inped = '%s.ped' % inpedfilepath
- outf = '%s.ped' % basename # note the fbat exe insists that this is the extension for the ped data
- outfpath = os.path.join(outfilepath,outf) # where to write the fbat format file to
- try:
- mf = file(inmap,'r')
- except:
- sys.stderr.write('%s cannot open inmap file %s - do you have permission?\n' % (prog,inmap))
- sys.exit(1)
- try:
- rsl = [x.split()[1] for x in mf]
- except:
- sys.stderr.write('## cannot parse %s' % inmap)
- sys.exit(1)
- try:
- os.makedirs(outfilepath)
- except:
- pass # already exists
- head = ' '.join(rsl) # list of rs numbers
- # TODO add anno to rs but fbat will prolly barf?
- pedf = file(inped,'r')
- o = file(outfpath,'w',2**20)
- o.write(head)
- o.write('\n')
- for i,row in enumerate(pedf):
- if i == 0:
- lrow = row.split()
- try:
- x = [int(x) for x in lrow[10:50]] # look for non numeric codes
- except:
- dorecode = 1
- if dorecode:
- lrow = row.strip().split()
- p = lrow[:6]
- g = lrow[6:]
- gc = [recode.get(x,'0') for x in g]
- lrow = p+gc
- row = '%s\n' % ' '.join(lrow)
- o.write(row)
- o.close()
-
-
-def main():
- """call fbater
- need to work with rgenetics composite datatypes
- so in and out are html files with data in extrafiles path
- <command interpreter="python">rg_convert_lped_fped.py '$input1/$input1.metadata.base_name'
- '$output1' '$output1.extra_files_path'
- </command>
- """
- nparm = 3
- if len(sys.argv) < nparm:
- sys.stderr.write('## %s called with %s - needs %d parameters \n' % (prog,sys.argv,nparm))
- sys.exit(1)
- inpedfilepath = sys.argv[1]
- outhtmlname = sys.argv[2]
- outfilepath = sys.argv[3]
- try:
- os.makedirs(outfilepath)
- except:
- pass
- rgConv(inpedfilepath,outhtmlname,outfilepath)
- f = file(outhtmlname,'w')
- f.write(galhtmlprefix % prog)
- flist = os.listdir(outfilepath)
- print '## Rgenetics: http://rgenetics.org Galaxy Tools %s %s' % (prog,timenow()) # becomes info
- f.write('<div>## Rgenetics: http://rgenetics.org Galaxy Tools %s %s\n<ol>' % (prog,timenow()))
- for i, data in enumerate( flist ):
- f.write('<li><a href="%s">%s</a></li>\n' % (os.path.split(data)[-1],os.path.split(data)[-1]))
- f.write("</div></body></html>")
- f.close()
-
-
-
-if __name__ == "__main__":
- main()
+# for rgenetics - lped to fbat
+# recode to numeric fbat version
+# much slower so best to always
+# use numeric alleles internally
+
+import sys,os,time
+
+
+prog = os.path.split(sys.argv[0])[-1]
+myversion = 'Oct 10 2009'
+
+galhtmlprefix = """<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name="generator" content="Galaxy %s tool output - see http://g2.trac.bx.psu.edu/" />
+<title></title>
+<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
+</head>
+<body>
+<div class="document">
+"""
+
+def timenow():
+ """return current time as a string
+ """
+ return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
+
+
+def rgConv(inpedfilepath,outhtmlname,outfilepath):
+ """convert linkage ped/map to fbat"""
+ recode={'A':'1','C':'2','G':'3','T':'4','N':'0','0':'0','1':'1','2':'2','3':'3','4':'4'}
+ basename = os.path.split(inpedfilepath)[-1] # get basename
+ inmap = '%s.map' % inpedfilepath
+ inped = '%s.ped' % inpedfilepath
+ outf = '%s.ped' % basename # note the fbat exe insists that this is the extension for the ped data
+ outfpath = os.path.join(outfilepath,outf) # where to write the fbat format file to
+ try:
+ mf = file(inmap,'r')
+ except:
+ sys.stderr.write('%s cannot open inmap file %s - do you have permission?\n' % (prog,inmap))
+ sys.exit(1)
+ try:
+ rsl = [x.split()[1] for x in mf]
+ except:
+ sys.stderr.write('## cannot parse %s' % inmap)
+ sys.exit(1)
+ try:
+ os.makedirs(outfilepath)
+ except:
+ pass # already exists
+ head = ' '.join(rsl) # list of rs numbers
+ # TODO add anno to rs but fbat will prolly barf?
+ pedf = file(inped,'r')
+ o = file(outfpath,'w',2**20)
+ o.write(head)
+ o.write('\n')
+ for i,row in enumerate(pedf):
+ if i == 0:
+ lrow = row.split()
+ try:
+ x = [int(x) for x in lrow[10:50]] # look for non numeric codes
+ except:
+ dorecode = 1
+ if dorecode:
+ lrow = row.strip().split()
+ p = lrow[:6]
+ g = lrow[6:]
+ gc = [recode.get(x,'0') for x in g]
+ lrow = p+gc
+ row = '%s\n' % ' '.join(lrow)
+ o.write(row)
+ o.close()
+
+
+def main():
+ """call fbater
+ need to work with rgenetics composite datatypes
+ so in and out are html files with data in extrafiles path
+ <command interpreter="python">rg_convert_lped_fped.py '$input1/$input1.metadata.base_name'
+ '$output1' '$output1.extra_files_path'
+ </command>
+ """
+ nparm = 3
+ if len(sys.argv) < nparm:
+ sys.stderr.write('## %s called with %s - needs %d parameters \n' % (prog,sys.argv,nparm))
+ sys.exit(1)
+ inpedfilepath = sys.argv[1]
+ outhtmlname = sys.argv[2]
+ outfilepath = sys.argv[3]
+ try:
+ os.makedirs(outfilepath)
+ except:
+ pass
+ rgConv(inpedfilepath,outhtmlname,outfilepath)
+ f = file(outhtmlname,'w')
+ f.write(galhtmlprefix % prog)
+ flist = os.listdir(outfilepath)
+ print '## Rgenetics: http://rgenetics.org Galaxy Tools %s %s' % (prog,timenow()) # becomes info
+ f.write('<div>## Rgenetics: http://rgenetics.org Galaxy Tools %s %s\n<ol>' % (prog,timenow()))
+ for i, data in enumerate( flist ):
+ f.write('<li><a href="%s">%s</a></li>\n' % (os.path.split(data)[-1],os.path.split(data)[-1]))
+ f.write("</div></body></html>")
+ f.close()
+
+
+
+if __name__ == "__main__":
+ main()
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/lped_to_pbed_converter.py
--- a/lib/galaxy/datatypes/converters/lped_to_pbed_converter.py
+++ b/lib/galaxy/datatypes/converters/lped_to_pbed_converter.py
@@ -1,110 +1,110 @@
-# for rgenetics - lped to pbed
-# where to stop with converters
-# pbed might be central
-# eg lped/eigen/fbat/snpmatrix all to pbed
-# and pbed to lped/eigen/fbat/snpmatrix ?
-# that's a lot of converters
-import sys,os,time,subprocess
-
-
-prog = os.path.split(sys.argv[0])[-1]
-myversion = 'Oct 10 2009'
-
-galhtmlprefix = """<?xml version="1.0" encoding="utf-8" ?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
-<head>
-<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
-<meta name="generator" content="Galaxy %s tool output - see http://g2.trac.bx.psu.edu/" />
-<title></title>
-<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
-</head>
-<body>
-<div class="document">
-"""
-
-def timenow():
- """return current time as a string
- """
- return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
-
-def getMissval(inped=''):
- """
- read some lines...ugly hack - try to guess missing value
- should be N or 0 but might be . or -
- """
- commonmissvals = {'N':'N','0':'0','n':'n','9':'9','-':'-','.':'.'}
- try:
- f = file(inped,'r')
- except:
- return None # signal no in file
- missval = None
- while missval == None: # doggedly continue until we solve the mystery
- try:
- l = f.readline()
- except:
- break
- ll = l.split()[6:] # ignore pedigree stuff
- for c in ll:
- if commonmissvals.get(c,None):
- missval = c
- f.close()
- return missval
- if not missval:
- missval = 'N' # punt
- close(f)
- return missval
-
-def rgConv(inpedfilepath,outhtmlname,outfilepath,plink):
- """
- """
- pedf = '%s.ped' % inpedfilepath
- basename = os.path.split(inpedfilepath)[-1] # get basename
- outroot = os.path.join(outfilepath,basename)
- missval = getMissval(inped = pedf)
- if not missval:
- print '### lped_to_pbed_converter.py cannot identify missing value in %s' % pedf
- missval = '0'
- cl = '%s --noweb --file %s --make-bed --out %s --missing-genotype %s' % (plink,inpedfilepath,outroot,missval)
- p = subprocess.Popen(cl,shell=True,cwd=outfilepath)
- retval = p.wait() # run plink
-
-
-
-
-def main():
- """
- need to work with rgenetics composite datatypes
- so in and out are html files with data in extrafiles path
- <command interpreter="python">lped_to_pbed_converter.py '$input1/$input1.metadata.base_name'
- '$output1' '$output1.extra_files_path' '${GALAXY_DATA_INDEX_DIR}/rg/bin/plink'
- </command>
- """
- nparm = 4
- if len(sys.argv) < nparm:
- sys.stderr.write('## %s called with %s - needs %d parameters \n' % (prog,sys.argv,nparm))
- sys.exit(1)
- inpedfilepath = sys.argv[1]
- outhtmlname = sys.argv[2]
- outfilepath = sys.argv[3]
- try:
- os.makedirs(outfilepath)
- except:
- pass
- plink = sys.argv[4]
- rgConv(inpedfilepath,outhtmlname,outfilepath,plink)
- f = file(outhtmlname,'w')
- f.write(galhtmlprefix % prog)
- flist = os.listdir(outfilepath)
- s = '## Rgenetics: http://rgenetics.org Galaxy Tools %s %s' % (prog,timenow()) # becomes info
- print s
- f.write('<div>%s\n<ol>' % (s))
- for i, data in enumerate( flist ):
- f.write('<li><a href="%s">%s</a></li>\n' % (os.path.split(data)[-1],os.path.split(data)[-1]))
- f.write("</div></body></html>")
- f.close()
-
-
-
-if __name__ == "__main__":
- main()
+# for rgenetics - lped to pbed
+# where to stop with converters
+# pbed might be central
+# eg lped/eigen/fbat/snpmatrix all to pbed
+# and pbed to lped/eigen/fbat/snpmatrix ?
+# that's a lot of converters
+import sys,os,time,subprocess
+
+
+prog = os.path.split(sys.argv[0])[-1]
+myversion = 'Oct 10 2009'
+
+galhtmlprefix = """<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name="generator" content="Galaxy %s tool output - see http://g2.trac.bx.psu.edu/" />
+<title></title>
+<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
+</head>
+<body>
+<div class="document">
+"""
+
+def timenow():
+ """return current time as a string
+ """
+ return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
+
+def getMissval(inped=''):
+ """
+ read some lines...ugly hack - try to guess missing value
+ should be N or 0 but might be . or -
+ """
+ commonmissvals = {'N':'N','0':'0','n':'n','9':'9','-':'-','.':'.'}
+ try:
+ f = file(inped,'r')
+ except:
+ return None # signal no in file
+ missval = None
+ while missval == None: # doggedly continue until we solve the mystery
+ try:
+ l = f.readline()
+ except:
+ break
+ ll = l.split()[6:] # ignore pedigree stuff
+ for c in ll:
+ if commonmissvals.get(c,None):
+ missval = c
+ f.close()
+ return missval
+ if not missval:
+ missval = 'N' # punt
+ close(f)
+ return missval
+
+def rgConv(inpedfilepath,outhtmlname,outfilepath,plink):
+ """
+ """
+ pedf = '%s.ped' % inpedfilepath
+ basename = os.path.split(inpedfilepath)[-1] # get basename
+ outroot = os.path.join(outfilepath,basename)
+ missval = getMissval(inped = pedf)
+ if not missval:
+ print '### lped_to_pbed_converter.py cannot identify missing value in %s' % pedf
+ missval = '0'
+ cl = '%s --noweb --file %s --make-bed --out %s --missing-genotype %s' % (plink,inpedfilepath,outroot,missval)
+ p = subprocess.Popen(cl,shell=True,cwd=outfilepath)
+ retval = p.wait() # run plink
+
+
+
+
+def main():
+ """
+ need to work with rgenetics composite datatypes
+ so in and out are html files with data in extrafiles path
+ <command interpreter="python">lped_to_pbed_converter.py '$input1/$input1.metadata.base_name'
+ '$output1' '$output1.extra_files_path' '${GALAXY_DATA_INDEX_DIR}/rg/bin/plink'
+ </command>
+ """
+ nparm = 4
+ if len(sys.argv) < nparm:
+ sys.stderr.write('## %s called with %s - needs %d parameters \n' % (prog,sys.argv,nparm))
+ sys.exit(1)
+ inpedfilepath = sys.argv[1]
+ outhtmlname = sys.argv[2]
+ outfilepath = sys.argv[3]
+ try:
+ os.makedirs(outfilepath)
+ except:
+ pass
+ plink = sys.argv[4]
+ rgConv(inpedfilepath,outhtmlname,outfilepath,plink)
+ f = file(outhtmlname,'w')
+ f.write(galhtmlprefix % prog)
+ flist = os.listdir(outfilepath)
+ s = '## Rgenetics: http://rgenetics.org Galaxy Tools %s %s' % (prog,timenow()) # becomes info
+ print s
+ f.write('<div>%s\n<ol>' % (s))
+ for i, data in enumerate( flist ):
+ f.write('<li><a href="%s">%s</a></li>\n' % (os.path.split(data)[-1],os.path.split(data)[-1]))
+ f.write("</div></body></html>")
+ f.close()
+
+
+
+if __name__ == "__main__":
+ main()
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/maf_to_fasta_converter.py
--- a/lib/galaxy/datatypes/converters/maf_to_fasta_converter.py
+++ b/lib/galaxy/datatypes/converters/maf_to_fasta_converter.py
@@ -1,32 +1,32 @@
-#!/usr/bin/env python
-#Dan Blankenberg
-
-import sys
-from galaxy import eggs
-import pkg_resources; pkg_resources.require( "bx-python" )
-import bx.align.maf
-from galaxy.tools.util import maf_utilities
-
-assert sys.version_info[:2] >= ( 2, 4 )
-
-def __main__():
- output_name = sys.argv.pop(1)
- input_name = sys.argv.pop(1)
- out = open( output_name, 'w' )
- count = 0
- for count, block in enumerate( bx.align.maf.Reader( open( input_name, 'r' ) ) ):
+#!/usr/bin/env python
+#Dan Blankenberg
+
+import sys
+from galaxy import eggs
+import pkg_resources; pkg_resources.require( "bx-python" )
+import bx.align.maf
+from galaxy.tools.util import maf_utilities
+
+assert sys.version_info[:2] >= ( 2, 4 )
+
+def __main__():
+ output_name = sys.argv.pop(1)
+ input_name = sys.argv.pop(1)
+ out = open( output_name, 'w' )
+ count = 0
+ for count, block in enumerate( bx.align.maf.Reader( open( input_name, 'r' ) ) ):
spec_counts = {}
- for c in block.components:
+ for c in block.components:
spec, chrom = maf_utilities.src_split( c.src )
if spec not in spec_counts:
spec_counts[ spec ] = 0
else:
- spec_counts[ spec ] += 1
- out.write( "%s\n" % maf_utilities.get_fasta_header( c, { 'block_index' : count, 'species' : spec, 'sequence_index' : spec_counts[ spec ] }, suffix = "%s_%i_%i" % ( spec, count, spec_counts[ spec ] ) ) )
- out.write( "%s\n" % c.text )
- out.write( "\n" )
- out.close()
- print "%i MAF blocks converted to FASTA." % ( count )
-
-
-if __name__ == "__main__": __main__()
+ spec_counts[ spec ] += 1
+ out.write( "%s\n" % maf_utilities.get_fasta_header( c, { 'block_index' : count, 'species' : spec, 'sequence_index' : spec_counts[ spec ] }, suffix = "%s_%i_%i" % ( spec, count, spec_counts[ spec ] ) ) )
+ out.write( "%s\n" % c.text )
+ out.write( "\n" )
+ out.close()
+ print "%i MAF blocks converted to FASTA." % ( count )
+
+
+if __name__ == "__main__": __main__()
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/maf_to_interval_converter.py
--- a/lib/galaxy/datatypes/converters/maf_to_interval_converter.py
+++ b/lib/galaxy/datatypes/converters/maf_to_interval_converter.py
@@ -1,32 +1,32 @@
-#!/usr/bin/env python
-#Dan Blankenberg
-
-import sys
-from galaxy import eggs
-import pkg_resources; pkg_resources.require( "bx-python" )
+#!/usr/bin/env python
+#Dan Blankenberg
+
+import sys
+from galaxy import eggs
+import pkg_resources; pkg_resources.require( "bx-python" )
import bx.align.maf
-from galaxy.tools.util import maf_utilities
-
-assert sys.version_info[:2] >= ( 2, 4 )
-
-def __main__():
- output_name = sys.argv.pop(1)
- input_name = sys.argv.pop(1)
- species = sys.argv.pop(1)
- out = open(output_name,'w')
- count = 0
- #write interval header line
- out.write( "#chrom\tstart\tend\tstrand\n" )
- try:
- for block in bx.align.maf.Reader( open( input_name, 'r' ) ):
- for c in maf_utilities.iter_components_by_src_start( block, species ):
- if c is not None:
- out.write( "%s\t%i\t%i\t%s\n" % ( maf_utilities.src_split( c.src )[-1], c.get_forward_strand_start(), c.get_forward_strand_end(), c.strand ) )
- count += 1
- except Exception, e:
- print >> sys.stderr, "There was a problem processing your input: %s" % e
- out.close()
- print "%i MAF blocks converted to Genomic Intervals for species %s." % ( count, species )
-
-
-if __name__ == "__main__": __main__()
+from galaxy.tools.util import maf_utilities
+
+assert sys.version_info[:2] >= ( 2, 4 )
+
+def __main__():
+ output_name = sys.argv.pop(1)
+ input_name = sys.argv.pop(1)
+ species = sys.argv.pop(1)
+ out = open(output_name,'w')
+ count = 0
+ #write interval header line
+ out.write( "#chrom\tstart\tend\tstrand\n" )
+ try:
+ for block in bx.align.maf.Reader( open( input_name, 'r' ) ):
+ for c in maf_utilities.iter_components_by_src_start( block, species ):
+ if c is not None:
+ out.write( "%s\t%i\t%i\t%s\n" % ( maf_utilities.src_split( c.src )[-1], c.get_forward_strand_start(), c.get_forward_strand_end(), c.strand ) )
+ count += 1
+ except Exception, e:
+ print >> sys.stderr, "There was a problem processing your input: %s" % e
+ out.close()
+ print "%i MAF blocks converted to Genomic Intervals for species %s." % ( count, species )
+
+
+if __name__ == "__main__": __main__()
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/pbed_ldreduced_converter.py
--- a/lib/galaxy/datatypes/converters/pbed_ldreduced_converter.py
+++ b/lib/galaxy/datatypes/converters/pbed_ldreduced_converter.py
@@ -21,7 +21,7 @@
<div class="document">
"""
-plinke = 'plink'
+plinke = 'plink'
def timenow():
@@ -51,7 +51,7 @@
except:
alog.append('### %s Strange - no std out from plink when running command line\n%s\n' % (timenow(),' '.join(vcl)))
return alog
-
+
def makeLDreduced(basename,infpath=None,outfpath=None,plinke='plink',forcerebuild=False,returnFname=False,
winsize="60", winmove="40", r2thresh="0.1" ):
@@ -79,11 +79,11 @@
need to work with rgenetics composite datatypes
so in and out are html files with data in extrafiles path
- .. raw:: xml
+ .. raw:: xml
<command interpreter="python">
- pbed_ldreduced_converter.py '$input1.extra_files_path/$input1.metadata.base_name' '$winsize' '$winmove' '$r2thresh'
- '$output1' '$output1.files_path' 'plink'
+ pbed_ldreduced_converter.py '$input1.extra_files_path/$input1.metadata.base_name' '$winsize' '$winmove' '$r2thresh'
+ '$output1' '$output1.files_path' 'plink'
</command>
"""
@@ -116,7 +116,7 @@
f.write('<li><a href="%s">%s</a></li>\n' % (os.path.split(data)[-1],os.path.split(data)[-1]))
f.write("</div></body></html>")
f.close()
-
+
if __name__ == "__main__":
main()
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/pbed_to_lped_converter.py
--- a/lib/galaxy/datatypes/converters/pbed_to_lped_converter.py
+++ b/lib/galaxy/datatypes/converters/pbed_to_lped_converter.py
@@ -1,80 +1,80 @@
-# for rgenetics - lped to pbed
-# where to stop with converters
-# pbed might be central
-# eg lped/eigen/fbat/snpmatrix all to pbed
-# and pbed to lped/eigen/fbat/snpmatrix ?
-# that's a lot of converters
-import sys,os,time,subprocess
-
-
-prog = os.path.split(sys.argv[0])[-1]
-myversion = 'Oct 10 2009'
-
-galhtmlprefix = """<?xml version="1.0" encoding="utf-8" ?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
-<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
-<head>
-<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
-<meta name="generator" content="Galaxy %s tool output - see http://g2.trac.bx.psu.edu/" />
-<title></title>
-<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
-</head>
-<body>
-<div class="document">
-"""
-
-def timenow():
- """return current time as a string
- """
- return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
+# for rgenetics - lped to pbed
+# where to stop with converters
+# pbed might be central
+# eg lped/eigen/fbat/snpmatrix all to pbed
+# and pbed to lped/eigen/fbat/snpmatrix ?
+# that's a lot of converters
+import sys,os,time,subprocess
-
+
+prog = os.path.split(sys.argv[0])[-1]
+myversion = 'Oct 10 2009'
+
+galhtmlprefix = """<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
+<head>
+<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+<meta name="generator" content="Galaxy %s tool output - see http://g2.trac.bx.psu.edu/" />
+<title></title>
+<link rel="stylesheet" href="/static/style/base.css" type="text/css" />
+</head>
+<body>
+<div class="document">
+"""
+
+def timenow():
+ """return current time as a string
+ """
+ return time.strftime('%d/%m/%Y %H:%M:%S', time.localtime(time.time()))
+
+
def rgConv(inpedfilepath,outhtmlname,outfilepath,plink):
"""
"""
-
- basename = os.path.split(inpedfilepath)[-1] # get basename
+
+ basename = os.path.split(inpedfilepath)[-1] # get basename
outroot = os.path.join(outfilepath,basename)
cl = '%s --noweb --bfile %s --recode --out %s ' % (plink,inpedfilepath,outroot)
p = subprocess.Popen(cl,shell=True,cwd=outfilepath)
retval = p.wait() # run plink
-
-
-
-def main():
- """
- need to work with rgenetics composite datatypes
- so in and out are html files with data in extrafiles path
- <command interpreter="python">pbed_to_lped_converter.py '$input1/$input1.metadata.base_name'
- '$output1' '$output1.extra_files_path' '${GALAXY_DATA_INDEX_DIR}/rg/bin/plink'
- </command>
- """
- nparm = 4
- if len(sys.argv) < nparm:
- sys.stderr.write('## %s called with %s - needs %d parameters \n' % (myname,sys.argv,nparm))
- sys.exit(1)
- inpedfilepath = sys.argv[1]
- outhtmlname = sys.argv[2]
- outfilepath = sys.argv[3]
- try:
- os.makedirs(outfilepath)
- except:
- pass
- plink = sys.argv[4]
- rgConv(inpedfilepath,outhtmlname,outfilepath,plink)
- f = file(outhtmlname,'w')
- f.write(galhtmlprefix % prog)
- flist = os.listdir(outfilepath)
- s = '## Rgenetics: http://rgenetics.org Galaxy Tools %s %s' % (prog,timenow()) # becomes info
- print s
- f.write('<div>%s\n<ol>' % (s))
- for i, data in enumerate( flist ):
- f.write('<li><a href="%s">%s</a></li>\n' % (os.path.split(data)[-1],os.path.split(data)[-1]))
- f.write("</div></body></html>")
- f.close()
-
-
-
-if __name__ == "__main__":
- main()
+
+
+
+def main():
+ """
+ need to work with rgenetics composite datatypes
+ so in and out are html files with data in extrafiles path
+ <command interpreter="python">pbed_to_lped_converter.py '$input1/$input1.metadata.base_name'
+ '$output1' '$output1.extra_files_path' '${GALAXY_DATA_INDEX_DIR}/rg/bin/plink'
+ </command>
+ """
+ nparm = 4
+ if len(sys.argv) < nparm:
+ sys.stderr.write('## %s called with %s - needs %d parameters \n' % (myname,sys.argv,nparm))
+ sys.exit(1)
+ inpedfilepath = sys.argv[1]
+ outhtmlname = sys.argv[2]
+ outfilepath = sys.argv[3]
+ try:
+ os.makedirs(outfilepath)
+ except:
+ pass
+ plink = sys.argv[4]
+ rgConv(inpedfilepath,outhtmlname,outfilepath,plink)
+ f = file(outhtmlname,'w')
+ f.write(galhtmlprefix % prog)
+ flist = os.listdir(outfilepath)
+ s = '## Rgenetics: http://rgenetics.org Galaxy Tools %s %s' % (prog,timenow()) # becomes info
+ print s
+ f.write('<div>%s\n<ol>' % (s))
+ for i, data in enumerate( flist ):
+ f.write('<li><a href="%s">%s</a></li>\n' % (os.path.split(data)[-1],os.path.split(data)[-1]))
+ f.write("</div></body></html>")
+ f.close()
+
+
+
+if __name__ == "__main__":
+ main()
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/picard_interval_list_to_bed6_converter.py
--- a/lib/galaxy/datatypes/converters/picard_interval_list_to_bed6_converter.py
+++ b/lib/galaxy/datatypes/converters/picard_interval_list_to_bed6_converter.py
@@ -17,7 +17,7 @@
for i, line in enumerate( open( input_name ) ):
complete_interval = False
line = line.rstrip( '\r\n' )
- if line:
+ if line:
if line.startswith( HEADER_STARTS_WITH ):
header_lines += 1
else:
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/sam_to_bam.py
--- a/lib/galaxy/datatypes/converters/sam_to_bam.py
+++ b/lib/galaxy/datatypes/converters/sam_to_bam.py
@@ -19,12 +19,12 @@
#Parse Command Line
parser = optparse.OptionParser()
(options, args) = parser.parse_args()
-
+
assert len( args ) == 2, 'You must specify the input and output filenames'
input_filename, output_filename = args
-
+
tmp_dir = tempfile.mkdtemp( prefix='tmp-sam_to_bam_converter-' )
-
+
#convert to SAM
unsorted_bam_filename = os.path.join( tmp_dir, 'unsorted.bam' )
unsorted_stderr_filename = os.path.join( tmp_dir, 'unsorted.stderr' )
@@ -43,14 +43,14 @@
else:
break
stderr.close()
-
+
#sort sam, so indexing will not fail
sorted_stderr_filename = os.path.join( tmp_dir, 'sorted.stderr' )
sorting_prefix = os.path.join( tmp_dir, 'sorted_bam' )
cmd = 'samtools sort -o "%s" "%s" > "%s"' % ( unsorted_bam_filename, sorting_prefix, output_filename )
proc = subprocess.Popen( args=cmd, stderr=open( sorted_stderr_filename, 'wb' ), shell=True, cwd=tmp_dir )
return_code = proc.wait()
-
+
if return_code:
stderr_target = sys.stderr
else:
@@ -63,7 +63,7 @@
else:
break
stderr.close()
-
+
cleanup_before_exit( tmp_dir )
if __name__=="__main__": __main__()
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/vcf_to_interval_index_converter.py
--- a/lib/galaxy/datatypes/converters/vcf_to_interval_index_converter.py
+++ b/lib/galaxy/datatypes/converters/vcf_to_interval_index_converter.py
@@ -16,20 +16,19 @@
# Read options, args.
parser = optparse.OptionParser()
(options, args) = parser.parse_args()
- in_file, out_file = args
-
+ in_file, out_file = args
+
# Do conversion.
index = Indexes()
- reader = galaxy_utils.sequence.vcf.Reader( open( in_file ) )
+ reader = galaxy_utils.sequence.vcf.Reader( open( in_file ) )
offset = reader.metadata_len
for vcf_line in reader:
- # VCF format provides a chrom and 1-based position for each variant.
+ # VCF format provides a chrom and 1-based position for each variant.
# IntervalIndex expects 0-based coordinates.
index.add( vcf_line.chrom, vcf_line.pos-1, vcf_line.pos, offset )
offset += len( vcf_line.raw_line )
-
+
index.write( open( out_file, "w" ) )
-if __name__ == "__main__":
+if __name__ == "__main__":
main()
-
\ No newline at end of file
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/vcf_to_vcf_bgzip.py
--- a/lib/galaxy/datatypes/converters/vcf_to_vcf_bgzip.py
+++ b/lib/galaxy/datatypes/converters/vcf_to_vcf_bgzip.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python
"""
-Uses pysam to bgzip a vcf file as-is.
+Uses pysam to bgzip a vcf file as-is.
Headers, which are important, are kept.
Original ordering, which may be specifically needed by tools or external display applications, is also maintained.
@@ -17,8 +17,8 @@
parser = optparse.OptionParser()
(options, args) = parser.parse_args()
input_fname, output_fname = args
-
+
ctabix.tabix_compress(input_fname, output_fname, force=True)
-
-if __name__ == "__main__":
+
+if __name__ == "__main__":
main()
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/wiggle_to_array_tree_converter.py
--- a/lib/galaxy/datatypes/converters/wiggle_to_array_tree_converter.py
+++ b/lib/galaxy/datatypes/converters/wiggle_to_array_tree_converter.py
@@ -11,19 +11,19 @@
BLOCK_SIZE = 100
def main():
-
+
input_fname = sys.argv[1]
out_fname = sys.argv[2]
-
+
reader = WiggleReader( open( input_fname ) )
-
+
# Fill array from reader
d = array_tree_dict_from_reader( reader, {}, block_size = BLOCK_SIZE )
-
+
for array_tree in d.itervalues():
array_tree.root.build_summary()
-
+
FileArrayTreeDict.dict_to_file( d, open( out_fname, "w" ) )
-if __name__ == "__main__":
+if __name__ == "__main__":
main()
\ No newline at end of file
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/converters/wiggle_to_simple_converter.py
--- a/lib/galaxy/datatypes/converters/wiggle_to_simple_converter.py
+++ b/lib/galaxy/datatypes/converters/wiggle_to_simple_converter.py
@@ -17,16 +17,16 @@
sys.exit()
def main():
- if len( sys.argv ) > 1:
+ if len( sys.argv ) > 1:
in_file = open( sys.argv[1] )
- else:
+ else:
in_file = open( sys.stdin )
-
+
if len( sys.argv ) > 2:
out_file = open( sys.argv[2], "w" )
else:
out_file = sys.stdout
-
+
try:
for fields in bx.wiggle.IntervalReader( UCSCOutWrapper( in_file ) ):
out_file.write( "%s\n" % "\t".join( map( str, fields ) ) )
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/coverage.py
--- a/lib/galaxy/datatypes/coverage.py
+++ b/lib/galaxy/datatypes/coverage.py
@@ -15,7 +15,7 @@
class LastzCoverage( Tabular ):
file_ext = "coverage"
-
+
MetadataElement( name="chromCol", default=1, desc="Chrom column", param=metadata.ColumnParameter )
MetadataElement( name="positionCol", default=2, desc="Position column", param=metadata.ColumnParameter )
MetadataElement( name="forwardCol", default=3, desc="Forward or aggregate read column", param=metadata.ColumnParameter )
@@ -44,7 +44,7 @@
t_end = math.ceil( end / resolution )
x = numpy.arange( t_start, t_end ) * resolution
y = data[ t_start : t_end ]
-
+
return zip(x.tolist(), y.tolist())
def get_track_resolution( self, dataset, start, end):
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/data.py
--- a/lib/galaxy/datatypes/data.py
+++ b/lib/galaxy/datatypes/data.py
@@ -282,14 +282,14 @@
tmpfh = open( tmpf )
# CANNOT clean up - unlink/rmdir was always failing because file handle retained to return - must rely on a cron job to clean up tmp
trans.response.set_content_type( "application/x-zip-compressed" )
- trans.response.headers[ "Content-Disposition" ] = 'attachment; filename="%s.zip"' % outfname
+ trans.response.headers[ "Content-Disposition" ] = 'attachment; filename="%s.zip"' % outfname
return tmpfh
else:
trans.response.set_content_type( "application/x-tar" )
outext = 'tgz'
if params.do_action == 'tbz':
outext = 'tbz'
- trans.response.headers[ "Content-Disposition" ] = 'attachment; filename="%s.%s"' % (outfname,outext)
+ trans.response.headers[ "Content-Disposition" ] = 'attachment; filename="%s.%s"' % (outfname,outext)
archive.wsgi_status = trans.response.wsgi_status()
archive.wsgi_headeritems = trans.response.wsgi_headeritems()
return archive.stream
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/dataproviders/base.py
--- a/lib/galaxy/datatypes/dataproviders/base.py
+++ b/lib/galaxy/datatypes/dataproviders/base.py
@@ -304,7 +304,7 @@
self.source = self.validate_source( source )
except exceptions.InvalidDataProviderSource, invalid_source:
continue
-
+
parent_gen = super( MultiSourceDataProvider, self ).__iter__()
for datum in parent_gen:
yield datum
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/dataproviders/line.py
--- a/lib/galaxy/datatypes/dataproviders/line.py
+++ b/lib/galaxy/datatypes/dataproviders/line.py
@@ -262,7 +262,7 @@
"""
if self.limit != None and self.num_data_returned >= self.limit:
return None
-
+
last_block = self.assemble_current_block()
self.num_data_read += 1
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/display_applications/application.py
--- a/lib/galaxy/datatypes/display_applications/application.py
+++ b/lib/galaxy/datatypes/display_applications/application.py
@@ -128,7 +128,7 @@
self.data = data
self.dataset_hash = dataset_hash
self.user_hash = user_hash
- self.trans = trans
+ self.trans = trans
self.ready, self.parameters = self.link.build_parameter_dict( self.data, self.dataset_hash, self.user_hash, trans, app_kwds )
def display_ready( self ):
return self.ready
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/display_applications/parameters.py
--- a/lib/galaxy/datatypes/display_applications/parameters.py
+++ b/lib/galaxy/datatypes/display_applications/parameters.py
@@ -10,9 +10,9 @@
class DisplayApplicationParameter( object ):
""" Abstract Class for Display Application Parameters """
-
+
type = None
-
+
@classmethod
def from_elem( cls, elem, link ):
param_type = elem.get( 'type', None )
@@ -42,9 +42,9 @@
class DisplayApplicationDataParameter( DisplayApplicationParameter ):
""" Parameter that returns a file_name containing the requested content """
-
+
type = 'data'
-
+
def __init__( self, elem, link ):
DisplayApplicationParameter.__init__( self, elem, link )
self.extensions = elem.get( 'format', None )
@@ -113,7 +113,7 @@
return False
def ready( self, other_values ):
value = self._get_dataset_like_object( other_values )
- if value:
+ if value:
if value.state == value.states.OK:
return True
elif value.state == value.states.ERROR:
@@ -122,9 +122,9 @@
class DisplayApplicationTemplateParameter( DisplayApplicationParameter ):
""" Parameter that returns a string containing the requested content """
-
+
type = 'template'
-
+
def __init__( self, elem, link ):
DisplayApplicationParameter.__init__( self, elem, link )
self.text = elem.text or ''
@@ -154,7 +154,7 @@
if self.parameter.guess_mime_type:
mime, encoding = mimetypes.guess_type( self._url )
if not mime:
- mime = self.trans.app.datatypes_registry.get_mimetype_by_extension( ".".split( self._url )[ -1 ], None )
+ mime = self.trans.app.datatypes_registry.get_mimetype_by_extension( ".".split( self._url )[ -1 ], None )
if mime:
return mime
return 'text/plain'
@@ -193,7 +193,7 @@
if self.parameter.guess_mime_type:
mime, encoding = mimetypes.guess_type( self._url )
if not mime:
- mime = self.trans.app.datatypes_registry.get_mimetype_by_extension( ".".split( self._url )[ -1 ], None )
+ mime = self.trans.app.datatypes_registry.get_mimetype_by_extension( ".".split( self._url )[ -1 ], None )
if mime:
return mime
if hasattr( self.value, 'get_mime' ):
diff -r f33687b0e59013a924a696f1c9630cae960b74a2 -r be3b0358acc9f251a8c8d66beb87f6b4684c1c10 lib/galaxy/datatypes/display_applications/util.py
--- a/lib/galaxy/datatypes/display_applications/util.py
+++ b/lib/galaxy/datatypes/display_applications/util.py
@@ -10,7 +10,7 @@
user_hash = 'None'
else:
user_hash = str( user.id )
- # Pad to a multiple of 8 with leading "!"
+ # Pad to a multiple of 8 with leading "!"
user_hash = ( "!" * ( 8 - len( user_hash ) % 8 ) ) + user_hash
cipher = Blowfish.new( str( dataset.create_time ) )
user_hash = cipher.encrypt( user_hash ).encode( 'hex' )
This diff is so big that we needed to truncate the remainder.
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
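
[Illustrative aside, not part of the commit above.] The rgenetics converters in this diff all follow the same pattern: guess a missing-value code, build a plink command line, run it in the output directory with subprocess, then write an HTML index of the produced files. A minimal sketch of the core step, mirroring the flags visible in lped_to_pbed_converter.py above; the plink path and input locations here are placeholders, not values from the commit:

import os
import subprocess

def run_plink_lped_to_pbed(plink, inpedfilepath, outfilepath, missval='0'):
    # Same flags as the converter above; passing a list avoids shell quoting.
    basename = os.path.split(inpedfilepath)[-1]
    outroot = os.path.join(outfilepath, basename)
    cmd = [plink, '--noweb', '--file', inpedfilepath,
           '--make-bed', '--out', outroot,
           '--missing-genotype', missval]
    return subprocess.call(cmd, cwd=outfilepath)

# Hypothetical usage; paths are placeholders:
# run_plink_lped_to_pbed('/usr/local/bin/plink', '/tmp/mydata', '/tmp/out')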
commit/galaxy-central: dannon: Strip trailing whitespace in data_providers
by commits-noreply@bitbucket.org 29 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/f33687b0e590/
Changeset: f33687b0e590
User: dannon
Date: 2013-08-30 05:32:37
Summary: Strip trailing whitespace in data_providers
Affected #: 1 file
diff -r c05e0e9714f5bd2ecfa96cfd6b6f7152b815e82f -r f33687b0e59013a924a696f1c9630cae960b74a2 lib/galaxy/visualization/data_providers/basic.py
--- a/lib/galaxy/visualization/data_providers/basic.py
+++ b/lib/galaxy/visualization/data_providers/basic.py
@@ -15,37 +15,37 @@
self.original_dataset = original_dataset
self.dependencies = dependencies
self.error_max_vals = error_max_vals
-
+
def has_data( self, **kwargs ):
"""
Returns true if dataset has data in the specified genome window, false
otherwise.
"""
raise Exception( "Unimplemented Function" )
-
+
def get_iterator( self, **kwargs ):
"""
Returns an iterator that provides data in the region chrom:start-end
"""
raise Exception( "Unimplemented Function" )
-
+
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Process data from an iterator to a format that can be provided to client.
"""
- raise Exception( "Unimplemented Function" )
-
+ raise Exception( "Unimplemented Function" )
+
def get_data( self, chrom, start, end, start_val=0, max_vals=sys.maxint, **kwargs ):
- """
- Returns data as specified by kwargs. start_val is the first element to
+ """
+ Returns data as specified by kwargs. start_val is the first element to
return and max_vals indicates the number of values to return.
-
+
Return value must be a dictionary with the following attributes:
dataset_type, data
"""
iterator = self.get_iterator( chrom, start, end )
return self.process_data( iterator, start_val, max_vals, **kwargs )
-
+
def write_data_to_file( self, filename, **kwargs ):
"""
Write data in region defined by chrom, start, and end to a file.
@@ -56,20 +56,20 @@
class ColumnDataProvider( BaseDataProvider ):
""" Data provider for columnar data """
MAX_LINES_RETURNED = 30000
-
+
def __init__( self, original_dataset, max_lines_returned=MAX_LINES_RETURNED ):
# Compatibility check.
if not isinstance( original_dataset.datatype, Tabular ):
raise Exception( "Data provider can only be used with tabular data" )
-
+
# Attribute init.
self.original_dataset = original_dataset
# allow throttling
self.max_lines_returned = max_lines_returned
-
+
def get_data( self, columns=None, start_val=0, max_vals=None, skip_comments=True, **kwargs ):
"""
- Returns data from specified columns in dataset. Format is list of lists
+ Returns data from specified columns in dataset. Format is list of lists
where each list is a line of data.
"""
if not columns:
@@ -81,20 +81,20 @@
max_vals = min([ max_vals, self.max_lines_returned ])
except ( ValueError, TypeError ):
max_vals = self.max_lines_returned
-
+
try:
start_val = int( start_val )
start_val = max([ start_val, 0 ])
except ( ValueError, TypeError ):
start_val = 0
-
+
# skip comment lines (if any/avail)
# pre: should have original_dataset and
if( skip_comments
and self.original_dataset.metadata.comment_lines
and start_val < self.original_dataset.metadata.comment_lines ):
start_val = int( self.original_dataset.metadata.comment_lines )
-
+
# columns is an array of ints for now (should handle column names later)
columns = from_json_string( columns )
for column in columns:
@@ -103,7 +103,7 @@
"column index (%d) must be positive and less" % ( column )
+ " than the number of columns: %d" % ( self.original_dataset.metadata.columns ) )
#print columns, start_val, max_vals, skip_comments, kwargs
-
+
# set up the response, column lists
response = {}
response[ 'data' ] = data = [ [] for column in columns ]
@@ -113,9 +113,9 @@
'count' : 0,
'sum' : 0
} for column in columns ]
-
+
column_types = [ self.original_dataset.metadata.column_types[ column ] for column in columns ]
-
+
# function for casting by column_types
def cast_val( val, type ):
""" Cast value based on type. Return None if can't be cast """
@@ -126,12 +126,12 @@
try: val = float( val )
except: return None
return val
-
+
returning_data = False
f = open( self.original_dataset.file_name )
#TODO: add f.seek if given fptr in kwargs
for count, line in enumerate( f ):
-
+
# check line v. desired start, end
if count < start_val:
continue
@@ -139,7 +139,7 @@
break
returning_data = True
-
+
fields = line.split()
fields_len = len( fields )
#NOTE: this will return None/null for abberrant column values (including bad indeces)
@@ -149,39 +149,39 @@
if column < fields_len:
column_val = cast_val( fields[ column ], column_type )
if column_val != None:
-
+
# if numeric, maintain min, max, sum
if( column_type == 'float' or column_type == 'int' ):
if( ( meta[ index ][ 'min' ] == None ) or ( column_val < meta[ index ][ 'min' ] ) ):
meta[ index ][ 'min' ] = column_val
-
+
if( ( meta[ index ][ 'max' ] == None ) or ( column_val > meta[ index ][ 'max' ] ) ):
meta[ index ][ 'max' ] = column_val
-
+
meta[ index ][ 'sum' ] += column_val
-
+
# maintain a count - for other stats
meta[ index ][ 'count' ] += 1
data[ index ].append( column_val )
-
+
response[ 'endpoint' ] = dict( last_line=( count - 1 ), file_ptr=f.tell() )
f.close()
if not returning_data: return None
-
+
for index, meta in enumerate( response[ 'meta' ] ):
column_type = column_types[ index ]
count = meta[ 'count' ]
-
+
if( ( column_type == 'float' or column_type == 'int' )
and count ):
meta[ 'mean' ] = float( meta[ 'sum' ] ) / count
-
+
sorted_data = sorted( response[ 'data' ][ index ] )
middle_index = ( count / 2 ) - 1
if count % 2 == 0:
meta[ 'median' ] = ( ( sorted_data[ middle_index ] + sorted_data[( middle_index + 1 )] ) / 2.0 )
-
+
else:
meta[ 'median' ] = sorted_data[ middle_index ]
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
commit/galaxy-central: dannon: Realizing it's abstract, still require appropriate inputs to get_data in BaseDataProvider
by commits-noreply@bitbucket.org 29 Aug '13
1 new commit in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/c05e0e9714f5/
Changeset: c05e0e9714f5
User: dannon
Date: 2013-08-30 05:31:55
Summary: Realizing it's abstract, still require appropriate inputs to get_data in BaseDataProvider
Affected #: 1 file
diff -r c9b77dbc13165b020dfbe67b88faa99e5bf4d9fb -r c05e0e9714f5bd2ecfa96cfd6b6f7152b815e82f lib/galaxy/visualization/data_providers/basic.py
--- a/lib/galaxy/visualization/data_providers/basic.py
+++ b/lib/galaxy/visualization/data_providers/basic.py
@@ -35,7 +35,7 @@
"""
raise Exception( "Unimplemented Function" )
- def get_data( self, start_val=0, max_vals=sys.maxint, **kwargs ):
+ def get_data( self, chrom, start, end, start_val=0, max_vals=sys.maxint, **kwargs ):
"""
Returns data as specified by kwargs. start_val is the first element to
return and max_vals indicates the number of values to return.
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
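
[Illustrative aside, not part of the commit above.] The point of the signature change is that BaseDataProvider.get_data forwards chrom, start and end to get_iterator, so those names have to be declared parameters rather than something hoped for in **kwargs. A hypothetical reduction of the class shape in the diff (sys.maxint keeps the Python 2 flavour of the original):

import sys

class BaseDataProviderSketch(object):
    def get_iterator(self, chrom, start, end, **kwargs):
        raise Exception("Unimplemented Function")

    def process_data(self, iterator, start_val=0, max_vals=None, **kwargs):
        raise Exception("Unimplemented Function")

    def get_data(self, chrom, start, end, start_val=0, max_vals=sys.maxint, **kwargs):
        # chrom/start/end are handed straight to get_iterator, so they must
        # be real parameters of get_data, even though subclasses override it.
        iterator = self.get_iterator(chrom, start, end)
        return self.process_data(iterator, start_val, max_vals, **kwargs)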
2 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/7038567aedc2/
Changeset: 7038567aedc2
User: dannon
Date: 2013-08-30 05:24:24
Summary: Add missing os import to ngsindex.py
Affected #: 1 file
diff -r 068acf051f9acfb8058f2bc50b0361d9a59d8cdb -r 7038567aedc2978d43386838441d3536a3d34650 lib/galaxy/datatypes/ngsindex.py
--- a/lib/galaxy/datatypes/ngsindex.py
+++ b/lib/galaxy/datatypes/ngsindex.py
@@ -1,7 +1,9 @@
"""
NGS indexes
"""
+import os
import logging
+
from metadata import MetadataElement
from images import Html
https://bitbucket.org/galaxy/galaxy-central/commits/c9b77dbc1316/
Changeset: c9b77dbc1316
User: dannon
Date: 2013-08-30 05:26:57
Summary: Data manager element loading assertion would fail if executed; use elem.tag and not root.tag
Affected #: 1 file
diff -r 7038567aedc2978d43386838441d3536a3d34650 -r c9b77dbc13165b020dfbe67b88faa99e5bf4d9fb lib/galaxy/tools/data_manager/manager.py
--- a/lib/galaxy/tools/data_manager/manager.py
+++ b/lib/galaxy/tools/data_manager/manager.py
@@ -2,7 +2,7 @@
pkg_resources.require( "simplejson" )
-import os, shutil, errno
+import os, errno
import simplejson
from galaxy import util
@@ -112,7 +112,7 @@
if elem is not None:
self.load_from_element( elem, tool_path or self.data_managers.tool_path )
def load_from_element( self, elem, tool_path ):
- assert elem.tag == 'data_manager', 'A data manager configuration must have a "data_manager" tag as the root. "%s" is present' % ( root.tag )
+ assert elem.tag == 'data_manager', 'A data manager configuration must have a "data_manager" tag as the root. "%s" is present' % ( elem.tag )
self.declared_id = elem.get( 'id', None )
self.guid = elem.get( 'guid', None )
path = elem.get( 'tool_file', None )
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.
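
[Illustrative aside, not part of the commits above.] The assertion fix is worth spelling out: Python only evaluates an assert's message expression when the condition is false, so the old reference to the undefined name root.tag meant a bad configuration would surface as a NameError instead of the intended AssertionError. A hypothetical minimal reproduction with a stand-in element class:

class FakeElem(object):
    def __init__(self, tag):
        self.tag = tag

def load_from_element(elem):
    # The message is only built when the assertion fails; referencing an
    # undefined name there (as the old code did with root.tag) would raise
    # NameError at exactly that moment instead of AssertionError.
    assert elem.tag == 'data_manager', \
        'A data manager configuration must have a "data_manager" tag as the root. "%s" is present' % (elem.tag)

load_from_element(FakeElem('data_manager'))   # passes silently
# load_from_element(FakeElem('tool'))         # AssertionError naming the offending tag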
5 new commits in galaxy-central:
https://bitbucket.org/galaxy/galaxy-central/commits/fcabfb819232/
Changeset: fcabfb819232
User: dannon
Date: 2013-08-30 05:06:53
Summary: Explicitly require source argument to RawBedDataProvider.get_iterator instead of relying on it in kwargs
Affected #: 1 file
diff -r facc879fe0543f25e6b4d65e3e5d5efe716ff455 -r fcabfb81923220651c0dd95181eed24d1b21ac68 lib/galaxy/visualization/data_providers/genome.py
--- a/lib/galaxy/visualization/data_providers/genome.py
+++ b/lib/galaxy/visualization/data_providers/genome.py
@@ -568,7 +568,7 @@
for large datasets.
"""
- def get_iterator( self, chrom=None, start=None, end=None, **kwargs ):
+ def get_iterator( self, source, chrom=None, start=None, end=None, **kwargs ):
# Read first line in order to match chrom naming format.
line = source.readline()
dataset_chrom = line.split()[0]
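[Illustrative aside, not part of the commit above.] The change is more than style: the method body reads from source, which previously was only bound if a caller happened to pass it in **kwargs, so the first readline() would raise NameError. A hypothetical sketch of the corrected shape, with a StringIO stand-in (Python 2, matching the code in the diff) for the BED source:

import StringIO  # hypothetical stand-in for the real file-like source

class RawBedDataProviderSketch(object):
    def get_iterator(self, source, chrom=None, start=None, end=None, **kwargs):
        # 'source' is now an explicit required argument; with the old
        # signature it was never bound, so source.readline() raised NameError.
        line = source.readline()
        dataset_chrom = line.split()[0]
        return dataset_chrom

bed = StringIO.StringIO("chr1\t100\t200\tfeature1\n")
print(RawBedDataProviderSketch().get_iterator(bed, chrom='chr1'))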
https://bitbucket.org/galaxy/galaxy-central/commits/3314e402ebaf/
Changeset: 3314e402ebaf
User: dannon
Date: 2013-08-30 05:10:02
Summary: Add missing import parse_gff_attributes to genome data provider
Affected #: 1 file
diff -r fcabfb81923220651c0dd95181eed24d1b21ac68 -r 3314e402ebaf326d57665615ba0e2e7b83dcc330 lib/galaxy/visualization/data_providers/genome.py
--- a/lib/galaxy/visualization/data_providers/genome.py
+++ b/lib/galaxy/visualization/data_providers/genome.py
@@ -9,7 +9,7 @@
pkg_resources.require( "pysam" )
pkg_resources.require( "numpy" )
import numpy
-from galaxy.datatypes.util.gff_util import GFFReaderWrapper, GFFInterval, GFFFeature, convert_gff_coords_to_bed
+from galaxy.datatypes.util.gff_util import convert_gff_coords_to_bed, GFFFeature, GFFInterval, GFFReaderWrapper, parse_gff_attributes
from galaxy.util.json import from_json_string
from bx.interval_index_file import Indexes
from bx.bbi.bigwig_file import BigWigFile
https://bitbucket.org/galaxy/galaxy-central/commits/4b86e65ee645/
Changeset: 4b86e65ee645
User: dannon
Date: 2013-08-30 05:14:17
Summary: Strip unused imports (and trailing whitespace) from genome data provider
Affected #: 1 file
diff -r 3314e402ebaf326d57665615ba0e2e7b83dcc330 -r 4b86e65ee645caa6b5923b05e759a06a9f06113f lib/galaxy/visualization/data_providers/genome.py
--- a/lib/galaxy/visualization/data_providers/genome.py
+++ b/lib/galaxy/visualization/data_providers/genome.py
@@ -3,7 +3,6 @@
"""
import os, sys, re
-from math import ceil, log
import pkg_resources
pkg_resources.require( "bx-python" )
pkg_resources.require( "pysam" )
@@ -14,7 +13,6 @@
from bx.interval_index_file import Indexes
from bx.bbi.bigwig_file import BigWigFile
from bx.bbi.bigbed_file import BigBedFile
-from galaxy.util.lrucache import LRUCache
from galaxy.visualization.data_providers.basic import BaseDataProvider
from galaxy.visualization.data_providers.cigar import get_ref_based_read_seq_and_cigar
from galaxy.datatypes.interval import Bed, Gff, Gtf
@@ -33,7 +31,7 @@
return None
else:
return float(n)
-
+
def get_bounds( reads, start_pos_index, end_pos_index ):
'''
Returns the minimum and maximum position for a set of reads.
@@ -76,7 +74,7 @@
line_len = int( textloc_file.readline() )
file_len = os.path.getsize( self.converted_dataset.file_name )
query = query.lower()
-
+
# Find query in file using binary search.
low = 0
high = file_len / line_len
@@ -91,42 +89,42 @@
low = mid + 1
else:
high = mid
-
+
position = low * line_len
-
+
# At right point in file, generate hits.
result = []
while True:
line = textloc_file.readline()
- if not line.startswith( query ):
+ if not line.startswith( query ):
break
- if line[ -1: ] == '\n':
+ if line[ -1: ] == '\n':
line = line[ :-1 ]
result.append( line.split()[1:] )
- textloc_file.close()
+ textloc_file.close()
return result
-
+
class GenomeDataProvider( BaseDataProvider ):
- """
- Base class for genome data providers. All genome providers use BED coordinate
+ """
+ Base class for genome data providers. All genome providers use BED coordinate
format (0-based, half-open coordinates) for both queries and returned data.
"""
dataset_type = None
-
- """
+
+ """
Mapping from column name to payload data; this mapping is used to create
- filters. Key is column name, value is a dict with mandatory key 'index' and
+ filters. Key is column name, value is a dict with mandatory key 'index' and
optional key 'name'. E.g. this defines column 4
col_name_data_attr_mapping = {4 : { index: 5, name: 'Score' } }
"""
col_name_data_attr_mapping = {}
-
+
def __init__( self, converted_dataset=None, original_dataset=None, dependencies=None,
error_max_vals="Only the first %i %s in this region are displayed." ):
- super( GenomeDataProvider, self ).__init__( converted_dataset=converted_dataset,
+ super( GenomeDataProvider, self ).__init__( converted_dataset=converted_dataset,
original_dataset=original_dataset,
dependencies=dependencies,
error_max_vals=error_max_vals )
@@ -135,44 +133,44 @@
# queries, such as is necessary for genome-wide data.
# TODO: add functions to (a) create data_file and (b) clean up data_file.
self.data_file = None
-
+
def write_data_to_file( self, regions, filename ):
"""
Write data in region defined by chrom, start, and end to a file.
"""
raise Exception( "Unimplemented Function" )
-
+
def valid_chroms( self ):
"""
Returns chroms/contigs that the dataset contains
"""
return None # by default
-
+
def has_data( self, chrom, start, end, **kwargs ):
"""
Returns true if dataset has data in the specified genome window, false
otherwise.
"""
raise Exception( "Unimplemented Function" )
-
+
def get_iterator( self, chrom, start, end, **kwargs ):
"""
Returns an iterator that provides data in the region chrom:start-end
"""
raise Exception( "Unimplemented Function" )
-
+
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Process data from an iterator to a format that can be provided to client.
"""
- raise Exception( "Unimplemented Function" )
-
+ raise Exception( "Unimplemented Function" )
+
def get_data( self, chrom=None, low=None, high=None, start_val=0, max_vals=sys.maxint, **kwargs ):
- """
+ """
Returns data in region defined by chrom, start, and end. start_val and
- max_vals are used to denote the data to return: start_val is the first element to
+ max_vals are used to denote the data to return: start_val is the first element to
return and max_vals indicates the number of values to return.
-
+
Return value must be a dictionary with the following attributes:
dataset_type, data
"""
@@ -204,12 +202,12 @@
'dataset_type': self.dataset_type
}
-
+
def get_filters( self ):
- """
- Returns filters for provider's data. Return value is a list of
+ """
+ Returns filters for provider's data. Return value is a list of
filters; each filter is a dictionary with the keys 'name', 'index', 'type'.
- NOTE: This method uses the original dataset's datatype and metadata to
+ NOTE: This method uses the original dataset's datatype and metadata to
create the filters.
"""
# Get column names.
@@ -220,18 +218,18 @@
column_names = range( self.original_dataset.metadata.columns )
except: # Give up
return []
-
+
# Dataset must have column types; if not, cannot create filters.
try:
column_types = self.original_dataset.metadata.column_types
except AttributeError:
return []
-
+
# Create and return filters.
filters = []
if self.original_dataset.metadata.viz_filter_cols:
for viz_col_index in self.original_dataset.metadata.viz_filter_cols:
- # Some columns are optional, so can't assume that a filter
+ # Some columns are optional, so can't assume that a filter
# column is in dataset.
if viz_col_index >= len( column_names ):
continue;
@@ -248,7 +246,7 @@
def get_default_max_vals( self ):
return 5000
-
+
#
# -- Base mixins and providers --
#
@@ -256,26 +254,26 @@
class FilterableMixin:
def get_filters( self ):
""" Returns a dataset's filters. """
-
+
# is_ functions taken from Tabular.set_meta
def is_int( column_text ):
try:
int( column_text )
return True
- except:
+ except:
return False
def is_float( column_text ):
try:
float( column_text )
return True
- except:
+ except:
if column_text.strip().lower() == 'na':
return True #na is special cased to be a float
return False
-
+
#
# Get filters.
- # TODOs:
+ # TODOs:
# (a) might be useful to move this into each datatype's set_meta method;
# (b) could look at first N lines to ensure GTF attribute types are consistent.
#
@@ -284,9 +282,9 @@
filter_col = 8
if isinstance( self.original_dataset.datatype, Gff ):
# Can filter by score and GTF attributes.
- filters = [ { 'name': 'Score',
- 'type': 'number',
- 'index': filter_col,
+ filters = [ { 'name': 'Score',
+ 'type': 'number',
+ 'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c6' } ]
filter_col += 1
@@ -294,10 +292,10 @@
# Create filters based on dataset metadata.
for name, a_type in self.original_dataset.metadata.attribute_types.items():
if a_type in [ 'int', 'float' ]:
- filters.append(
+ filters.append(
{ 'name': name,
- 'type': 'number',
- 'index': filter_col,
+ 'type': 'number',
+ 'index': filter_col,
'tool_id': 'gff_filter_by_attribute',
'tool_exp_name': name } )
filter_col += 1
@@ -324,9 +322,9 @@
'''
elif isinstance( self.original_dataset.datatype, Bed ):
# Can filter by score column only.
- filters = [ { 'name': 'Score',
- 'type': 'number',
- 'index': filter_col,
+ filters = [ { 'name': 'Score',
+ 'type': 'number',
+ 'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c5'
} ]
@@ -340,19 +338,19 @@
"""
Tabix index data provider for the Galaxy track browser.
"""
-
+
col_name_data_attr_mapping = { 4 : { 'index': 4 , 'name' : 'Score' } }
-
+
def get_iterator( self, chrom, start, end, **kwargs ):
start, end = int(start), int(end)
if end >= (2<<29):
end = (2<<29 - 1) # Tabix-enforced maximum
-
+
bgzip_fname = self.dependencies['bgzip'].file_name
-
+
if not self.data_file:
self.data_file = ctabix.Tabixfile(bgzip_fname, index_filename=self.converted_dataset.file_name)
-
+
# Get iterator using either naming scheme.
iterator = iter( [] )
if chrom in self.data_file.contigs:
@@ -365,10 +363,10 @@
return iterator
-
+
def write_data_to_file( self, regions, filename ):
out = open( filename, "w" )
-
+
for region in regions:
# Write data in region.
chrom = region.chrom
@@ -377,7 +375,7 @@
iterator = self.get_iterator( chrom, start, end )
for line in iterator:
out.write( "%s\n" % line )
-
+
out.close()
#
@@ -389,20 +387,20 @@
"""
Processes interval data from native format to payload format.
-
+
Payload format: [ uid (offset), start, end, name, strand, thick_start, thick_end, blocks ]
"""
-
+
def get_iterator( self, chrom, start, end, **kwargs ):
raise Exception( "Unimplemented Function" )
-
+
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Provides
"""
# Build data to return. Payload format is:
# [ <guid/offset>, <start>, <end>, <name>, <strand> ]
- #
+ #
# First three entries are mandatory, others are optional.
#
filter_cols = from_json_string( kwargs.get( "filter_cols", "[]" ) )
@@ -421,7 +419,7 @@
if max_vals and count-start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "features" )
break
-
+
feature = line.split()
length = len(feature)
# Unique id is just a hash of the line
@@ -439,7 +437,7 @@
if not name_col: payload.append( "" )
payload.append( feature[strand_col] )
- # Score (filter data)
+ # Score (filter data)
if length >= 5 and filter_cols and filter_cols[0] == "Score":
try:
payload.append( float( feature[4] ) )
@@ -467,23 +465,23 @@
class BedDataProvider( GenomeDataProvider ):
"""
Processes BED data from native format to payload format.
-
+
Payload format: [ uid (offset), start, end, name, strand, thick_start, thick_end, blocks ]
"""
dataset_type = 'interval_index'
-
+
def get_iterator( self, chrom, start, end, **kwargs ):
raise Exception( "Unimplemented Method" )
-
+
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Provides
"""
# Build data to return. Payload format is:
- # [ <guid/offset>, <start>, <end>, <name>, <strand>, <thick_start>,
+ # [ <guid/offset>, <start>, <end>, <name>, <strand>, <thick_start>,
# <thick_end>, <blocks> ]
- #
+ #
# First three entries are mandatory, others are optional.
#
filter_cols = from_json_string( kwargs.get( "filter_cols", "[]" ) )
@@ -524,10 +522,10 @@
blocks = zip( block_sizes, block_starts )
payload.append( [ ( int(feature[1]) + block[1], int(feature[1]) + block[1] + block[0] ) for block in blocks ] )
- # Score (filter data)
+ # Score (filter data)
if length >= 5 and filter_cols and filter_cols[0] == "Score":
- # If dataset doesn't have name/strand/thick start/thick end/blocks,
- # add placeholders. There should be 8 entries if all attributes
+ # If dataset doesn't have name/strand/thick start/thick end/blocks,
+ # add placeholders. There should be 8 entries if all attributes
# are present.
payload.extend( [ None for i in range( 8 - len( payload ) ) ] )
@@ -542,7 +540,7 @@
def write_data_to_file( self, regions, filename ):
out = open( filename, "w" )
-
+
for region in regions:
# Write data in region.
chrom = region.chrom
@@ -551,15 +549,15 @@
iterator = self.get_iterator( chrom, start, end )
for line in iterator:
out.write( "%s\n" % line )
-
+
out.close()
-
+
class BedTabixDataProvider( TabixDataProvider, BedDataProvider ):
"""
Provides data from a BED file indexed via tabix.
"""
pass
-
+
class RawBedDataProvider( BedDataProvider ):
"""
Provide data from BED file.
@@ -590,7 +588,7 @@
or ( end is not None and feature_end < start ):
continue
yield line
-
+
return line_filter_iter()
#
@@ -601,10 +599,10 @@
"""
Abstract class that processes VCF data from native format to payload format.
- Payload format: An array of entries for each locus in the file. Each array
+ Payload format: An array of entries for each locus in the file. Each array
has the following entries:
1. GUID (unused)
- 2. location (0-based)
+ 2. location (0-based)
3. reference base(s)
4. alternative base(s)
5. quality score
@@ -613,20 +611,20 @@
denotes the reference genotype
8-end: allele counts for each alternative
"""
-
+
col_name_data_attr_mapping = { 'Qual' : { 'index': 6 , 'name' : 'Qual' } }
dataset_type = 'variant'
-
+
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Returns a dict with the following attributes::
- data - a list of variants with the format
+ data - a list of variants with the format
.. raw:: text
- [<guid>, <start>, <end>, <name>, cigar, seq]
+ [<guid>, <start>, <end>, <name>, cigar, seq]
message - error/informative message
@@ -636,8 +634,8 @@
def get_mapping( ref, alt ):
"""
- Returns ( offset, new_seq, cigar ) tuple that defines mapping of
- alt to ref. Cigar format is an array of [ op_index, length ] pairs
+ Returns ( offset, new_seq, cigar ) tuple that defines mapping of
+ alt to ref. Cigar format is an array of [ op_index, length ] pairs
where op_index is the 0-based index into the string "MIDNSHP=X"
"""
@@ -676,7 +674,7 @@
samples_data = feature [ 9: ]
# VCF is 1-based.
pos = int( pos ) - 1
-
+
# FIXME: OK to skip?
if alt == '.':
count -= 1
@@ -707,7 +705,7 @@
has_alleles = True
except ValueError:
pass
-
+
# If no alleles, use empty string as proxy.
if not has_alleles:
genotype = ''
@@ -732,7 +730,7 @@
def write_data_to_file( self, regions, filename ):
out = open( filename, "w" )
-
+
for region in regions:
# Write data in region.
chrom = region.chrom
@@ -747,7 +745,7 @@
"""
Provides data from a VCF file indexed via tabix.
"""
-
+
dataset_type = 'variant'
class RawVcfDataProvider( VcfDataProvider ):
@@ -797,17 +795,17 @@
for data_line in source:
if line_in_region( data_line, chrom, start, end ):
yield data_line
-
+
return line_filter_iter()
class BamDataProvider( GenomeDataProvider, FilterableMixin ):
"""
- Provides access to intervals from a sorted indexed BAM file. Coordinate
+ Provides access to intervals from a sorted indexed BAM file. Coordinate
data is reported in BED format: 0-based, half-open.
"""
dataset_type = 'bai'
-
+
def get_filters( self ):
"""
Returns filters for dataset.
@@ -815,31 +813,31 @@
# HACK: first 7 fields are for drawing, so start filter column index at 7.
filter_col = 7
filters = []
- filters.append( { 'name': 'Mapping Quality',
- 'type': 'number',
+ filters.append( { 'name': 'Mapping Quality',
+ 'type': 'number',
'index': filter_col
} )
return filters
-
-
+
+
def write_data_to_file( self, regions, filename ):
"""
Write reads in regions to file.
"""
-
+
# Open current BAM file using index.
bamfile = csamtools.Samfile( filename=self.original_dataset.file_name, mode='rb', \
index_filename=self.converted_dataset.file_name )
# TODO: write headers as well?
new_bamfile = csamtools.Samfile( template=bamfile, filename=filename, mode='wb' )
-
+
for region in regions:
# Write data from region.
chrom = region.chrom
start = region.start
end = region.end
-
+
try:
data = bamfile.fetch(start=start, end=end, reference=chrom)
except ValueError, e:
@@ -853,11 +851,11 @@
# Write reads in region.
for i, read in enumerate( data ):
new_bamfile.write( read )
-
+
# Cleanup.
new_bamfile.close()
bamfile.close()
-
+
def get_iterator( self, chrom, start, end, **kwargs ):
"""
Returns an iterator that provides data in the region chrom:start-end
@@ -865,7 +863,7 @@
start, end = int( start ), int( end )
orig_data_filename = self.original_dataset.file_name
index_filename = self.converted_dataset.file_name
-
+
# Attempt to open the BAM file with index
bamfile = csamtools.Samfile( filename=orig_data_filename, mode='rb', index_filename=index_filename )
try:
@@ -878,12 +876,12 @@
except ValueError:
return None
return data
-
+
def process_data( self, iterator, start_val=0, max_vals=None, ref_seq=None, start=0, **kwargs ):
"""
Returns a dict with the following attributes::
- data - a list of reads with the format
+ data - a list of reads with the format
[<guid>, <start>, <end>, <name>, <read_1>, <read_2>, [empty], <mapq_scores>]
where <read_1> has the format
@@ -895,10 +893,10 @@
Field 7 is empty so that mapq scores' location matches that in single-end reads.
For single-end reads, read has format:
[<guid>, <start>, <end>, <name>, <cigar>, <strand>, <seq>, <mapq_score>]
-
+
NOTE: read end and sequence data are not valid for reads outside of
requested region and should not be used.
-
+
max_low - lowest coordinate for the returned reads
max_high - highest coordinate for the returned reads
message - error/informative message
@@ -919,7 +917,7 @@
return "+"
else:
return "-"
-
+
#
# Encode reads as list of lists.
#
@@ -933,13 +931,13 @@
if ( count - start_val - unmapped ) >= max_vals:
message = self.error_max_vals % ( max_vals, "reads" )
break
-
+
# If not mapped, skip read.
is_mapped = ( read.flag & 0x0004 == 0 )
if not is_mapped:
unmapped += 1
continue
-
+
qname = read.qname
seq = read.seq
strand = decode_strand( read.flag, 0x0010 )
@@ -951,11 +949,11 @@
if read.is_proper_pair:
if qname in paired_pending: # one in dict is always first
pair = paired_pending[qname]
- results.append( [ "%i_%s" % ( pair['start'], qname ),
- pair['start'],
- read.pos + read_len,
- qname,
- [ pair['start'], pair['end'], pair['cigar'], pair['strand'], pair['seq'] ],
+ results.append( [ "%i_%s" % ( pair['start'], qname ),
+ pair['start'],
+ read.pos + read_len,
+ qname,
+ [ pair['start'], pair['end'], pair['cigar'], pair['strand'], pair['seq'] ],
[ read.pos, read.pos + read_len, read.cigar, strand, seq ],
None, [ pair['mapq'], read.mapq ]
] )
@@ -964,10 +962,10 @@
paired_pending[qname] = { 'start': read.pos, 'end': read.pos + read_len, 'seq': seq, 'mate_start': read.mpos,
'rlen': read_len, 'strand': strand, 'cigar': read.cigar, 'mapq': read.mapq }
else:
- results.append( [ "%i_%s" % ( read.pos, qname ),
- read.pos, read.pos + read_len, qname,
+ results.append( [ "%i_%s" % ( read.pos, qname ),
+ read.pos, read.pos + read_len, qname,
read.cigar, strand, read.seq, read.mapq ] )
-
+
# Take care of reads whose mates are out of range.
# TODO: count paired reads when adhering to max_vals?
for qname, read in paired_pending.iteritems():
@@ -989,7 +987,7 @@
r2 = [ read['mate_start'], read['mate_start'] ]
results.append( [ "%i_%s" % ( read_start, qname ), read_start, read_end, qname, r1, r2, [read[ 'mapq' ], 125] ] )
-
+
# Clean up. TODO: is this needed? If so, we'll need a cleanup function after processing the data.
# bamfile.close()
@@ -999,10 +997,10 @@
'''
Process a read using the designated fields.
'''
- read_seq, read_cigar = get_ref_based_read_seq_and_cigar( read[ seq_field ].upper(),
- read[ start_field ],
- ref_seq,
- start,
+ read_seq, read_cigar = get_ref_based_read_seq_and_cigar( read[ seq_field ].upper(),
+ read[ start_field ],
+ ref_seq,
+ start,
read[ cigar_field ] )
read[ seq_field ] = read_seq
read[ cigar_field ] = read_cigar
@@ -1012,7 +1010,7 @@
Process single-end read.
'''
process_read( read, 1, 4, 6)
-
+
def process_pe_read( read ):
'''
Process paired-end read.
@@ -1034,28 +1032,28 @@
process_se_read( read )
max_low, max_high = get_bounds( results, 1, 2 )
-
+
return { 'data': results, 'message': message, 'max_low': max_low, 'max_high': max_high }
-
+
class SamDataProvider( BamDataProvider ):
dataset_type = 'bai'
-
+
def __init__( self, converted_dataset=None, original_dataset=None, dependencies=None ):
""" Create SamDataProvider. """
super( SamDataProvider, self ).__init__( converted_dataset=converted_dataset,
original_dataset=original_dataset,
dependencies=dependencies )
-
- # To use BamDataProvider, original dataset must be BAM and
+
+ # To use BamDataProvider, original dataset must be BAM and
# converted dataset must be BAI. Use BAI from BAM metadata.
if converted_dataset:
self.original_dataset = converted_dataset
self.converted_dataset = converted_dataset.metadata.bam_index
-
+
class BBIDataProvider( GenomeDataProvider ):
"""
- BBI data provider for the Galaxy track browser.
+ BBI data provider for the Galaxy track browser.
"""
dataset_type = 'bigwig'
@@ -1063,7 +1061,7 @@
def valid_chroms( self ):
# No way to return this info as of now
return None
-
+
def has_data( self, chrom ):
f, bbi = self._get_dataset()
all_dat = bbi.query( chrom, 0, 2147483647, 1 ) or \
@@ -1081,18 +1079,18 @@
return bbi.summarize( chrom, start, end, num_points ) or \
bbi.summarize( _convert_between_ucsc_and_ensemble_naming( chrom ) , start, end, num_points )
- # Bigwig can be a standalone bigwig file, in which case we use
- # original_dataset, or coming from wig->bigwig conversion in
+ # Bigwig can be a standalone bigwig file, in which case we use
+ # original_dataset, or coming from wig->bigwig conversion in
# which we use converted_dataset
f, bbi = self._get_dataset()
-
+
# If stats requested, compute overall summary data for the range
- # start:endbut no reduced data. This is currently used by client
+ # start:endbut no reduced data. This is currently used by client
# to determine the default range.
if 'stats' in kwargs:
summary = _summarize_bbi( bbi, chrom, start, end, 1 )
f.close()
-
+
min_val = 0
max_val = 0
mean = 0
@@ -1127,12 +1125,12 @@
summary = _summarize_bbi( bbi, chrom, start, end, num_points )
if summary:
#mean = summary.sum_data / summary.valid_count
-
+
## Standard deviation by bin, not yet used
## var = summary.sum_squares - mean
## var /= minimum( valid_count - 1, 1 )
## sd = sqrt( var )
-
+
pos = start
step_size = (end - start) / num_points
@@ -1150,34 +1148,34 @@
num_points = end - start + 1
end += 1
else:
- #
- # The goal is to sample the region between start and end uniformly
- # using ~N (num_samples) data points. The challenge is that the size of
- # sampled intervals rarely is full bases, so sampling using N points
- # will leave the end of the region unsampled due to remainders for
- # each interval. To recitify this, a new N is calculated based on the
+ #
+ # The goal is to sample the region between start and end uniformly
+ # using ~N (num_samples) data points. The challenge is that the size of
+ # sampled intervals rarely is full bases, so sampling using N points
+ # will leave the end of the region unsampled due to remainders for
+ # each interval. To recitify this, a new N is calculated based on the
# step size that covers as much of the region as possible.
#
- # However, this still leaves some of the region unsampled. This
- # could be addressed by repeatedly sampling remainder using a
- # smaller and smaller step_size, but that would require iteratively
+ # However, this still leaves some of the region unsampled. This
+ # could be addressed by repeatedly sampling remainder using a
+ # smaller and smaller step_size, but that would require iteratively
# going to BBI, which could be time consuming.
#
# Start with N samples.
num_points = num_samples
step_size = ( end - start ) / num_points
- # Add additional points to sample in the remainder not covered by
+ # Add additional points to sample in the remainder not covered by
# the initial N samples.
remainder_start = start + step_size * num_points
additional_points = ( end - remainder_start ) / step_size
num_points += additional_points
-
+
result = summarize_region( bbi, chrom, start, end, num_points )
-
+
# Cleanup and return.
f.close()
- return {
+ return {
'data': result,
'dataset_type': self.dataset_type
}
@@ -1190,7 +1188,7 @@
class BigWigDataProvider ( BBIDataProvider ):
"""
- Provides data from BigWig files; position data is reported in 1-based
+ Provides data from BigWig files; position data is reported in 1-based
coordinate system, i.e. wiggle format.
"""
def _get_dataset( self ):
@@ -1199,7 +1197,7 @@
else:
f = open( self.original_dataset.file_name )
return f, BigWigFile(file=f)
-
+
class IntervalIndexDataProvider( FilterableMixin, GenomeDataProvider ):
"""
Interval index files used for GFF, Pileup files.
@@ -1207,7 +1205,7 @@
col_name_data_attr_mapping = { 4 : { 'index': 4 , 'name' : 'Score' } }
dataset_type = 'interval_index'
-
+
def write_data_to_file( self, regions, filename ):
source = open( self.original_dataset.file_name )
index = Indexes( self.converted_dataset.file_name )
@@ -1230,10 +1228,10 @@
feature = reader.next()
for interval in feature.intervals:
out.write( '\t'.join( interval.fields ) + '\n' )
-
+
source.close()
out.close()
-
+
def get_iterator( self, chrom, start, end, **kwargs ):
"""
Returns an array with values: (a) source file and (b) an iterator that
@@ -1246,7 +1244,7 @@
if chrom not in index.indexes:
# Try alternative naming.
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
-
+
return index.find(chrom, start, end)
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
@@ -1258,7 +1256,7 @@
# Build data to return. Payload format is:
# [ <guid/offset>, <start>, <end>, <name>, <score>, <strand>, <thick_start>,
# <thick_end>, <blocks> ]
- #
+ #
# First three entries are mandatory, others are optional.
#
filter_cols = from_json_string( kwargs.get( "filter_cols", "[]" ) )
@@ -1272,7 +1270,7 @@
break
source.seek( offset )
# TODO: can we use column metadata to fill out payload?
-
+
# GFF dataset.
reader = GFFReaderWrapper( source, fix_strand=True )
feature = reader.next()
@@ -1286,13 +1284,13 @@
class RawGFFDataProvider( GenomeDataProvider ):
"""
Provide data from GFF file that has not been indexed.
-
+
NOTE: this data provider does not use indices, and hence will be very slow
for large datasets.
"""
dataset_type = 'interval_index'
-
+
def get_iterator( self, chrom, start, end, **kwargs ):
"""
Returns an iterator that provides data in the region chrom:start-end as well as
@@ -1302,18 +1300,18 @@
# Read first line in order to match chrom naming format.
line = source.readline()
-
+
# If line empty, assume file is empty and return empty iterator.
if len( line ) == 0:
return iter([])
-
+
# Determine chromosome naming format.
dataset_chrom = line.split()[0]
if not _chrom_naming_matches( chrom, dataset_chrom ):
chrom = _convert_between_ucsc_and_ensemble_naming( chrom )
# Undo read.
source.seek( 0 )
-
+
def features_in_region_iter():
offset = 0
for feature in GFFReaderWrapper( source, fix_strand=True ):
@@ -1324,7 +1322,7 @@
offset += feature.raw_size
return features_in_region_iter()
-
+
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Process data from an iterator to a format that can be provided to client.
@@ -1340,22 +1338,22 @@
if count-start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "reads" )
break
-
+
payload = package_gff_feature( feature, no_detail=no_detail, filter_cols=filter_cols )
payload.insert( 0, offset )
results.append( payload )
-
+
return { 'data': results, 'dataset_type': self.dataset_type, 'message': message }
-
+
class GtfTabixDataProvider( TabixDataProvider ):
"""
Returns data from GTF datasets that are indexed via tabix.
"""
-
+
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
# Loop through lines and group by transcript_id; each group is a feature.
-
+
# TODO: extend this code or use code in gff_util to process GFF/3 as well
# and then create a generic GFFDataProvider that can be used with both
# raw and tabix datasets.
@@ -1369,7 +1367,7 @@
feature = []
features[ transcript_id ] = feature
feature.append( GFFInterval( None, line.split( '\t') ) )
-
+
# Process data.
filter_cols = from_json_string( kwargs.get( "filter_cols", "[]" ) )
no_detail = ( "no_detail" in kwargs )
@@ -1382,12 +1380,12 @@
if count-start_val >= max_vals:
message = self.error_max_vals % ( max_vals, "reads" )
break
-
- feature = GFFFeature( None, intervals=intervals )
+
+ feature = GFFFeature( None, intervals=intervals )
payload = package_gff_feature( feature, no_detail=no_detail, filter_cols=filter_cols )
payload.insert( 0, feature.intervals[ 0 ].attributes[ 'transcript_id' ] )
results.append( payload )
-
+
return { 'data': results, 'message': message }
#
@@ -1397,26 +1395,26 @@
class ENCODEPeakDataProvider( GenomeDataProvider ):
"""
Abstract class that processes ENCODEPeak data from native format to payload format.
-
+
Payload format: [ uid (offset), start, end, name, strand, thick_start, thick_end, blocks ]
"""
-
+
def get_iterator( self, chrom, start, end, **kwargs ):
raise "Unimplemented Method"
-
+
def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
"""
Provides
"""
-
+
## FIXMEs:
# (1) should be able to unify some of this code with BedDataProvider.process_data
# (2) are optional number of parameters supported?
-
+
# Build data to return. Payload format is:
# [ <guid/offset>, <start>, <end>, <name>, <strand>, <thick_start>,
# <thick_end>, <blocks> ]
- #
+ #
# First three entries are mandatory, others are optional.
#
no_detail = ( "no_detail" in kwargs )
@@ -1431,16 +1429,16 @@
feature = line.split()
length = len( feature )
-
+
# Feature initialization.
payload = [
# GUID is just a hash of the line
hash( line ),
# Add start, end.
- int( feature[1] ),
+ int( feature[1] ),
int( feature[2] )
]
-
+
if no_detail:
rval.append( payload )
continue
@@ -1448,7 +1446,7 @@
# Extend with additional data.
payload.extend( [
# Add name, strand.
- feature[3],
+ feature[3],
feature[5],
# Thick start, end are feature start, end for now.
int( feature[1] ),
@@ -1465,12 +1463,12 @@
rval.append( payload )
return { 'data': rval, 'message': message }
-
+
class ENCODEPeakTabixDataProvider( TabixDataProvider, ENCODEPeakDataProvider ):
"""
Provides data from an ENCODEPeak dataset indexed via tabix.
"""
-
+
def get_filters( self ):
"""
Returns filters for dataset.
@@ -1478,26 +1476,26 @@
# HACK: first 8 fields are for drawing, so start filter column index at 9.
filter_col = 8
filters = []
- filters.append( { 'name': 'Score',
- 'type': 'number',
+ filters.append( { 'name': 'Score',
+ 'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c6' } )
filter_col += 1
- filters.append( { 'name': 'Signal Value',
- 'type': 'number',
+ filters.append( { 'name': 'Signal Value',
+ 'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c7' } )
filter_col += 1
- filters.append( { 'name': 'pValue',
- 'type': 'number',
+ filters.append( { 'name': 'pValue',
+ 'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c8' } )
filter_col += 1
- filters.append( { 'name': 'qValue',
- 'type': 'number',
+ filters.append( { 'name': 'qValue',
+ 'type': 'number',
'index': filter_col,
'tool_id': 'Filter1',
'tool_exp_name': 'c9' } )
@@ -1523,7 +1521,7 @@
feature = line.split()
length = len( feature )
-
+
s1 = int( feature[1] )
e1 = int( feature[2] )
c = feature[3]
@@ -1538,14 +1536,14 @@
# Add start1, end1, chr2, start2, end2, value.
s1, e1, c, s2, e2, v
]
-
+
rval.append( payload )
return { 'data': rval, 'message': message }
def get_default_max_vals( self ):
return 100000;
-
+
class ChromatinInteractionsTabixDataProvider( TabixDataProvider, ChromatinInteractionsDataProvider ):
def get_iterator( self, chrom, start=0, end=sys.maxint, interchromosomal=False, **kwargs ):
"""
@@ -1556,7 +1554,7 @@
def filter( iter ):
for line in iter:
feature = line.split()
- s1 = int( feature[1] )
+ s1 = int( feature[1] )
e1 = int( feature[2] )
c = feature[3]
s2 = int( feature[4] )
@@ -1568,22 +1566,22 @@
if interchromosomal and c != chrom:
yield line
return filter( TabixDataProvider.get_iterator( self, chrom, filter_start, end ) )
-
-#
+
+#
# -- Helper methods. --
#
def package_gff_feature( feature, no_detail=False, filter_cols=[] ):
""" Package a GFF feature in an array for data providers. """
feature = convert_gff_coords_to_bed( feature )
-
+
# No detail means only start, end.
if no_detail:
return [ feature.start, feature.end ]
-
+
# Return full feature.
- payload = [ feature.start,
- feature.end,
+ payload = [ feature.start,
+ feature.end,
feature.name(),
feature.strand,
# No notion of thick start, end in GFF, so make everything
@@ -1591,9 +1589,9 @@
feature.start,
feature.end
]
-
- # HACK: ignore interval with name 'transcript' from feature.
- # Cufflinks puts this interval in each of its transcripts,
+
+ # HACK: ignore interval with name 'transcript' from feature.
+ # Cufflinks puts this interval in each of its transcripts,
# and they mess up trackster by covering the feature's blocks.
# This interval will always be a feature's first interval,
# and the GFF's third column is its feature name.
@@ -1605,7 +1603,7 @@
block_starts = [ ( interval.start - feature.start ) for interval in feature_intervals ]
blocks = zip( block_sizes, block_starts )
payload.append( [ ( feature.start + block[1], feature.start + block[1] + block[0] ) for block in blocks ] )
-
+
# Add filter data to payload.
for col in filter_cols:
if col == "Score":
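
The comment block in the BBIDataProvider hunk above lays out the sampling strategy: take roughly N points across start..end, then top up with extra points because an integer step size rarely covers the region exactly. Below is a minimal standalone sketch of that arithmetic, not the provider itself; the function name is hypothetical, integer division (//) mirrors the Python 2 `/` on ints used in the original, and the narrower case end - start < num_samples is handled by the separate branch shown earlier in the hunk.

    def bbi_sampling_points( start, end, num_samples ):
        # Start with ~N samples across the region.
        num_points = num_samples
        step_size = ( end - start ) // num_points
        # The integer step leaves part of the region uncovered at the end;
        # sample that remainder with additional points at the same step
        # rather than re-querying the BBI file iteratively.
        remainder_start = start + step_size * num_points
        additional_points = ( end - remainder_start ) // step_size
        num_points += additional_points
        return num_points, step_size

    # Example: region 0..1500 with ~1000 requested samples gives step_size 1,
    # remainder_start 1000, 500 additional points -> 1500 points in total.
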
https://bitbucket.org/galaxy/galaxy-central/commits/d2a624fd6dc2/
Changeset: d2a624fd6dc2
User: dannon
Date: 2013-08-30 05:17:36
Summary: dataprovider dataset cleanup, add missing bx.bbi import
Affected #: 1 file
diff -r 4b86e65ee645caa6b5923b05e759a06a9f06113f -r d2a624fd6dc2fecdc319848f3d35c2f4b66a389e lib/galaxy/datatypes/dataproviders/dataset.py
--- a/lib/galaxy/datatypes/dataproviders/dataset.py
+++ b/lib/galaxy/datatypes/dataproviders/dataset.py
@@ -6,19 +6,18 @@
(e.g. parsing genomic regions from their source)
"""
-from galaxy import eggs
-import pkg_resources
-pkg_resources.require( 'bx-python' )
-from bx import seq as bx_seq
-from bx import wiggle as bx_wig
-
-import exceptions
import base
import line
import column
import external
+from galaxy import eggs
+eggs.require( 'bx-python' )
+from bx import seq as bx_seq
+from bx import wiggle as bx_wig
+from bx import bbi as bx_bbi
+
_TODO = """
use bx as much as possible
gff3 hierarchies
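
This changeset resolves the bundled bx-python egg through Galaxy's eggs module before any bx import and adds the bx.bbi package used by the BBI providers. A minimal sketch of the resulting pattern follows; the file path is hypothetical, and the BigWigFile/summarize usage mirrors the BigWigDataProvider code in the first diff above rather than dataset.py itself.

    from galaxy import eggs
    eggs.require( 'bx-python' )                    # resolve the bundled egg before importing bx
    from bx.bbi.bigwig_file import BigWigFile      # in a stock Galaxy checkout, bx is importable only after require()

    f = open( '/path/to/converted.bigwig' )        # hypothetical path
    bbi = BigWigFile( file=f )
    summary = bbi.summarize( 'chr1', 0, 10000, 10 )  # 10 summary bins over chr1:0-10,000
    f.close()
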
https://bitbucket.org/galaxy/galaxy-central/commits/068acf051f9a/
Changeset: 068acf051f9a
User: dannon
Date: 2013-08-30 05:20:05
Summary: Variable confusion in dataproviders/dataset -- clarify and use correct indices var
Affected #: 1 file
diff -r d2a624fd6dc2fecdc319848f3d35c2f4b66a389e -r 068acf051f9acfb8058f2bc50b0361d9a59d8cdb lib/galaxy/datatypes/dataproviders/dataset.py
--- a/lib/galaxy/datatypes/dataproviders/dataset.py
+++ b/lib/galaxy/datatypes/dataproviders/dataset.py
@@ -145,10 +145,10 @@
:returns: list of column indeces for the named columns.
"""
region_column_names = ( 'chromCol', 'startCol', 'endCol' )
- region_indeces = [ self.get_metadata_column_index_by_name( name ) for name in region_column_names ]
- if check and not all( map( lambda i: i != None, indeces ) ):
- raise ValueError( "Could not determine proper column indeces for chrom, start, end: %s" %( str( indeces ) ) )
- return region_indeces
+ region_indices = [ self.get_metadata_column_index_by_name( name ) for name in region_column_names ]
+ if check and not all( map( lambda i: i != None, region_indices) ):
+ raise ValueError( "Could not determine proper column indices for chrom, start, end: %s" %( str( region_indices ) ) )
+ return region_indices
class ConvertedDatasetDataProvider( DatasetDataProvider ):
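
The hunk above fixes a latent NameError: the `check` branch referenced an undefined `indeces` variable, so a failed column lookup could never raise the intended ValueError. A standalone sketch of the corrected logic, with hypothetical names (`get_index` stands in for `self.get_metadata_column_index_by_name`):

    def get_region_indices( get_index, check=False ):
        region_column_names = ( 'chromCol', 'startCol', 'endCol' )
        region_indices = [ get_index( name ) for name in region_column_names ]
        # Before the fix this test read an undefined name, so check=True
        # raised NameError instead of the ValueError below.
        if check and not all( i is not None for i in region_indices ):
            raise ValueError( "Could not determine proper column indices for "
                              "chrom, start, end: %s" % str( region_indices ) )
        return region_indices
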
Repository URL: https://bitbucket.org/galaxy/galaxy-central/
--
This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.