commit/galaxy-central: clements: Modified docstrings so that Sphinx would not complain about them. However, I couldn't get Sphinx to be happy with all docstrings, so we are still getting 10 warnings (down from over 70 though).
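Most of the hunks below apply the same reStructuredText fix: the line that introduces an example gets a trailing "::", a blank line follows, and the example itself is indented one extra level, so Sphinx renders it as a literal block instead of trying to parse it as markup. A minimal sketch of that pattern on a hypothetical helper (the function name and docstring below are illustrative only, not part of this changeset):

def describe_sections():
    """
    Return section metadata.  The format of the returned value is JSON::

        { "sections" : [
            { "start" : "x", "end" : "y", "sequences" : "z" },
            ...
        ]}

    The "::", the blank line, and the extra indentation mark the example as a
    reST literal block, so Sphinx copies it verbatim instead of emitting
    warnings such as "Unexpected indentation".
    """
    return {"sections": []}


if __name__ == "__main__":
    # help() prints the docstring as written; Sphinx autodoc parses it as reST.
    help(describe_sections)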
1 new commit in galaxy-central:

https://bitbucket.org/galaxy/galaxy-central/changeset/ec51a727a497/
changeset:   ec51a727a497
user:        clements
date:        2012-11-02 07:25:39
summary:     Modified docstrings so that Sphinx would not complain about them.  However, I
             couldn't get Sphinx to be happy with all docstrings, so we are still getting
             10 warnings (down from over 70 though).
affected #:  25 files

diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/datatypes/converters/fastq_to_fqtoc.py
--- a/lib/galaxy/datatypes/converters/fastq_to_fqtoc.py
+++ b/lib/galaxy/datatypes/converters/fastq_to_fqtoc.py
@@ -6,11 +6,13 @@
 
 def main():
     """
-    The format of the file is JSON:
-    { "sections" : [
-        { "start" : "x", "end" : "y", "sequences" : "z" },
-        ...
-    ]}
+    The format of the file is JSON::
+
+        { "sections" : [
+            { "start" : "x", "end" : "y", "sequences" : "z" },
+            ...
+        ]}
+
     This works only for UNCOMPRESSED fastq files. The Python GzipFile does not provide seekable
     offsets via tell(), so clients just have to split the slow way
     """
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/datatypes/converters/interval_to_fli.py
--- a/lib/galaxy/datatypes/converters/interval_to_fli.py
+++ b/lib/galaxy/datatypes/converters/interval_to_fli.py
@@ -1,12 +1,16 @@
 '''
 Creates a feature location index (FLI) for a given BED/GFF file.
-FLI index has the form:
+FLI index has the form::
+
     [line_length]
     <symbol1_in_lowercase><tab><symbol1><tab><location>
     <symbol2_in_lowercase><tab><symbol2><tab><location>
     ...
+
 where location is formatted as:
+
     contig:start-end
+
 and symbols are sorted in lexigraphical order.
 '''
@@ -94,4 +98,4 @@
     out.close()
 
 if __name__ == '__main__':
-    main()
\ No newline at end of file
+    main()
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/datatypes/converters/pbed_ldreduced_converter.py
--- a/lib/galaxy/datatypes/converters/pbed_ldreduced_converter.py
+++ b/lib/galaxy/datatypes/converters/pbed_ldreduced_converter.py
@@ -78,10 +78,14 @@
     """
     need to work with rgenetics composite datatypes
     so in and out are html files with data in extrafiles path
-    <command interpreter="python">
-        pbed_ldreduced_converter.py '$input1.extra_files_path/$input1.metadata.base_name' '$winsize' '$winmove' '$r2thresh'
-        '$output1' '$output1.files_path' 'plink'
-    </command>
+
+    .. raw:: xml
+
+        <command interpreter="python">
+            pbed_ldreduced_converter.py '$input1.extra_files_path/$input1.metadata.base_name' '$winsize' '$winmove' '$r2thresh'
+            '$output1' '$output1.files_path' 'plink'
+        </command>
+
     """
     nparm = 7
     if len(sys.argv) < nparm:
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/datatypes/data.py
--- a/lib/galaxy/datatypes/data.py
+++ b/lib/galaxy/datatypes/data.py
@@ -800,11 +800,12 @@
 
 def get_file_peek( file_name, is_multi_byte=False, WIDTH=256, LINE_COUNT=5, skipchars=[] ):
     """
-    Returns the first LINE_COUNT lines wrapped to WIDTH::
+    Returns the first LINE_COUNT lines wrapped to WIDTH
 
-    ## >>> fname = get_test_fname('4.bed')
-    ## >>> get_file_peek(fname)
-    ## 'chr22 30128507 31828507 uc003bnx.1_cds_2_0_chr22_29227_f 0 +\n'
+    ## >>> fname = get_test_fname('4.bed')
+    ## >>> get_file_peek(fname)
+    ## 'chr22 30128507 31828507 uc003bnx.1_cds_2_0_chr22_29227_f 0 +\n'
+
     """
     # Set size for file.readline() to a negative number to force it to
     # read until either a newline or EOF. Needed for datasets with very
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/datatypes/tabular.py
--- a/lib/galaxy/datatypes/tabular.py
+++ b/lib/galaxy/datatypes/tabular.py
@@ -46,6 +46,7 @@
         process all data lines.
 
         Items of interest:
+
         1. We treat 'overwrite' as always True (we always want to set tabular metadata when called).
         2. If a tabular file has no data, it will have one column of type 'str'.
         3. We used to check only the first 100 lines when setting metadata and this class's
@@ -356,15 +357,18 @@
         Determines whether the file is in SAM format
 
         A file in SAM format consists of lines of tab-separated data.
-        The following header line may be the first line:
-        @QNAME FLAG RNAME POS MAPQ CIGAR MRNM MPOS ISIZE SEQ QUAL
-        or
-        @QNAME FLAG RNAME POS MAPQ CIGAR MRNM MPOS ISIZE SEQ QUAL OPT
+        The following header line may be the first line::
+
+          @QNAME FLAG RNAME POS MAPQ CIGAR MRNM MPOS ISIZE SEQ QUAL
+          or
+          @QNAME FLAG RNAME POS MAPQ CIGAR MRNM MPOS ISIZE SEQ QUAL OPT
+
         Data in the OPT column is optional and can consist of tab-separated data
 
         For complete details see http://samtools.sourceforge.net/SAM1.pdf
 
-        Rules for sniffing as True:
+        Rules for sniffing as True::
+
             There must be 11 or more columns of data on each line
             Columns 2 (FLAG), 4(POS), 5 (MAPQ), 8 (MPOS), and 9 (ISIZE) must be numbers (9 can be negative)
             We will only check that up to the first 5 alignments are correctly formatted.
@@ -579,10 +583,11 @@
         A file in ELAND export format consists of lines of tab-separated data.
         There is no header.
 
-        Rules for sniffing as True:
-            There must be 22 columns on each line
-            LANE, TILEm X, Y, INDEX, READ_NO, SEQ, QUAL, POSITION, *STRAND, FILT must be correct
-            We will only check that up to the first 5 alignments are correctly formatted.
+        Rules for sniffing as True::
+
+            - There must be 22 columns on each line
+            - LANE, TILEm X, Y, INDEX, READ_NO, SEQ, QUAL, POSITION, *STRAND, FILT must be correct
+            - We will only check that up to the first 5 alignments are correctly formatted.
         """
         import gzip
         try:
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/objectstore/__init__.py
--- a/lib/galaxy/objectstore/__init__.py
+++ b/lib/galaxy/objectstore/__init__.py
@@ -51,6 +51,7 @@
         store, False otherwise.
 
        FIELD DESCRIPTIONS (these apply to all the methods in this class):
+
        :type obj: object
        :param obj: A Galaxy object with an assigned database ID accessible via
            the .id attribute.
@@ -118,6 +119,7 @@
        """
        Deletes the object identified by `obj`.
        See `exists` method for the description of other fields.
+
        :type entire_dir: bool
        :param entire_dir: If True, delete the entire directory pointed to by
                           extra_dir. For safety reasons, this option applies
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/quota/__init__.py
--- a/lib/galaxy/quota/__init__.py
+++ b/lib/galaxy/quota/__init__.py
@@ -41,6 +41,7 @@
     def get_quota( self, user, nice_size=False ):
         """
         Calculated like so:
+
             1. Anonymous users get the default quota.
             2. Logged in users start with the highest of their associated '='
                quotas or the default quota, if there are no associated '='
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/security/__init__.py
--- a/lib/galaxy/security/__init__.py
+++ b/lib/galaxy/security/__init__.py
@@ -173,17 +173,21 @@
         which the request is sent.  We cannot use trans.user_is_admin() because the controller is
         what is important since admin users do not necessarily have permission to do things
         on items outside of the admin view.
+
         If cntrller is from the admin side ( e.g., library_admin ):
-            -if item is public, all roles, including private roles, are legitimate.
-            -if item is restricted, legitimate roles are derived from the users and groups associated
-             with each role that is associated with the access permission ( i.e., DATASET_MANAGE_PERMISSIONS or
-             LIBRARY_MANAGE ) on item.  Legitimate roles will include private roles.
+
+            - if item is public, all roles, including private roles, are legitimate.
+            - if item is restricted, legitimate roles are derived from the users and groups associated
+              with each role that is associated with the access permission ( i.e., DATASET_MANAGE_PERMISSIONS or
+              LIBRARY_MANAGE ) on item.  Legitimate roles will include private roles.
+
         If cntrller is not from the admin side ( e.g., root, library ):
-            -if item is public, all non-private roles, except for the current user's private role,
-             are legitimate.
-            -if item is restricted, legitimate roles are derived from the users and groups associated
-             with each role that is associated with the access permission on item.  Private roles, except
-             for the current user's private role, will be excluded.
+
+            - if item is public, all non-private roles, except for the current user's private role,
+              are legitimate.
+            - if item is restricted, legitimate roles are derived from the users and groups associated
+              with each role that is associated with the access permission on item.  Private roles, except
+              for the current user's private role, will be excluded.
         """
         admin_controller = cntrller in [ 'library_admin' ]
         roles = set()
@@ -1063,9 +1067,10 @@
         comma-separated string of folder ids.  This method works with the show_library_item()
         method below, and it returns libraries for which the received user has permission to
        perform the received actions.  Here is an example call to this method to return all
-        libraries for which the received user has LIBRARY_ADD permission:
-        libraries = trans.app.security_agent.get_permitted_libraries( trans, user,
-            [ trans.app.security_agent.permitted_actions.LIBRARY_ADD ] )
+        libraries for which the received user has LIBRARY_ADD permission::
+
+            libraries = trans.app.security_agent.get_permitted_libraries( trans, user,
+                [ trans.app.security_agent.permitted_actions.LIBRARY_ADD ] )
         """
         all_libraries = trans.sa_session.query( trans.app.model.Library ) \
                                         .filter( trans.app.model.Library.table.c.deleted == False ) \
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/tool_shed/tool_dependencies/install_util.py
--- a/lib/galaxy/tool_shed/tool_dependencies/install_util.py
+++ b/lib/galaxy/tool_shed/tool_dependencies/install_util.py
@@ -250,10 +250,11 @@
 def set_environment( app, elem, tool_shed_repository ):
     """
     Create a ToolDependency to set an environment variable.  This is different from the process used to set an environment variable that is associated
-    with a package.  An example entry in a tool_dependencies.xml file is:
-    <set_environment version="1.0">
-        <environment_variable name="R_SCRIPT_PATH" action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
-    </set_environment>
+    with a package.  An example entry in a tool_dependencies.xml file is::
+
+        <set_environment version="1.0">
+            <environment_variable name="R_SCRIPT_PATH" action="set_to">$REPOSITORY_INSTALL_DIR</environment_variable>
+        </set_environment>
     """
     sa_session = app.model.context.current
     tool_dependency = None
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/tools/__init__.py
--- a/lib/galaxy/tools/__init__.py
+++ b/lib/galaxy/tools/__init__.py
@@ -112,16 +112,20 @@
     def init_tools( self, config_filename ):
         """
         Read the configuration file and load each tool.  The following tags are currently supported:
-        <toolbox>
-            <tool file="data_source/upload.xml"/>            # tools outside sections
-            <label text="Basic Tools" id="basic_tools" />    # labels outside sections
-            <workflow id="529fd61ab1c6cc36" />               # workflows outside sections
-            <section name="Get Data" id="getext">            # sections
-                <tool file="data_source/biomart.xml" />      # tools inside sections
-                <label text="In Section" id="in_section" />  # labels inside sections
-                <workflow id="adb5f5c93f827949" />           # workflows inside sections
-            </section>
-        </toolbox>
+
+        .. raw:: xml
+
+            <toolbox>
+                <tool file="data_source/upload.xml"/>            # tools outside sections
+                <label text="Basic Tools" id="basic_tools" />    # labels outside sections
+                <workflow id="529fd61ab1c6cc36" />               # workflows outside sections
+                <section name="Get Data" id="getext">            # sections
+                    <tool file="data_source/biomart.xml" />      # tools inside sections
+                    <label text="In Section" id="in_section" />  # labels inside sections
+                    <workflow id="adb5f5c93f827949" />           # workflows inside sections
+                </section>
+            </toolbox>
+
         """
         if self.app.config.get_bool( 'enable_tool_tags', False ):
             log.info("removing all tool tag associations (" + str( self.sa_session.query( self.app.model.ToolTagAssociation ).count() ) + ")" )
@@ -740,7 +744,8 @@
 class ToolOutput( object ):
     """
     Represents an output datasets produced by a tool. For backward
-    compatibility this behaves as if it were the tuple:
+    compatibility this behaves as if it were the tuple::
+
       (format, metadata_source, parent)
     """
@@ -1079,7 +1084,7 @@
         else:
             self.trackster_conf = None
     def parse_inputs( self, root ):
-        """
+        r"""
         Parse the "<inputs>" element and create appropriate `ToolParameter`s.
         This implementation supports multiple pages and grouping constructs.
         """
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/tools/data/__init__.py
--- a/lib/galaxy/tools/data/__init__.py
+++ b/lib/galaxy/tools/data/__init__.py
@@ -31,10 +31,11 @@
     def load_from_config_file( self, config_filename, tool_data_path, from_shed_config=False ):
         """
         This method is called under 3 conditions:
-        1) When the ToolDataTableManager is initialized (see __init__ above).
-        2) Just after the ToolDataTableManager is initialized and the additional entries defined by shed_tool_data_table_conf.xml
+
+        1. When the ToolDataTableManager is initialized (see __init__ above).
+        2. Just after the ToolDataTableManager is initialized and the additional entries defined by shed_tool_data_table_conf.xml
            are being loaded into the ToolDataTableManager.data_tables.
-        3) When a tool shed repository that includes a tool_data_table_conf.xml.sample file is being installed into a local
+        3. When a tool shed repository that includes a tool_data_table_conf.xml.sample file is being installed into a local
            Galaxy instance.  In this case, we have 2 entry types to handle, files whose root tag is <tables>, for example:
         """
         tree = util.parse_xml( config_filename )
@@ -57,20 +58,24 @@
     def add_new_entries_from_config_file( self, config_filename, tool_data_path, shed_tool_data_table_config, persist=False ):
         """
         This method is called when a tool shed repository that includes a tool_data_table_conf.xml.sample file is being
-        installed into a local galaxy instance.  We have 2 cases to handle, files whose root tag is <tables>, for example:
-        <tables>
+        installed into a local galaxy instance.  We have 2 cases to handle, files whose root tag is <tables>, for example::
+
+        <tables>
+            <!-- Location of Tmap files -->
+            <table name="tmap_indexes" comment_char="#">
+                <columns>value, dbkey, name, path</columns>
+                <file path="tool-data/tmap_index.loc" />
+            </table>
+        </tables>
+
+        and files whose root tag is <table>, for example::
+
            <!-- Location of Tmap files -->
            <table name="tmap_indexes" comment_char="#">
                <columns>value, dbkey, name, path</columns>
                <file path="tool-data/tmap_index.loc" />
            </table>
-        </tables>
-        and files whose root tag is <table>, for example:
-        <!-- Location of Tmap files -->
-        <table name="tmap_indexes" comment_char="#">
-            <columns>value, dbkey, name, path</columns>
-            <file path="tool-data/tmap_index.loc" />
-        </table>
+
        """
        tree = util.parse_xml( config_filename )
        root = tree.getroot()
@@ -119,13 +124,14 @@
 class TabularToolDataTable( ToolDataTable ):
     """
     Data stored in a tabular / separated value format on disk, allows multiple
-    files to be merged but all must have the same column definitions.
+    files to be merged but all must have the same column definitions::
 
-    <table type="tabular" name="test">
-        <column name='...' index = '...' />
-        <file path="..." />
-        <file path="..." />
-    </table>
+        <table type="tabular" name="test">
+            <column name='...' index = '...' />
+            <file path="..." />
+            <file path="..." />
+        </table>
+
     """
 
     type_key = 'tabular'
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/tools/parameters/dynamic_options.py
--- a/lib/galaxy/tools/parameters/dynamic_options.py
+++ b/lib/galaxy/tools/parameters/dynamic_options.py
@@ -64,16 +64,20 @@
 
        Type: data_meta
 
-        When no 'from_' source has been specified in the <options> tag, this will populate the options list with (meta_value, meta_value, False).
+        When no 'from' source has been specified in the <options> tag, this will populate the options list with (meta_value, meta_value, False).
        Otherwise, options which do not match the metadata value in the column are discarded.
 
        Required Attributes:
-            ref: Name of input dataset
-            key: Metadata key to use for comparison
-            column: column in options to compare with (not required when not associated with input options)
+
+            - ref: Name of input dataset
+            - key: Metadata key to use for comparison
+            - column: column in options to compare with (not required when not associated with input options)
+
        Optional Attributes:
-            multiple: Option values are multiple, split column by separator (True)
-            separator: When multiple split by this (,)
+
+            - multiple: Option values are multiple, split column by separator (True)
+            - separator: When multiple split by this (,)
+
    """
    def __init__( self, d_option, elem ):
        Filter.__init__( self, d_option, elem )
@@ -132,12 +136,16 @@
        Type: param_value
 
        Required Attributes:
-            ref: Name of input value
-            column: column in options to compare with
+
+            - ref: Name of input value
+            - column: column in options to compare with
+
        Optional Attributes:
-            keep: Keep columns matching value (True)
-                  Discard columns matching value (False)
-            ref_attribute: Period (.) separated attribute chain of input (ref) to use as value for filter
+
+            - keep: Keep columns matching value (True)
+              Discard columns matching value (False)
+            - ref_attribute: Period (.) separated attribute chain of input (ref) to use as value for filter
+
    """
    def __init__( self, d_option, elem ):
        Filter.__init__( self, d_option, elem )
@@ -294,13 +302,15 @@
 
        Type: remove_value
 
-        Required Attributes:
+        Required Attributes::
+
            value: value to remove from select list
                or
            ref: param to refer to
                or
            meta_ref: dataset to refer to
            key: metadata key to compare to
+
    """
    def __init__( self, d_option, elem ):
        Filter.__init__( self, d_option, elem )
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/util/heartbeat.py
--- a/lib/galaxy/util/heartbeat.py
+++ b/lib/galaxy/util/heartbeat.py
@@ -134,7 +134,9 @@
        Scans a given backtrace stack frames, returns a single
        quadraple of [filename, line, function-name, text] of
        the single, deepest, most interesting frame.
-        Interesting being:
+
+        Interesting being::
+
        inside the galaxy source code ("/lib/galaxy"),
        prefreably not an egg.
        """
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/util/shed_util.py
--- a/lib/galaxy/util/shed_util.py
+++ b/lib/galaxy/util/shed_util.py
@@ -2397,11 +2397,13 @@
 def update_existing_tool_dependency( app, repository, original_dependency_dict, new_dependencies_dict ):
     """
     Update an exsiting tool dependency whose definition was updated in a change set pulled by a Galaxy administrator when getting updates
-    to an installed tool shed repository.  The original_dependency_dict is a single tool dependency definition, an example of which is:
-    {"name": "bwa",
-     "readme": "\\nCompiling BWA requires zlib and libpthread to be present on your system.\\n        ",
-     "type": "package",
-     "version": "0.6.2"}
+    to an installed tool shed repository.  The original_dependency_dict is a single tool dependency definition, an example of which is::
+
+        {"name": "bwa",
+         "readme": "\\nCompiling BWA requires zlib and libpthread to be present on your system.\\n        ",
+         "type": "package",
+         "version": "0.6.2"}
+
     The new_dependencies_dict is the dictionary generated by the generate_tool_dependency_metadata method.
     """
     new_tool_dependency = None
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/util/topsort.py
--- a/lib/galaxy/util/topsort.py
+++ b/lib/galaxy/util/topsort.py
@@ -9,18 +9,24 @@
 value is a list, representing a total ordering that respects all
 the input constraints.
 E.g.,
+
    topsort( [(1,2), (3,3)] )
+
 may return any of (but nothing other than)
+
    [3, 1, 2]
    [1, 3, 2]
    [1, 2, 3]
+
 because those are the permutations of the input elements that
 respect the "1 precedes 2" and "3 precedes 3" input constraints.
 Note that a constraint of the form (x, x) is really just a trick
 to make sure x appears *somewhere* in the output list.
 
 If there's a cycle in the constraints, say
+
    topsort( [(1,2), (2,1)] )
+
 then CycleError is raised, and the exception object supports
 many methods to help analyze and break the cycles.  This requires
 a good deal more code than topsort itself!
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/visualization/data_providers/genome.py
--- a/lib/galaxy/visualization/data_providers/genome.py
+++ b/lib/galaxy/visualization/data_providers/genome.py
@@ -610,11 +610,16 @@
 
    def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
        """
-        Returns a dict with the following attributes:
+        Returns a dict with the following attributes::
+
            data - a list of variants with the format
+
+            .. raw:: text
+
                [<guid>, <start>, <end>, <name>, cigar, seq]
 
            message - error/informative message
+
        """
        rval = []
        message = None
@@ -893,13 +898,17 @@
 
    def process_data( self, iterator, start_val=0, max_vals=None, **kwargs ):
        """
-        Returns a dict with the following attributes:
+        Returns a dict with the following attributes::
+
            data - a list of reads with the format
-                [<guid>, <start>, <end>, <name>, <read_1>, <read_2>, [empty], <mapq_scores>]
+            [<guid>, <start>, <end>, <name>, <read_1>, <read_2>, [empty], <mapq_scores>]
+
            where <read_1> has the format
                [<start>, <end>, <cigar>, <strand>, <read_seq>]
+
            and <read_2> has the format
                [<start>, <end>, <cigar>, <strand>, <read_seq>]
+
            Field 7 is empty so that mapq scores' location matches that in single-end reads.
            For single-end reads, read has format:
                [<guid>, <start>, <end>, <name>, <cigar>, <strand>, <seq>, <mapq_score>]
@@ -910,6 +919,7 @@
            max_low - lowest coordinate for the returned reads
            max_high - highest coordinate for the returned reads
            message - error/informative message
+
        """
        # No iterator indicates no reads.
        if iterator is None:
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/visualization/data_providers/phyloviz/newickparser.py
--- a/lib/galaxy/visualization/data_providers/phyloviz/newickparser.py
+++ b/lib/galaxy/visualization/data_providers/phyloviz/newickparser.py
@@ -112,9 +112,12 @@
    def parseNode(self, string, depth):
        """
        Recursive method for parsing newick string, works by stripping down the string into substring
        of newick contained with brackers, which is used to call itself.
-        Eg ... ( A, B, (D, E)C, F, G ) ...
+
+        Eg ... ( A, B, (D, E)C, F, G ) ...
+
        We will make the preceeding nodes first A, B, then the internal node C, its children D, E,
-        and finally the succeeding nodes F, G"""
+        and finally the succeeding nodes F, G
+        """
 
        # Base case where there is only an empty string
        if string == "":
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/web/form_builder.py
--- a/lib/galaxy/web/form_builder.py
+++ b/lib/galaxy/web/form_builder.py
@@ -708,9 +708,12 @@
                          selected_value='none', refresh_on_change=False, multiple=False, display=None, size=None ):
    """
    Build a SelectField given a set of objects.  The received params are:
+
    - objs: the set of objects used to populate the option list
    - label_attr: the attribute of each obj (e.g., name, email, etc ) whose value is used to populate each option label.
+
      - If the string 'self' is passed as label_attr, each obj in objs is assumed to be a string, so the obj itself is used
+
    - select_field_name: the name of the SelectField
    - initial_value: the value of the first option in the SelectField - allows for an option telling the user to select something
    - selected_value: the value of the currently selected option
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/web/framework/__init__.py
--- a/lib/galaxy/web/framework/__init__.py
+++ b/lib/galaxy/web/framework/__init__.py
@@ -624,6 +624,7 @@
    def handle_user_login( self, user ):
        """
        Login a new user (possibly newly created)
+
           - create a new session
           - associate new session with user
           - if old session had a history and it was not associated with a user, associate it with the new session,
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/webapps/community/config.py
--- a/lib/galaxy/webapps/community/config.py
+++ b/lib/galaxy/webapps/community/config.py
@@ -140,7 +140,7 @@
 def get_database_engine_options( kwargs ):
    """
    Allow options for the SQLAlchemy database engine to be passed by using
-    the prefix "database_engine_option_".
+    the prefix "database_engine_option".
    """
    conversions = {
        'convert_unicode': string_as_bool,
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/webapps/galaxy/api/genomes.py
--- a/lib/galaxy/webapps/galaxy/api/genomes.py
+++ b/lib/galaxy/webapps/galaxy/api/genomes.py
@@ -48,25 +48,25 @@
        POST /api/genomes
        Download and/or index a genome.
 
-        Parameters:
+        Parameters::
 
-        dbkey           DB key of the build to download, ignored unless 'UCSC' is specified as the source
-        ncbi_name       NCBI's genome identifier, ignored unless NCBI is specified as the source
-        ensembl_dbkey   Ensembl's genome identifier, ignored unless Ensembl is specified as the source
-        url_dbkey       DB key to use for this build, ignored unless URL is specified as the source
-        source          Data source for this build. Can be: UCSC, Ensembl, NCBI, URL
-        indexers        POST array of indexers to run after downloading (indexers[] = first, indexers[] = second, ...)
-        func            Allowed values:
-                        'download'  Download and index
-                        'index'     Index only
-
-        Returns:
+            dbkey           DB key of the build to download, ignored unless 'UCSC' is specified as the source
+            ncbi_name       NCBI's genome identifier, ignored unless NCBI is specified as the source
+            ensembl_dbkey   Ensembl's genome identifier, ignored unless Ensembl is specified as the source
+            url_dbkey       DB key to use for this build, ignored unless URL is specified as the source
+            source          Data source for this build. Can be: UCSC, Ensembl, NCBI, URL
+            indexers        POST array of indexers to run after downloading (indexers[] = first, indexers[] = second, ...)
+            func            Allowed values:
+                            'download'  Download and index
+                            'index'     Index only
+
+        Returns::
 
-        If no error:
-        dict( status: 'ok', job: <job ID> )
+            If no error:
+            dict( status: 'ok', job: <job ID> )
 
-        If error:
-        dict( status: 'error', error: <error message> )
+            If error:
+            dict( status: 'error', error: <error message> )
        """
        params = util.Params( payload )
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/webapps/galaxy/api/tools.py
--- a/lib/galaxy/webapps/galaxy/api/tools.py
+++ b/lib/galaxy/webapps/galaxy/api/tools.py
@@ -13,12 +13,15 @@
    @web.expose_api
    def index( self, trans, **kwds ):
        """
-        GET /api/tools: returns a list of tools defined by parameters
+        GET /api/tools: returns a list of tools defined by parameters::
+
            parameters:
+
                in_panel  - if true, tools are returned in panel structure, including sections and labels
                trackster - if true, only tools that are compatible with Trackster are returned
+
        """
 
        # Read params.
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/webapps/galaxy/controllers/workflow.py
--- a/lib/galaxy/webapps/galaxy/controllers/workflow.py
+++ b/lib/galaxy/webapps/galaxy/controllers/workflow.py
@@ -1980,8 +1980,8 @@
 
 def edgelist_for_workflow_steps( steps ):
    """
-    Create a list of tuples representing edges between `WorkflowSteps` based
-    on associated `WorkflowStepConnection`s
+    Create a list of tuples representing edges between ``WorkflowSteps`` based
+    on associated ``WorkflowStepConnection``s
    """
    edges = []
    steps_to_index = dict( ( step, i ) for i, step in enumerate( steps ) )
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/galaxy/webapps/reports/config.py
--- a/lib/galaxy/webapps/reports/config.py
+++ b/lib/galaxy/webapps/reports/config.py
@@ -56,7 +56,7 @@
 def get_database_engine_options( kwargs ):
    """
    Allow options for the SQLAlchemy database engine to be passed by using
-    the prefix "database_engine_option_".
+    the prefix "database_engine_option".
    """
    conversions = {
        'convert_unicode': string_as_bool,
diff -r bc5fa254bafc981c70bdffd394e9c44c8dda4ab8 -r ec51a727a497d5912fdd4269571d7c26d7523436 lib/mimeparse.py
--- a/lib/mimeparse.py
+++ b/lib/mimeparse.py
@@ -39,18 +39,21 @@
 return (type.strip(), subtype.strip(), params)
 
 def parse_media_range(range):
-    """Carves up a media range and returns a tuple of the
-       (type, subtype, params) where 'params' is a dictionary
-       of all the parameters for the media range.
-       For example, the media range 'application/*;q=0.5' would
-       get parsed into:
+    r"""
+    Carves up a media range and returns a tuple of the
+    (type, subtype, params) where 'params' is a dictionary
+    of all the parameters for the media range.
+    For example, the media range 'application/*;q=0.5' would
+    get parsed into:
 
-       ('application', '*', {'q', '0.5'})
+    .. raw:: text
 
-       In addition this function also guarantees that there
-       is a value for 'q' in the params dictionary, filling it
-       in with a proper default if necessary.
-       """
+        ('application', '*', {'q', '0.5'})
+
+    In addition this function also guarantees that there
+    is a value for 'q' in the params dictionary, filling it
+    in with a proper default if necessary.
+    """
    (type, subtype, params) = parse_mime_type(range)
    if not params.has_key('q') or not params['q'] or \
        not float(params['q']) or float(params['q']) > 1\

Repository URL: https://bitbucket.org/galaxy/galaxy-central/

--

This is a commit notification from bitbucket.org. You are receiving
this because you have the service enabled, addressing the recipient of
this email.