Big i18n tools update, I/II.
Notes: * Everything is still a bit raw and sometimes hackish. * Not every feature is implemented yet. * A bunch of cleanup is still needed. * The documentation needs to be updated too!
This commit is contained in:
		
							
								
								
									
										891
									
								
								release/scripts/modules/bl_i18n_utils/bl_extract_messages.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										891
									
								
								release/scripts/modules/bl_i18n_utils/bl_extract_messages.py
									
									
									
									
									
										Normal file
									
								
							@@ -0,0 +1,891 @@
 | 
			
		||||
# ***** BEGIN GPL LICENSE BLOCK *****
 | 
			
		||||
#
 | 
			
		||||
# This program is free software; you can redistribute it and/or
 | 
			
		||||
# modify it under the terms of the GNU General Public License
 | 
			
		||||
# as published by the Free Software Foundation; either version 2
 | 
			
		||||
# of the License, or (at your option) any later version.
 | 
			
		||||
#
 | 
			
		||||
# This program is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU General Public License
 | 
			
		||||
# along with this program; if not, write to the Free Software Foundation,
 | 
			
		||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 | 
			
		||||
#
 | 
			
		||||
# ***** END GPL LICENSE BLOCK *****
 | 
			
		||||
 | 
			
		||||
# <pep8 compliant>
 | 
			
		||||
 | 
			
		||||
# Populate a template file (POT format currently) from Blender RNA/py/C data.
 | 
			
		||||
# XXX: This script is meant to be used from inside Blender!
 | 
			
		||||
#      You should not directly use this script, rather use update_msg.py!
 | 
			
		||||
 | 
			
		||||
import collections
 | 
			
		||||
import copy
 | 
			
		||||
import datetime
 | 
			
		||||
import os
 | 
			
		||||
import re
 | 
			
		||||
import sys
 | 
			
		||||
 | 
			
		||||
# XXX Relative import does not work here when used from Blender...
 | 
			
		||||
from bl_i18n_utils import settings as i18n_settings, utils
 | 
			
		||||
 | 
			
		||||
import bpy
 | 
			
		||||
 | 
			
		||||
##### Utils #####
 | 
			
		||||
 | 
			
		||||
# Messages made only of punctuation, numbers and printf-style placeholders
# (e.g. "+%f°") carry nothing translatable and must be skipped.
_IGNORE_PATTERN = r"^(?:[-*.()/\\+%°0-9]|%d|%f|%s|%r|\s)*$"
ignore_reg = re.compile(_IGNORE_PATTERN)
# Callable used everywhere below: truthy when the message should be ignored.
filter_message = ignore_reg.match
 | 
			
		||||
 | 
			
		||||
def init_spell_check(settings, lang="en_US"):
    """
    Return a SpellChecker instance for the given language, or None when the
    spell-checking helper module (or its backend) is not available.
    """
    try:
        from bl_i18n_utils import spell_check_utils
        checker = spell_check_utils.SpellChecker(settings, lang)
    except Exception as e:
        # Spell checking is optional: report the problem and carry on without it.
        print("Failed to import spell_check_utils ({})".format(str(e)))
        checker = None
    return checker
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _gen_check_ctxt(settings):
    """
    Build a fresh "check context": a dict of per-warning-category accumulators
    filled in by check() while messages are extracted.
    """
    # Each warning category accumulates offending (msgctxt, msgid) keys.
    check_ctxt = {category: set() for category in (
        "multi_rnatip",
        "multi_lines",
        "py_in_rna",
        "not_capitalized",
        "end_point",
        "undoc_ops",
    )}
    # Spell checking is optional; may be None if unavailable.
    check_ctxt["spell_checker"] = init_spell_check(settings)
    # Maps key -> spelling errors reported by the checker.
    check_ctxt["spell_errors"] = {}
    return check_ctxt
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _gen_reports(check_ctxt):
 | 
			
		||||
    return {
 | 
			
		||||
        "check_ctxt": check_ctxt,
 | 
			
		||||
        "rna_structs": [],
 | 
			
		||||
        "rna_structs_skipped": [],
 | 
			
		||||
        "rna_props": [],
 | 
			
		||||
        "rna_props_skipped": [],
 | 
			
		||||
        "py_messages": [],
 | 
			
		||||
        "py_messages_skipped": [],
 | 
			
		||||
        "src_messages": [],
 | 
			
		||||
        "src_messages_skipped": [],
 | 
			
		||||
        "messages_skipped": set(),
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def check(check_ctxt, msgs, key, msgsrc, settings):
    """
    Performs a set of checks over the given key (context, message)...

    key is a (msgctxt, msgid) tuple. Each enabled category in check_ctxt is a
    mutable accumulator (set or dict) that this function adds offenders to;
    a category missing from check_ctxt (get() -> None) is simply skipped.
    No value is returned — all results are side effects on check_ctxt.
    """
    if check_ctxt is None:
        return
    multi_rnatip = check_ctxt.get("multi_rnatip")
    multi_lines = check_ctxt.get("multi_lines")
    py_in_rna = check_ctxt.get("py_in_rna")
    not_capitalized = check_ctxt.get("not_capitalized")
    end_point = check_ctxt.get("end_point")
    undoc_ops = check_ctxt.get("undoc_ops")
    spell_checker = check_ctxt.get("spell_checker")
    spell_errors = check_ctxt.get("spell_errors")

    if multi_rnatip is not None:
        # Same tip already registered from another RNA item.
        if key in msgs and key not in multi_rnatip:
            multi_rnatip.add(key)
    if multi_lines is not None:
        # Messages should not embed newlines.
        if '\n' in key[1]:
            multi_lines.add(key)
    if py_in_rna is not None:
        # Here py_in_rna is a (accumulator_set, rna_keys_set) pair, as built in
        # dump_py_messages_from_files(): flag py messages already seen in RNA.
        if key in py_in_rna[1]:
            py_in_rna[0].add(key)
    if not_capitalized is not None:
        # First character should be uppercase, unless whitelisted in settings.
        # NOTE(review): key[1][0] assumes a non-empty msgid — empty ones are
        # filtered out earlier by filter_message() in process_msg(); confirm
        # all callers go through that path.
        if(key[1] not in settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED and
           key[1][0].isalpha() and not key[1][0].isupper()):
            not_capitalized.add(key)
    if end_point is not None:
        # Trailing '.' is discouraged, but ellipsis '...' is fine.
        if (key[1].strip().endswith('.') and not key[1].strip().endswith('...') and
            key[1] not in settings.WARN_MSGID_END_POINT_ALLOWED):
            end_point.add(key)
    if undoc_ops is not None:
        # Placeholder tip used for operators lacking documentation.
        if key[1] == settings.UNDOC_OPS_STR:
            undoc_ops.add(key)
    if spell_checker is not None and spell_errors is not None:
        err = spell_checker.check(key[1])
        if err:
            spell_errors[key] = err
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def print_info(reports, pot):
    """
    Print a summary of the extraction run (counts per source kind, POT stats)
    followed by all warnings accumulated in reports["check_ctxt"].

    Everything is written to stderr, so stdout stays clean for actual output.
    """
    def _print(*args, **kwargs):
        # Force all reporting onto stderr.
        kwargs["file"] = sys.stderr
        print(*args, **kwargs)

    pot.update_info()

    _print("{} RNA structs were processed (among which {} were skipped), containing {} RNA properties "
           "(among which {} were skipped).".format(len(reports["rna_structs"]), len(reports["rna_structs_skipped"]),
                                                   len(reports["rna_props"]), len(reports["rna_props_skipped"])))
    _print("{} messages were extracted from Python UI code (among which {} were skipped), and {} from C source code "
           "(among which {} were skipped).".format(len(reports["py_messages"]), len(reports["py_messages_skipped"]),
                                                   len(reports["src_messages"]), len(reports["src_messages_skipped"])))
    _print("{} messages were rejected.".format(len(reports["messages_skipped"])))
    _print("\n")
    _print("Current POT stats:")
    pot.print_stats(prefix="\t", output=_print)
    _print("\n")

    check_ctxt = reports["check_ctxt"]
    if check_ctxt is None:
        # Checks were disabled for this run; nothing more to report.
        return
    multi_rnatip = check_ctxt.get("multi_rnatip")
    multi_lines = check_ctxt.get("multi_lines")
    py_in_rna = check_ctxt.get("py_in_rna")
    not_capitalized = check_ctxt.get("not_capitalized")
    end_point = check_ctxt.get("end_point")
    undoc_ops = check_ctxt.get("undoc_ops")
    spell_errors = check_ctxt.get("spell_errors")

    # XXX Temp, no multi_rnatip nor py_in_rna, see below.
    # Union of every flagged key: each key is reported once, with all of its
    # warning categories listed together.
    keys = multi_lines | not_capitalized | end_point | undoc_ops | spell_errors.keys()
    if keys:
        _print("WARNINGS:")
        for key in keys:
            if undoc_ops and key in undoc_ops:
                _print("\tThe following operators are undocumented!")
            else:
                _print("\t“{}”|“{}”:".format(*key))
                if multi_lines and key in multi_lines:
                    _print("\t\t-> newline in this message!")
                if not_capitalized and key in not_capitalized:
                    _print("\t\t-> message not capitalized!")
                if end_point and key in end_point:
                    _print("\t\t-> message with endpoint!")
                # XXX Hide this one for now, too much false positives.
#                if multi_rnatip and key in multi_rnatip:
#                    _print("\t\t-> tip used in several RNA items")
#                if py_in_rna and key in py_in_rna:
#                    _print("\t\t-> RNA message also used in py UI code!")
                # NOTE(review): spell_errors[key] appears to hold
                # (word, suggestions) pairs — confirm against the spell
                # checker's check() return value.
                if spell_errors and spell_errors.get(key):
                    lines = ["\t\t-> {}: misspelled, suggestions are ({})".format(w, "'" + "', '".join(errs) + "'")
                             for w, errs in  spell_errors[key]]
                    _print("\n".join(lines))
            # Finally list the source locations this message came from.
            _print("\t\t{}".format("\n\t\t".join(pot.msgs[key].sources)))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def enable_addons(addons={}, support={}, disable=False):
    """
    Enable (or disable) addons based either on a set of names, or a set of 'support' types.
    Returns the list of all affected addons (as fake modules)!

    If addons is non-empty it takes precedence; otherwise modules are matched
    by their bl_info "support" value against the support set.
    NOTE(review): the mutable default arguments are never mutated here, so the
    classic shared-default pitfall does not bite — but consider frozenset().
    """
    import addon_utils

    userpref = bpy.context.user_preferences
    # Modules currently enabled in the user preferences.
    used_ext = {ext.module for ext in userpref.addons}

    ret = [mod for mod in addon_utils.modules(addon_utils.addons_fake_modules)
               if ((addons and mod.__name__ in addons) or
                   (not addons and addon_utils.module_bl_info(mod)["support"] in support))]

    for mod in ret:
        module_name = mod.__name__
        if disable:
            # Only disable modules that are actually enabled.
            if module_name not in used_ext:
                continue
            print("    Disabling module ", module_name)
            bpy.ops.wm.addon_disable(module=module_name)
        else:
            # Only enable modules that are not already enabled.
            if module_name in used_ext:
                continue
            print("    Enabling module ", module_name)
            bpy.ops.wm.addon_enable(module=module_name)

    # XXX There are currently some problems with bpy/rna...
    #     *Very* tricky to solve!
    #     So this is a hack to make all newly added operator visible by
    #     bpy.types.OperatorProperties.__subclasses__()
    for cat in dir(bpy.ops):
        cat = getattr(bpy.ops, cat)
        for op in dir(cat):
            getattr(cat, op).get_rna()

    return ret
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def process_msg(msgs, msgctxt, msgid, msgsrc, reports, check_ctxt, settings):
    """
    Register one extracted message into msgs, keyed by (msgctxt, msgid).

    Untranslatable messages are rejected into reports["messages_skipped"].
    All enabled warning checks are run via check(); the source location is
    recorded either as a new I18nMessage or appended to an existing one.
    """
    # Reject messages made only of placeholders/punctuation.
    if filter_message(msgid):
        reports["messages_skipped"].add((msgid, msgsrc))
        return
    # We do *not* want any "" context!
    msgctxt = msgctxt or settings.DEFAULT_CONTEXT
    # Keys are always stored unescaped.
    msgctxt = utils.I18nMessage.do_unescape(msgctxt)
    msgid = utils.I18nMessage.do_unescape(msgid)
    key = (msgctxt, msgid)
    check(check_ctxt, msgs, key, msgsrc, settings)
    msgsrc = settings.PO_COMMENT_PREFIX_SOURCE_CUSTOM + msgsrc
    existing = msgs.get(key)
    if existing is None:
        msgs[key] = utils.I18nMessage([msgctxt], [msgid], [], [msgsrc], settings=settings)
    else:
        # Message already known: just record one more source location.
        existing.comment_lines.append(msgsrc)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
##### RNA #####
 | 
			
		||||
def dump_messages_rna(msgs, reports, settings):
    """
    Dump into messages dict all RNA-defined UI messages (labels en tooltips).

    Walks every RNA struct reachable from bpy_struct's subclasses (skipping a
    blacklist of core/registerable classes), then the keymap hierarchy.
    """
    def class_blacklist():
        # Returns the list of RNA class identifiers whose messages must NOT
        # be extracted.
        blacklist_rna_class = [
            # core classes
            "Context", "Event", "Function", "UILayout", "BlendData", "UnknownType",
            # registerable classes
            "Panel", "Menu", "Header", "RenderEngine", "Operator", "OperatorMacro", "Macro", "KeyingSetInfo",
            # window classes
            "Window",
        ]

        # Collect internal operators
        # extend with all internal operators
        # note that this uses internal api introspection functions
        # all possible operator names
        op_ids = set(cls.bl_rna.identifier for cls in bpy.types.OperatorProperties.__subclasses__()) | \
                 set(cls.bl_rna.identifier for cls in bpy.types.Operator.__subclasses__()) | \
                 set(cls.bl_rna.identifier for cls in bpy.types.OperatorMacro.__subclasses__())

        get_instance = __import__("_bpy").ops.get_instance
#        path_resolve = type(bpy.context).__base__.path_resolve
        # NOTE(review): with the INTERNAL filter commented out below, this
        # loop only instantiates each operator and discards the result —
        # presumably kept for its side effects or future reinstatement.
        for idname in op_ids:
            op = get_instance(idname)
            # XXX Do not skip INTERNAL's anymore, some of those ops show up in UI now!
#            if 'INTERNAL' in path_resolve(op, "bl_options"):
#                blacklist_rna_class.append(idname)

        # Collect builtin classes we don't need to doc
        blacklist_rna_class.append("Property")
        blacklist_rna_class.extend([cls.__name__ for cls in bpy.types.Property.__subclasses__()])

        # Collect classes which are attached to collections, these are api access only.
        collection_props = set()
        for cls_id in dir(bpy.types):
            cls = getattr(bpy.types, cls_id)
            for prop in cls.bl_rna.properties:
                if prop.type == 'COLLECTION':
                    prop_cls = prop.srna
                    if prop_cls is not None:
                        collection_props.add(prop_cls.identifier)
        blacklist_rna_class.extend(sorted(collection_props))

        return blacklist_rna_class

    check_ctxt_rna = check_ctxt_rna_tip = None
    check_ctxt = reports["check_ctxt"]
    if check_ctxt:
        check_ctxt_rna = {
            "multi_lines": check_ctxt.get("multi_lines"),
            "not_capitalized": check_ctxt.get("not_capitalized"),
            "end_point": check_ctxt.get("end_point"),
            "undoc_ops": check_ctxt.get("undoc_ops"),
            "spell_checker": check_ctxt.get("spell_checker"),
            "spell_errors": check_ctxt.get("spell_errors"),
        }
        # NOTE(review): this is an alias, not a copy — adding "multi_rnatip"
        # below makes it visible through check_ctxt_rna as well. Confirm the
        # two contexts are really meant to be distinct dicts.
        check_ctxt_rna_tip = check_ctxt_rna
        check_ctxt_rna_tip["multi_rnatip"] = check_ctxt.get("multi_rnatip")

    default_context = settings.DEFAULT_CONTEXT

    # Function definitions
    def walk_properties(cls):
        # Extract name/description (and enum items) of every property that
        # cls defines itself.
        bl_rna = cls.bl_rna
        # Get our parents' properties, to not export them multiple times.
        bl_rna_base = bl_rna.base
        if bl_rna_base:
            bl_rna_base_props = set(bl_rna_base.properties.values())
        else:
            bl_rna_base_props = set()

        for prop in bl_rna.properties:
            # Only write this property if our parent hasn't got it.
            if prop in bl_rna_base_props:
                continue
            if prop.identifier == "rna_type":
                continue
            reports["rna_props"].append((cls, prop))

            msgsrc = "bpy.types.{}.{}".format(bl_rna.identifier, prop.identifier)
            msgctxt = prop.translation_context or default_context

            # Skip names identical to their identifier in the default context
            # (nothing to translate there).
            if prop.name and (prop.name != prop.identifier or msgctxt != default_context):
                process_msg(msgs, msgctxt, prop.name, msgsrc, reports, check_ctxt_rna, settings)
            if prop.description:
                process_msg(msgs, default_context, prop.description, msgsrc, reports, check_ctxt_rna_tip, settings)

            if isinstance(prop, bpy.types.EnumProperty):
                for item in prop.enum_items:
                    msgsrc = "bpy.types.{}.{}:'{}'".format(bl_rna.identifier, prop.identifier, item.identifier)
                    if item.name and item.name != item.identifier:
                        process_msg(msgs, msgctxt, item.name, msgsrc, reports, check_ctxt_rna, settings)
                    if item.description:
                        process_msg(msgs, default_context, item.description, msgsrc, reports, check_ctxt_rna_tip,
                                    settings)

    blacklist_rna_class = class_blacklist()

    def walk_class(cls):
        # Extract name/description/label of one RNA struct, then its properties.
        bl_rna = cls.bl_rna
        reports["rna_structs"].append(cls)
        if bl_rna.identifier in blacklist_rna_class:
            reports["rna_structs_skipped"].append(cls)
            return

        # XXX translation_context of Operator sub-classes are not "good"!
        #     So ignore those Operator sub-classes (anyway, will get the same from OperatorProperties sub-classes!)...
        if issubclass(cls, bpy.types.Operator):
            reports["rna_structs_skipped"].append(cls)
            return

        msgsrc = "bpy.types." + bl_rna.identifier
        msgctxt = bl_rna.translation_context or default_context

        if bl_rna.name and (bl_rna.name != bl_rna.identifier or msgctxt != default_context):
            process_msg(msgs, msgctxt, bl_rna.name, msgsrc, reports, check_ctxt_rna, settings)

        if bl_rna.description:
            process_msg(msgs, default_context, bl_rna.description, msgsrc, reports, check_ctxt_rna_tip, settings)

        if hasattr(bl_rna, 'bl_label') and  bl_rna.bl_label:
            process_msg(msgs, msgctxt, bl_rna.bl_label, msgsrc, reports, check_ctxt_rna, settings)

        walk_properties(cls)

    def walk_keymap_hierarchy(hier, msgsrc_prev):
        # hier items look like (name, identifier, ?, children) — lvl[0] is the
        # displayed name, lvl[1] the identifier, lvl[3] the sub-hierarchy.
        for lvl in hier:
            msgsrc = msgsrc_prev + "." + lvl[1]
            process_msg(msgs, default_context, lvl[0], msgsrc, reports, None, settings)
            if lvl[3]:
                walk_keymap_hierarchy(lvl[3], msgsrc)

    # Dump Messages
    def process_cls_list(cls_list):
        if not cls_list:
            return

        def full_class_id(cls):
            """ gives us 'ID.Lamp.AreaLamp' which is best for sorting."""
            cls_id = ""
            bl_rna = cls.bl_rna
            while bl_rna:
                cls_id = bl_rna.identifier + "." + cls_id
                bl_rna = bl_rna.base
            return cls_id

        # Deterministic order: sort by the full inheritance path.
        cls_list.sort(key=full_class_id)
        for cls in cls_list:
            walk_class(cls)
            # Recursively process subclasses.
            process_cls_list(cls.__subclasses__())

    # Parse everything (recursively parsing from bpy_struct "class"...).
    process_cls_list(bpy.types.ID.__base__.__subclasses__())

    # And parse keymaps!
    from bpy_extras.keyconfig_utils import KM_HIERARCHY

    walk_keymap_hierarchy(KM_HIERARCHY, "KM_HIERARCHY")
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
##### Python source code #####
 | 
			
		||||
def dump_py_messages_from_files(msgs, reports, files, settings):
    """
    Dump text inlined in the python files given, e.g. 'My Name' in:
        layout.prop("someprop", text="My Name")

    Each file is parsed with ast; every Call node whose function name matches
    a known translatable function (UILayout funcs with STRING args, or the
    pgettext variants) has its msgid/msgctxt arguments extracted.
    """
    import ast

    # NOTE(review): bpy_struct is assigned but not referenced anywhere in this
    # function — possibly a leftover; confirm before removing.
    bpy_struct = bpy.types.ID.__base__

    # Helper function
    def extract_strings_ex(node, is_split=False):
        """
        Recursively get strings, needed in case we have "Blah" + "Blah", passed as an argument in that case it won't
        evaluate to a string. However, break on some kind of stopper nodes, like e.g. Subscript.
        """
        if type(node) == ast.Str:
            eval_str = ast.literal_eval(node)
            if eval_str:
                yield (is_split, eval_str, (node,))
        else:
            # Children of a "separate" node (e.g. IfExp) start a new group.
            is_split = (type(node) in separate_nodes)
            for nd in ast.iter_child_nodes(node):
                if type(nd) not in stopper_nodes:
                    yield from extract_strings_ex(nd, is_split=is_split)

    def _extract_string_merge(estr_ls, nds_ls):
        # Concatenate collected string pieces and keep their AST nodes
        # (None placeholders from empty groups are dropped).
        return "".join(s for s in estr_ls if s is not None), tuple(n for n in nds_ls if n is not None)

    def extract_strings(node):
        # Merge every string under node into a single (text, nodes) pair.
        estr_ls = []
        nds_ls = []
        for is_split, estr, nds in extract_strings_ex(node):
            estr_ls.append(estr)
            nds_ls.extend(nds)
        ret = _extract_string_merge(estr_ls, nds_ls)
        return ret

    def extract_strings_split(node):
        """
        Returns a list args as returned by 'extract_strings()', But split into groups based on separate_nodes, this way
        expressions like ("A" if test else "B") wont be merged but "A" + "B" will.
        """
        estr_ls = []
        nds_ls = []
        bag = []
        for is_split, estr, nds in extract_strings_ex(node):
            if is_split:
                # Close the current group and start a new one.
                bag.append((estr_ls, nds_ls))
                estr_ls = []
                nds_ls = []

            estr_ls.append(estr)
            nds_ls.extend(nds)

        bag.append((estr_ls, nds_ls))

        return [_extract_string_merge(estr_ls, nds_ls) for estr_ls, nds_ls in bag]


    def _ctxt_to_ctxt(node):
        # A msgctxt argument is itself a literal string: just extract it.
        return extract_strings(node)[0]

    def _op_to_ctxt(node):
        # An "operator" argument: resolve the op and use its own RNA
        # translation context.
        opname, _ = extract_strings(node)
        if not opname:
            return settings.DEFAULT_CONTEXT
        op = bpy.ops
        for n in opname.split('.'):
            op = getattr(op, n)
        try:
            return op.get_rna().bl_rna.translation_context
        except Exception as e:
            default_op_context = bpy.app.translations.contexts.operator_default
            print("ERROR: ", str(e))
            print("       Assuming default operator context '{}'".format(default_op_context))
            return default_op_context

    # Gather function names.
    # In addition of UI func, also parse pgettext ones...
    # Tuples of (module name, (short names, ...)).
    pgettext_variants = (
        ("pgettext", ("_",)),
        ("pgettext_iface", ("iface_",)),
        ("pgettext_tip", ("tip_",))
    )
    # pgettext-style calls: msgid is positional arg 0, msgctxt arg 1.
    pgettext_variants_args = {"msgid": (0, {"msgctxt": 1})}

    # key: msgid keywords.
    # val: tuples of ((keywords,), context_getter_func) to get a context for that msgid.
    #      Note: order is important, first one wins!
    translate_kw = {
        "text": ((("text_ctxt",), _ctxt_to_ctxt),
                 (("operator",), _op_to_ctxt),
                ),
        "msgid": ((("msgctxt",), _ctxt_to_ctxt),
                 ),
    }

    # For each msgid keyword, the flat set of all context keywords it can use.
    context_kw_set = {}
    for k, ctxts in translate_kw.items():
        s = set()
        for c, _ in ctxts:
            s |= set(c)
        context_kw_set[k] = s

    # {func_id: {msgid: (arg_pos,
    #                    {msgctxt: arg_pos,
    #                     ...
    #                    }
    #                   ),
    #            ...
    #           },
    #  ...
    # }
    func_translate_args = {}

    # First, functions from UILayout
    # First loop is for msgid args, second one is for msgctxt args.
    for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
        # check it has one or more arguments as defined in translate_kw
        for arg_pos, (arg_kw, arg) in enumerate(func.parameters.items()):
            if ((arg_kw in translate_kw) and (not arg.is_output) and (arg.type == 'STRING')):
                func_translate_args.setdefault(func_id, {})[arg_kw] = (arg_pos, {})
    for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
        if func_id not in func_translate_args:
            continue
        for arg_pos, (arg_kw, arg) in enumerate(func.parameters.items()):
            if (not arg.is_output) and (arg.type == 'STRING'):
                for msgid, msgctxts in context_kw_set.items():
                    if arg_kw in msgctxts:
                        func_translate_args[func_id][msgid][1][arg_kw] = arg_pos
    # We manually add funcs from bpy.app.translations
    # NOTE(review): the inner loop rebinds func_id — both the long name and
    # its short aliases end up mapped, which appears intentional.
    for func_id, func_ids in pgettext_variants:
        func_translate_args[func_id] = pgettext_variants_args
        for func_id in func_ids:
            func_translate_args[func_id] = pgettext_variants_args
    #print(func_translate_args)

    # Break recursive nodes look up on some kind of nodes.
    # E.g. we don’t want to get strings inside subscripts (blah["foo"])!
    stopper_nodes = {ast.Subscript}
    # Consider strings separate: ("a" if test else "b")
    separate_nodes = {ast.IfExp}

    check_ctxt_py = None
    if reports["check_ctxt"]:
        check_ctxt = reports["check_ctxt"]
        check_ctxt_py = {
            # Pair of (accumulator, RNA keys known so far) — see check().
            "py_in_rna": (check_ctxt.get("py_in_rna"), set(msgs.keys())),
            "multi_lines": check_ctxt.get("multi_lines"),
            "not_capitalized": check_ctxt.get("not_capitalized"),
            "end_point": check_ctxt.get("end_point"),
            "spell_checker": check_ctxt.get("spell_checker"),
            "spell_errors": check_ctxt.get("spell_errors"),
        }

    for fp in files:
        with open(fp, 'r', encoding="utf8") as filedata:
            root_node = ast.parse(filedata.read(), fp, 'exec')

        # Source locations are reported relative to the source tree root.
        fp_rel = os.path.relpath(fp, settings.SOURCE_DIR)

        for node in ast.walk(root_node):
            if type(node) == ast.Call:
                # print("found function at")
                # print("%s:%d" % (fp, node.lineno))

                # We can't skip such situations! from blah import foo\nfoo("bar") would also be an ast.Name func!
                if type(node.func) == ast.Name:
                    func_id = node.func.id
                elif hasattr(node.func, "attr"):
                    func_id = node.func.attr
                # Ugly things like getattr(self, con.type)(context, box, con)
                else:
                    continue

                func_args = func_translate_args.get(func_id, {})

                # First try to get i18n contexts, for every possible msgid id.
                msgctxts = dict.fromkeys(func_args.keys(), "")
                for msgid, (_, context_args) in func_args.items():
                    context_elements = {}
                    for arg_kw, arg_pos in context_args.items():
                        # Context may be passed positionally or as a keyword.
                        if arg_pos < len(node.args):
                            context_elements[arg_kw] = node.args[arg_pos]
                        else:
                            for kw in node.keywords:
                                if kw.arg == arg_kw:
                                    context_elements[arg_kw] = kw.value
                                    break
                    #print(context_elements)
                    for kws, proc in translate_kw[msgid]:
                        # First getter whose keywords are all present wins.
                        if set(kws) <= context_elements.keys():
                            args = tuple(context_elements[k] for k in kws)
                            #print("running ", proc, " with ", args)
                            ctxt = proc(*args)
                            if ctxt:
                                msgctxts[msgid] = ctxt
                                break

                #print(translate_args)
                # do nothing if not found
                for arg_kw, (arg_pos, _) in func_args.items():
                    msgctxt = msgctxts[arg_kw]
                    estr_lst = [(None, ())]
                    if arg_pos < len(node.args):
                        estr_lst = extract_strings_split(node.args[arg_pos])
                        #print(estr, nds)
                    else:
                        for kw in node.keywords:
                            if kw.arg == arg_kw:
                                estr_lst = extract_strings_split(kw.value)
                                break
                        #print(estr, nds)
                    for estr, nds in estr_lst:
                        if estr:
                            if nds:
                                # Report the first (lowest) line number among
                                # the nodes contributing to this string.
                                msgsrc = "{}:{}".format(fp_rel, sorted({nd.lineno for nd in nds})[0])
                            else:
                                msgsrc = "{}:???".format(fp_rel)
                            process_msg(msgs, msgctxt, estr, msgsrc, reports, check_ctxt_py, settings)
                            reports["py_messages"].append((msgctxt, estr, msgsrc))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def dump_py_messages(msgs, reports, addons, settings):
    """Collect py UI messages from the configured custom UI files and from all given addons.

    Results are accumulated into msgs/reports in place, via
    dump_py_messages_from_files().
    """
    def _get_files(path):
        # A directory yields its non-private .py files (sorted); a plain file yields itself.
        if not os.path.isdir(path):
            return [path]
        # XXX use walk instead of listdir?
        return [os.path.join(path, fn)
                for fn in sorted(os.listdir(path))
                if not fn.startswith("_") and fn.endswith(".py")]

    files = []
    for path in settings.CUSTOM_PY_UI_FILES:
        files.extend(_get_files(path))

    # Add all addons we support in main translation file!
    for mod in addons:
        mod_file = mod.__file__
        if os.path.basename(mod_file) != "__init__.py":
            # Single-file addon.
            files.append(mod_file)
        else:
            # Package addon: scan the whole package directory.
            files.extend(_get_files(os.path.dirname(mod_file)))

    dump_py_messages_from_files(msgs, reports, files, settings)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
##### C source code #####
 | 
			
		||||
def dump_src_messages(msgs, reports, settings):
    """Extract messages from C/C++ source files, using the PYGETTEXT_KEYWORDS regexes.

    Files scanned are those under settings.POTFILES_SOURCE_DIR (filtered by
    extension), adjusted by the explicit include/exclude list in
    settings.SRC_POTFILES. Results are accumulated into msgs/reports in place.
    """
    def get_contexts():
        """Return a mapping {C_CTXT_NAME: ctxt_value}."""
        return {k: getattr(bpy.app.translations.contexts, n) for k, n in bpy.app.translations.contexts_C_to_py.items()}

    contexts = get_contexts()

    # Build regexes to extract messages (with optional contexts) from C source.
    pygettexts = tuple(re.compile(r).search for r in settings.PYGETTEXT_KEYWORDS)

    # Merge all 'clean' capture groups of a quoted C string into one python string.
    _clean_str = re.compile(settings.str_clean_re).finditer
    clean_str = lambda s: "".join(m.group("clean") for m in _clean_str(s))

    def dump_src_file(path, rel_path, msgs, reports, settings):
        """Scan one source file with every PYGETTEXT_KEYWORDS regex."""
        def process_entry(_msgctxt, _msgid):
            """Resolve raw (context, message) captures into final strings."""
            # Context: either a known C context name, or a quoted string literal.
            msgctxt = settings.DEFAULT_CONTEXT
            if _msgctxt:
                if _msgctxt in contexts:
                    msgctxt = contexts[_msgctxt]
                elif '"' in _msgctxt or "'" in _msgctxt:
                    msgctxt = clean_str(_msgctxt)
                else:
                    print("WARNING: raw context “{}” couldn’t be resolved!".format(_msgctxt))
            # Message: only quoted literals can be resolved statically.
            msgid = ""
            if _msgid:
                if '"' in _msgid or "'" in _msgid:
                    msgid = clean_str(_msgid)
                else:
                    print("WARNING: raw message “{}” couldn’t be resolved!".format(_msgid))
            return msgctxt, msgid

        check_ctxt_src = None
        if reports["check_ctxt"]:
            check_ctxt = reports["check_ctxt"]
            # Only the subset of checks that makes sense for C sources.
            check_ctxt_src = {
                "multi_lines": check_ctxt.get("multi_lines"),
                "not_capitalized": check_ctxt.get("not_capitalized"),
                "end_point": check_ctxt.get("end_point"),
                "spell_checker": check_ctxt.get("spell_checker"),
                "spell_errors": check_ctxt.get("spell_errors"),
            }

        data = ""
        with open(path) as f:
            data = f.read()
        for srch in pygettexts:
            m = srch(data)
            # NOTE(review): 'line' is 0-based here, so the reported ":<line>" is
            # one less than what editors show — confirm whether this is intended.
            line = pos = 0
            while m:
                d = m.groupdict()
                # Line.
                line += data[pos:m.start()].count('\n')
                msgsrc = rel_path + ":" + str(line)
                _msgid = d.get("msg_raw")
                # First, try the "multi-contexts" stuff!
                _msgctxts = tuple(d.get("ctxt_raw{}".format(i)) for i in range(settings.PYGETTEXT_MAX_MULTI_CTXT))
                if _msgctxts[0]:
                    # Same message registered under several contexts at once.
                    for _msgctxt in _msgctxts:
                        if not _msgctxt:
                            break
                        msgctxt, msgid = process_entry(_msgctxt, _msgid)
                        process_msg(msgs, msgctxt, msgid, msgsrc, reports, check_ctxt_src, settings)
                        reports["src_messages"].append((msgctxt, msgid, msgsrc))
                else:
                    # Single (possibly default) context.
                    _msgctxt = d.get("ctxt_raw")
                    msgctxt, msgid = process_entry(_msgctxt, _msgid)
                    process_msg(msgs, msgctxt, msgid, msgsrc, reports, check_ctxt_src, settings)
                    reports["src_messages"].append((msgctxt, msgid, msgsrc))

                pos = m.end()
                line += data[m.start():pos].count('\n')
                m = srch(data, pos)

    # Parse the SRC_POTFILES list: a leading '-' excludes a file, '#' is a
    # comment, anything else forces the file's inclusion.
    forbidden = set()
    forced = set()
    if os.path.isfile(settings.SRC_POTFILES):
        with open(settings.SRC_POTFILES) as src:
            for l in src:
                if l[0] == '-':
                    forbidden.add(l[1:].rstrip('\n'))
                elif l[0] != '#':
                    forced.add(l.rstrip('\n'))
    # Walk the source tree and add every allowed-extension file not excluded.
    for root, dirs, files in os.walk(settings.POTFILES_SOURCE_DIR):
        if "/.svn" in root:
            continue
        for fname in files:
            if os.path.splitext(fname)[1] not in settings.PYGETTEXT_ALLOWED_EXTS:
                continue
            path = os.path.join(root, fname)
            rel_path = os.path.relpath(path, settings.SOURCE_DIR)
            if rel_path in forbidden:
                continue
            elif rel_path not in forced:
                forced.add(rel_path)
    # Deterministic processing order.
    for rel_path in sorted(forced):
        path = os.path.join(settings.SOURCE_DIR, rel_path)
        if os.path.exists(path):
            dump_src_file(path, rel_path, msgs, reports, settings)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
##### Main functions! #####
 | 
			
		||||
def dump_messages(do_messages, do_checks, settings):
    """Extract all UI messages (RNA, py UI code, C sources, language labels) into a POT.

    Runs the optional consistency checks when do_checks is True, and writes
    the POT file (settings.FILE_NAME_POT) when do_messages is True.
    """
    blender_ver = "Blender " + bpy.app.version_string
    blender_rev = bpy.app.build_revision
    blender_date = datetime.datetime.strptime(bpy.app.build_date.decode() + "T" + bpy.app.build_time.decode(),
                                              "%Y-%m-%dT%H:%M:%S")
    pot = utils.I18nMessages.gen_empty_messages(settings.PARSER_TEMPLATE_ID, blender_ver, blender_rev,
                                                blender_date, blender_date.year, settings=settings)
    msgs = pot.msgs

    # Enable all wanted addons.
    # For now, enable all official addons, before extracting msgids.
    addons = enable_addons(support={"OFFICIAL"})
    # Note this is not needed if we have been started with factory settings, but just in case...
    enable_addons(support={"COMMUNITY", "TESTING"}, disable=True)

    reports = _gen_reports(_gen_check_ctxt(settings) if do_checks else None)

    # Get strings from RNA.
    dump_messages_rna(msgs, reports, settings)

    # Get strings from UI layout definitions text="..." args.
    dump_py_messages(msgs, reports, addons, settings)

    # Get strings from C source code.
    dump_src_messages(msgs, reports, settings)

    # Get strings specific to translations' menu.
    for lng in settings.LANGUAGES:
        process_msg(msgs, settings.DEFAULT_CONTEXT, lng[1], "Languages’ labels from bl_i18n_utils/settings.py",
                    reports, None, settings)
    for cat in settings.LANGUAGES_CATEGORIES:
        process_msg(msgs, settings.DEFAULT_CONTEXT, cat[1],
                    "Language categories’ labels from bl_i18n_utils/settings.py", reports, None, settings)

    #pot.check()
    pot.unescape()  # Strings gathered in py/C source code may contain escaped chars...
    print_info(reports, pot)
    #pot.check()

    if do_messages:
        print("Writing messages…")
        pot.write('PO', settings.FILE_NAME_POT)

    print("Finished extracting UI messages!")
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def dump_addon_messages(module_name, messages_formats, do_checks, settings):
    """Extract a single addon's own UI messages into a POT.

    Scans RNA with the addon enabled, then disabled, and keeps only the
    difference, plus the messages from the addon's py UI code.
    Returns the resulting utils.I18nMessages POT.

    Note: *messages_formats* is currently unused (kept for interface
    compatibility).
    """
    # Enable our addon and get its bl_info metadata.
    # NOTE(review): enabling here means 'was_loaded' (read below) will reflect
    # the already-enabled state — confirm this early enable is intended.
    addon = enable_addons(addons={module_name})[0]

    addon_info = addon_utils.module_bl_info(addon)
    # bl_info is a dict (see its use elsewhere in this file:
    # module_bl_info(mod)["support"]); "version" is a sequence of ints, so
    # each component must be stringified before joining.
    ver = addon_info["name"] + " " + ".".join(str(v) for v in addon_info["version"])
    rev = "???"
    # datetime.datetime() with no arguments raises TypeError; use now().
    date = datetime.datetime.now()
    pot = utils.I18nMessages.gen_empty_messages(settings.PARSER_TEMPLATE_ID, ver, rev, date, date.year,
                                                settings=settings)
    msgs = pot.msgs

    # Baseline copy, filled by the scan done *without* the addon.
    minus_msgs = copy.deepcopy(msgs)

    check_ctxt = _gen_check_ctxt(settings) if do_checks else None
    minus_check_ctxt = _gen_check_ctxt(settings) if do_checks else None

    # Get current addon state (loaded or not):
    was_loaded = addon_utils.check(module_name)[1]

    # Enable our addon and get strings from RNA.
    addons = enable_addons(addons={module_name})
    reports = _gen_reports(check_ctxt)
    dump_messages_rna(msgs, reports, settings)

    # Now disable our addon, and rescan RNA.
    enable_addons(addons={module_name}, disable=True)
    reports["check_ctxt"] = minus_check_ctxt
    dump_messages_rna(minus_msgs, reports, settings)

    # Restore previous state if needed!
    if was_loaded:
        enable_addons(addons={module_name})

    # And make the diff: drop every message also found without the addon.
    for key in minus_msgs:
        if key == settings.PO_HEADER_KEY:
            continue
        # Guarded delete: a key seen without the addon may be absent here.
        if key in msgs:
            del msgs[key]

    if check_ctxt:
        for key in check_ctxt:
            # NOTE(review): remove() will raise if a warning from the second
            # scan was not collected during the first one — confirm the
            # warning containers always overlap as expected.
            for warning in minus_check_ctxt[key]:
                check_ctxt[key].remove(warning)

    # and we are done with those!
    del minus_msgs
    del minus_check_ctxt

    # Get strings from UI layout definitions text="..." args.
    # (Was calling undefined 'dump_messages_pytext', a NameError at runtime;
    # dump_py_messages is the function doing this job in this file.)
    reports["check_ctxt"] = check_ctxt
    dump_py_messages(msgs, reports, addons, settings)

    print_info(reports, pot)

    return pot
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def main():
    """Entry point when run from inside Blender (``blender ... --python this_file -- [args]``).

    Parses the args after the ``--`` separator and runs dump_messages().
    Always restores Blender's original sys.argv, even on failure.
    """
    try:
        import bpy
    except ImportError:
        print("This script must run from inside blender")
        return

    import sys
    back_argv = sys.argv
    # Get rid of Blender args!
    sys.argv = sys.argv[sys.argv.index("--") + 1:]

    try:
        import argparse
        parser = argparse.ArgumentParser(description="Process UI messages from inside Blender.")
        # store_false with default=True: passing the flag *disables* the feature.
        parser.add_argument('-c', '--no_checks', default=True, action="store_false",
                            help="No checks over UI messages.")
        parser.add_argument('-m', '--no_messages', default=True, action="store_false",
                            help="No export of UI messages.")
        parser.add_argument('-o', '--output', default=None, help="Output POT file path.")
        parser.add_argument('-s', '--settings', default=None,
                            help="Override (some) default settings. Either a JSon file name, or a JSon string.")
        args = parser.parse_args()

        settings = i18n_settings.I18nSettings()
        settings.from_json(args.settings)

        if args.output:
            settings.FILE_NAME_POT = args.output

        dump_messages(do_messages=args.no_messages, do_checks=args.no_checks, settings=settings)
    finally:
        # Restore Blender's argv even if extraction raised (was only restored
        # on success before).
        sys.argv = back_argv
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# Allow running this file directly (it still requires Blender's environment).
if __name__ == "__main__":
    print("\n\n *** Running {} *** \n".format(__file__))
    main()
 | 
			
		||||
@@ -1,762 +0,0 @@
 | 
			
		||||
# ***** BEGIN GPL LICENSE BLOCK *****
 | 
			
		||||
#
 | 
			
		||||
# This program is free software; you can redistribute it and/or
 | 
			
		||||
# modify it under the terms of the GNU General Public License
 | 
			
		||||
# as published by the Free Software Foundation; either version 2
 | 
			
		||||
# of the License, or (at your option) any later version.
 | 
			
		||||
#
 | 
			
		||||
# This program is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU General Public License
 | 
			
		||||
# along with this program; if not, write to the Free Software Foundation,
 | 
			
		||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 | 
			
		||||
#
 | 
			
		||||
# ***** END GPL LICENSE BLOCK *****
 | 
			
		||||
 | 
			
		||||
# <pep8 compliant>
 | 
			
		||||
 | 
			
		||||
# Write out messages.txt from Blender.
 | 
			
		||||
# XXX: This script is meant to be used from inside Blender!
 | 
			
		||||
#      You should not directly use this script, rather use update_msg.py!
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import re
 | 
			
		||||
import collections
 | 
			
		||||
import copy
 | 
			
		||||
 | 
			
		||||
# XXX Relative import does not work here when used from Blender...
 | 
			
		||||
from bl_i18n_utils import settings
 | 
			
		||||
 | 
			
		||||
import bpy
 | 
			
		||||
 | 
			
		||||
# Module-level aliases for the settings values used throughout this file.
# (Removed stray debug leftover `print(dir(settings))` that ran at import time.)
SOURCE_DIR = settings.SOURCE_DIR

CUSTOM_PY_UI_FILES = [os.path.abspath(os.path.join(SOURCE_DIR, p)) for p in settings.CUSTOM_PY_UI_FILES]
FILE_NAME_MESSAGES = settings.FILE_NAME_MESSAGES
MSG_COMMENT_PREFIX = settings.MSG_COMMENT_PREFIX
MSG_CONTEXT_PREFIX = settings.MSG_CONTEXT_PREFIX
CONTEXT_DEFAULT = settings.CONTEXT_DEFAULT
#CONTEXT_DEFAULT = bpy.app.i18n.contexts.default # XXX Not yet! :)
UNDOC_OPS_STR = settings.UNDOC_OPS_STR

# Messages that are allowed not to start with a capital letter.
NC_ALLOWED = settings.WARN_MSGID_NOT_CAPITALIZED_ALLOWED
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
##### Utils #####
 | 
			
		||||
 | 
			
		||||
# check for strings like ": %d" — a message made only of punctuation, format
# specifiers, digits and whitespace carries nothing translatable.
ignore_reg = re.compile(r"^(?:[-*.()/\\+:%xWXYZ0-9]|%d|%f|%s|%r|\s)*$")
filter_message = ignore_reg.match  # truthy when the message should be skipped
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def check(check_ctxt, messages, key, msgsrc):
    """Run the enabled sanity checks on one (context, message) key.

    Each enabled check is a container stored in *check_ctxt*; offending keys
    are added to the matching container. Passing None disables everything.
    """
    if check_ctxt is None:
        return

    msgid = key[1]

    tgt = check_ctxt.get("multi_rnatip")
    if tgt is not None and key in messages and key not in tgt:
        # Same tip already registered for another RNA item.
        tgt.add(key)

    tgt = check_ctxt.get("multi_lines")
    if tgt is not None and '\n' in msgid:
        tgt.add(key)

    tgt = check_ctxt.get("py_in_rna")
    if tgt is not None and key in tgt[1]:
        # (result_set, py_messages) pair: RNA message also used in py UI code.
        tgt[0].add(key)

    tgt = check_ctxt.get("not_capitalized")
    if (tgt is not None and msgid not in NC_ALLOWED
            and msgid[0].isalpha() and not msgid[0].isupper()):
        tgt.add(key)

    tgt = check_ctxt.get("end_point")
    if tgt is not None and msgid.strip().endswith('.'):
        tgt.add(key)

    tgt = check_ctxt.get("undoc_ops")
    if tgt is not None and msgid == UNDOC_OPS_STR:
        tgt.add(key)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def print_warnings(check_ctxt, messages):
    """Pretty-print every warning collected in *check_ctxt*, with its sources."""
    if check_ctxt is None:
        return

    print("WARNINGS:")
    flagged = set()
    for bucket in check_ctxt.values():
        flagged |= bucket
    # XXX Temp, see below
    flagged -= check_ctxt["multi_rnatip"]

    for key in flagged:
        if key in check_ctxt["undoc_ops"]:
            print("\tThe following operators are undocumented:")
        else:
            print("\t“{}”|“{}”:".format(*key))
            if key in check_ctxt["multi_lines"]:
                print("\t\t-> newline in this message!")
            if key in check_ctxt["not_capitalized"]:
                print("\t\t-> message not capitalized!")
            if key in check_ctxt["end_point"]:
                print("\t\t-> message with endpoint!")
            # XXX Hide this one for now, too much false positives.
#            if key in check_ctxt["multi_rnatip"]:
#                print("\t\t-> tip used in several RNA items")
            if key in check_ctxt["py_in_rna"]:
                print("\t\t-> RNA message also used in py UI code:")
        print("\t\t{}".format("\n\t\t".join(messages[key])))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def enable_addons(addons=None, support=None, disable=False):
    """
    Enable (or disable) addons based either on a set of names, or a set of 'support' types.
    Returns the list of all affected addons (as fake modules)!
    """
    import addon_utils
    import bpy

    # Defaults are None (not {}) to avoid the shared mutable default-argument
    # pitfall; behavior with omitted arguments is unchanged (empty containers).
    if addons is None:
        addons = set()
    if support is None:
        support = set()

    userpref = bpy.context.user_preferences
    used_ext = {ext.module for ext in userpref.addons}

    # Select by explicit name when given, else by bl_info "support" category.
    ret = [mod for mod in addon_utils.modules(addon_utils.addons_fake_modules)
           if ((addons and mod.__name__ in addons) or
               (not addons and addon_utils.module_bl_info(mod)["support"] in support))]

    for mod in ret:
        module_name = mod.__name__
        if disable:
            if module_name not in used_ext:
                continue  # Already disabled.
            print("    Disabling module ", module_name)
            bpy.ops.wm.addon_disable(module=module_name)
        else:
            if module_name in used_ext:
                continue  # Already enabled.
            print("    Enabling module ", module_name)
            bpy.ops.wm.addon_enable(module=module_name)

    # XXX There are currently some problems with bpy/rna...
    #     *Very* tricky to solve!
    #     So this is a hack to make all newly added operator visible by
    #     bpy.types.OperatorProperties.__subclasses__()
    for cat in dir(bpy.ops):
        cat = getattr(bpy.ops, cat)
        for op in dir(cat):
            getattr(cat, op).get_rna()

    return ret
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
##### RNA #####
 | 
			
		||||
 | 
			
		||||
def dump_messages_rna(messages, check_ctxt):
    """
    Dump into messages dict all RNA-defined UI messages (labels and tooltips).

    messages maps (context, message) keys to lists of "source" strings;
    check_ctxt (may be None) holds the warning sets filled by check().
    """
    import bpy

    def classBlackList():
        # Classes whose labels/tips must not be extracted.
        blacklist_rna_class = [
                               # core classes
                               "Context", "Event", "Function", "UILayout", "BlendData",
                               # registerable classes
                               "Panel", "Menu", "Header", "RenderEngine", "Operator", "OperatorMacro", "Macro",
                               "KeyingSetInfo", "UnknownType",
                               # window classes
                               "Window",
                               ]

        # ---------------------------------------------------------------------
        # Collect internal operators

        # extend with all internal operators
        # note that this uses internal api introspection functions
        # all possible operator names
        op_ids = set(cls.bl_rna.identifier for cls in bpy.types.OperatorProperties.__subclasses__()) | \
                 set(cls.bl_rna.identifier for cls in bpy.types.Operator.__subclasses__()) | \
                 set(cls.bl_rna.identifier for cls in bpy.types.OperatorMacro.__subclasses__())

        get_instance = __import__("_bpy").ops.get_instance
        path_resolve = type(bpy.context).__base__.path_resolve
        # NOTE(review): with the INTERNAL filter commented out below, this loop
        # only instantiates each operator and discards the result — confirm
        # whether get_instance() is kept for a side effect or is dead code.
        for idname in op_ids:
            op = get_instance(idname)
            # XXX Do not skip INTERNAL's anymore, some of those ops show up in UI now!
#            if 'INTERNAL' in path_resolve(op, "bl_options"):
#                blacklist_rna_class.append(idname)

        # ---------------------------------------------------------------------
        # Collect builtin classes we don't need to doc
        blacklist_rna_class.append("Property")
        blacklist_rna_class.extend([cls.__name__ for cls in bpy.types.Property.__subclasses__()])

        # ---------------------------------------------------------------------
        # Collect classes which are attached to collections, these are api
        # access only.
        collection_props = set()
        for cls_id in dir(bpy.types):
            cls = getattr(bpy.types, cls_id)
            for prop in cls.bl_rna.properties:
                if prop.type == 'COLLECTION':
                    prop_cls = prop.srna
                    if prop_cls is not None:
                        collection_props.add(prop_cls.identifier)
        blacklist_rna_class.extend(sorted(collection_props))

        return blacklist_rna_class

    blacklist_rna_class = classBlackList()

    def filterRNA(bl_rna):
        # True when this RNA class must be skipped entirely.
        rid = bl_rna.identifier
        if rid in blacklist_rna_class:
            print("  skipping", rid)
            return True
        return False

    check_ctxt_rna = check_ctxt_rna_tip = None
    if check_ctxt:
        check_ctxt_rna = {"multi_lines": check_ctxt.get("multi_lines"),
                          "not_capitalized": check_ctxt.get("not_capitalized"),
                          "end_point": check_ctxt.get("end_point"),
                          "undoc_ops": check_ctxt.get("undoc_ops")}
        # NOTE(review): this aliases the same dict, so adding "multi_rnatip"
        # below also enables that check for labels (via check_ctxt_rna), not
        # only for tips — confirm whether a copy was intended here.
        check_ctxt_rna_tip = check_ctxt_rna
        check_ctxt_rna_tip["multi_rnatip"] = check_ctxt.get("multi_rnatip")

    # -------------------------------------------------------------------------
    # Function definitions

    def walkProperties(bl_rna):
        # Extract names/descriptions of all own (non-inherited) properties.
        import bpy

        # Get our parents' properties, to not export them multiple times.
        bl_rna_base = bl_rna.base
        if bl_rna_base:
            bl_rna_base_props = bl_rna_base.properties.values()
        else:
            bl_rna_base_props = ()

        for prop in bl_rna.properties:
            # Only write this property if our parent hasn't got it.
            if prop in bl_rna_base_props:
                continue
            if prop.identifier == "rna_type":
                continue

            msgsrc = "bpy.types.{}.{}".format(bl_rna.identifier, prop.identifier)
            context = getattr(prop, "translation_context", CONTEXT_DEFAULT)
            # Skip names identical to identifiers (nothing to translate) unless
            # a context is set.
            if prop.name and (prop.name != prop.identifier or context):
                key = (context, prop.name)
                check(check_ctxt_rna, messages, key, msgsrc)
                messages.setdefault(key, []).append(msgsrc)
            if prop.description:
                key = (CONTEXT_DEFAULT, prop.description)
                check(check_ctxt_rna_tip, messages, key, msgsrc)
                messages.setdefault(key, []).append(msgsrc)
            # Enum properties: extract each item's name and description too.
            if isinstance(prop, bpy.types.EnumProperty):
                for item in prop.enum_items:
                    msgsrc = "bpy.types.{}.{}:'{}'".format(bl_rna.identifier,
                                                            prop.identifier,
                                                            item.identifier)
                    if item.name and item.name != item.identifier:
                        key = (CONTEXT_DEFAULT, item.name)
                        check(check_ctxt_rna, messages, key, msgsrc)
                        messages.setdefault(key, []).append(msgsrc)
                    if item.description:
                        key = (CONTEXT_DEFAULT, item.description)
                        check(check_ctxt_rna_tip, messages, key, msgsrc)
                        messages.setdefault(key, []).append(msgsrc)

    def walkRNA(bl_rna):
        # Extract this class's own name/description/label, then its properties.
        if filterRNA(bl_rna):
            return

        msgsrc = ".".join(("bpy.types", bl_rna.identifier))
        context = getattr(bl_rna, "translation_context", CONTEXT_DEFAULT)

        if bl_rna.name and (bl_rna.name != bl_rna.identifier or context):
            key = (context, bl_rna.name)
            check(check_ctxt_rna, messages, key, msgsrc)
            messages.setdefault(key, []).append(msgsrc)

        if bl_rna.description:
            key = (CONTEXT_DEFAULT, bl_rna.description)
            check(check_ctxt_rna_tip, messages, key, msgsrc)
            messages.setdefault(key, []).append(msgsrc)

        if hasattr(bl_rna, 'bl_label') and  bl_rna.bl_label:
            key = (context, bl_rna.bl_label)
            check(check_ctxt_rna, messages, key, msgsrc)
            messages.setdefault(key, []).append(msgsrc)

        walkProperties(bl_rna)

    def walkClass(cls):
        walkRNA(cls.bl_rna)

    def walk_keymap_hierarchy(hier, msgsrc_prev):
        # Each hierarchy entry is indexable: [0] label, [1] identifier,
        # [3] children (recursed into when non-empty).
        for lvl in hier:
            msgsrc = "{}.{}".format(msgsrc_prev, lvl[1])
            messages.setdefault((CONTEXT_DEFAULT, lvl[0]), []).append(msgsrc)

            if lvl[3]:
                walk_keymap_hierarchy(lvl[3], msgsrc)

    # -------------------------------------------------------------------------
    # Dump Messages

    def process_cls_list(cls_list):
        # Walk the given classes (sorted for determinism) and recurse into
        # their subclasses; returns the number of classes processed.
        if not cls_list:
            return 0

        def full_class_id(cls):
            """ gives us 'ID.Lamp.AreaLamp' which is best for sorting.
            """
            cls_id = ""
            bl_rna = cls.bl_rna
            while bl_rna:
                cls_id = "{}.{}".format(bl_rna.identifier, cls_id)
                bl_rna = bl_rna.base
            return cls_id

        cls_list.sort(key=full_class_id)
        processed = 0
        for cls in cls_list:
            # XXX translation_context of Operator sub-classes are not "good"!
            #     So ignore those Operator sub-classes (anyway, will get the same from OperatorProperties
            #     sub-classes!)...
            if issubclass(cls, bpy.types.Operator):
                continue

            walkClass(cls)
#            classes.add(cls)
            # Recursively process subclasses.
            processed += process_cls_list(cls.__subclasses__()) + 1
        return processed

    # Parse everything (recursively parsing from bpy_struct "class"...).
    processed = process_cls_list(type(bpy.context).__base__.__subclasses__())
    print("{} classes processed!".format(processed))

    from bpy_extras.keyconfig_utils import KM_HIERARCHY

    walk_keymap_hierarchy(KM_HIERARCHY, "KM_HIERARCHY")
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
##### Python source code #####
 | 
			
		||||
 | 
			
		||||
def dump_py_messages_from_files(messages, check_ctxt, files):
    """
    Dump text inlined in the python files given, e.g. 'My Name' in:
        layout.prop("someprop", text="My Name")

    :param messages: dict {(context, message): [source strings]} filled in-place.
    :param check_ctxt: optional dict of QC sets (see dump_messages), or None.
    :param files: iterable of python source file paths to parse.
    """
    import ast

    # NOTE(review): appears unused in this function — TODO confirm before removing.
    bpy_struct = bpy.types.ID.__base__

    # Helper function
    def extract_strings_ex(node, is_split=False):
        """
        Recursively get strings, needed in case we have "Blah" + "Blah", passed as an argument in that case it won't
        evaluate to a string. However, break on some kind of stopper nodes, like e.g. Subscript.

        Yields (is_split, string, (ast nodes,)) tuples.
        """
        if type(node) == ast.Str:
            eval_str = ast.literal_eval(node)
            if eval_str:
                yield (is_split, eval_str, (node,))
        else:
            # Mark children of "separating" nodes (e.g. IfExp) so that their
            # strings end up in distinct groups in extract_strings_split().
            is_split = (type(node) in separate_nodes)
            for nd in ast.iter_child_nodes(node):
                if type(nd) not in stopper_nodes:
                    yield from extract_strings_ex(nd, is_split=is_split)

    def _extract_string_merge(estr_ls, nds_ls):
        # Merge collected fragments into one message + its source nodes.
        return "".join(s for s in estr_ls if s is not None), tuple(n for n in nds_ls if n is not None)

    def extract_strings(node):
        """Like extract_strings_ex(), but merges everything into a single (string, nodes) pair."""
        estr_ls = []
        nds_ls = []
        for is_split, estr, nds in extract_strings_ex(node):
            estr_ls.append(estr)
            nds_ls.extend(nds)
        ret = _extract_string_merge(estr_ls, nds_ls)
        #print(ret)
        return ret

    def extract_strings_split(node):
        """
        Returns a list args as returned by 'extract_strings()',
        But split into groups based on separate_nodes, this way
        expressions like ("A" if test else "B") wont be merged but
        "A" + "B" will.
        """
        estr_ls = []
        nds_ls = []
        bag = []
        for is_split, estr, nds in extract_strings_ex(node):
            if is_split:
                # Close the current group and start a fresh one.
                bag.append((estr_ls, nds_ls))
                estr_ls = []
                nds_ls = []

            estr_ls.append(estr)
            nds_ls.extend(nds)

        bag.append((estr_ls, nds_ls))

        return [_extract_string_merge(estr_ls, nds_ls) for estr_ls, nds_ls in bag]

    def _ctxt_to_ctxt(node):
        # A context keyword argument is itself a (possibly concatenated) string.
        return extract_strings(node)[0]

    def _op_to_ctxt(node):
        # Resolve an operator name ("mesh.primitive_cube_add") to its RNA
        # translation context, falling back to the default operator context.
        opname, _ = extract_strings(node)
        if not opname:
            return ""
        op = bpy.ops
        for n in opname.split('.'):
            op = getattr(op, n)
        try:
            return op.get_rna().bl_rna.translation_context
        except Exception as e:
            default_op_context = bpy.app.translations.contexts.operator_default
            print("ERROR: ", str(e))
            print("       Assuming default operator context '{}'".format(default_op_context))
            return default_op_context

    # -------------------------------------------------------------------------
    # Gather function names

    # In addition of UI func, also parse pgettext ones...
    # Tuples of (module name, (short names, ...)).
    pgettext_variants = (
        ("pgettext", ("_",)),
        ("pgettext_iface", ("iface_",)),
        ("pgettext_tip", ("tip_",))
    )
    pgettext_variants_args = {"msgid": (0, {"msgctxt": 1})}

    # key: msgid keywords.
    # val: tuples of ((keywords,), context_getter_func) to get a context for that msgid.
    #      Note: order is important, first one wins!
    translate_kw = {
        "text": ((("text_ctxt",), _ctxt_to_ctxt),
                 (("operator",), _op_to_ctxt),
                ),
        "msgid": ((("msgctxt",), _ctxt_to_ctxt),
                 ),
    }

    # For each msgid keyword, the flat set of all context keywords it may use.
    context_kw_set = {}
    for k, ctxts in translate_kw.items():
        s = set()
        for c, _ in ctxts:
            s |= set(c)
        context_kw_set[k] = s

    # {func_id: {msgid: (arg_pos,
    #                    {msgctxt: arg_pos,
    #                     ...
    #                    }
    #                   ),
    #            ...
    #           },
    #  ...
    # }
    func_translate_args = {}

    # First, functions from UILayout
    # First loop is for msgid args, second one is for msgctxt args.
    for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
        # check it has one or more arguments as defined in translate_kw
        for arg_pos, (arg_kw, arg) in enumerate(func.parameters.items()):
            if ((arg_kw in translate_kw) and (not arg.is_output) and (arg.type == 'STRING')):
                func_translate_args.setdefault(func_id, {})[arg_kw] = (arg_pos, {})
    for func_id, func in bpy.types.UILayout.bl_rna.functions.items():
        if func_id not in func_translate_args:
            continue
        for arg_pos, (arg_kw, arg) in enumerate(func.parameters.items()):
            if (not arg.is_output) and (arg.type == 'STRING'):
                for msgid, msgctxts in context_kw_set.items():
                    if arg_kw in msgctxts:
                        func_translate_args[func_id][msgid][1][arg_kw] = arg_pos
    # We manually add funcs from bpy.app.translations
    for func_id, func_ids in pgettext_variants:
        func_translate_args[func_id] = pgettext_variants_args
        for func_id in func_ids:
            func_translate_args[func_id] = pgettext_variants_args
    #print(func_translate_args)

    # Break recursive nodes look up on some kind of nodes.
    # E.g. we don’t want to get strings inside subscripts (blah["foo"])!
    stopper_nodes = {ast.Subscript}
    # Consider strings separate: ("a" if test else "b")
    separate_nodes = {ast.IfExp}

    check_ctxt_py = None
    if check_ctxt:
        # "py_in_rna" additionally needs a snapshot of the messages gathered
        # so far from RNA, to detect duplicates.
        check_ctxt_py = {"py_in_rna": (check_ctxt["py_in_rna"], messages.copy()),
                         "multi_lines": check_ctxt["multi_lines"],
                         "not_capitalized": check_ctxt["not_capitalized"],
                         "end_point": check_ctxt["end_point"]}

    for fp in files:
        with open(fp, 'r', encoding="utf8") as filedata:
            root_node = ast.parse(filedata.read(), fp, 'exec')

        fp_rel = os.path.relpath(fp, SOURCE_DIR)

        for node in ast.walk(root_node):
            if type(node) == ast.Call:
                # print("found function at")
                # print("%s:%d" % (fp, node.lineno))

                # We can't skip such situations! from blah import foo\nfoo("bar") would also be an ast.Name func!
                if type(node.func) == ast.Name:
                    func_id = node.func.id
                elif hasattr(node.func, "attr"):
                    func_id = node.func.attr
                # Ugly things like getattr(self, con.type)(context, box, con)
                else:
                    continue

                func_args = func_translate_args.get(func_id, {})

                # First try to get i18n contexts, for every possible msgid id.
                contexts = dict.fromkeys(func_args.keys(), "")
                for msgid, (_, context_args) in func_args.items():
                    context_elements = {}
                    for arg_kw, arg_pos in context_args.items():
                        # Positional if within range, otherwise look for a keyword arg.
                        if arg_pos < len(node.args):
                            context_elements[arg_kw] = node.args[arg_pos]
                        else:
                            for kw in node.keywords:
                                if kw.arg == arg_kw:
                                    context_elements[arg_kw] = kw.value
                                    break
                    #print(context_elements)
                    for kws, proc in translate_kw[msgid]:
                        if set(kws) <= context_elements.keys():
                            args = tuple(context_elements[k] for k in kws)
                            #print("running ", proc, " with ", args)
                            ctxt = proc(*args)
                            if ctxt:
                                contexts[msgid] = ctxt
                                break

                #print(translate_args)
                # do nothing if not found
                for arg_kw, (arg_pos, _) in func_args.items():
                    estr_lst = [(None, ())]
                    if arg_pos < len(node.args):
                        estr_lst = extract_strings_split(node.args[arg_pos])
                        #print(estr, nds)
                    else:
                        for kw in node.keywords:
                            if kw.arg == arg_kw:
                                estr_lst = extract_strings_split(kw.value)
                                break
                        #print(estr, nds)
                    for estr, nds in estr_lst:
                        if estr:
                            key = (contexts[arg_kw], estr)
                            if nds:
                                # Use the first (lowest) line number as the source reference.
                                msgsrc = ["{}:{}".format(fp_rel, sorted({nd.lineno for nd in nds})[0])]
                            else:
                                msgsrc = ["{}:???".format(fp_rel)]
                            # 'check' is a module-level QC helper defined elsewhere in this file.
                            check(check_ctxt_py, messages, key, msgsrc)
                            messages.setdefault(key, []).extend(msgsrc)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def dump_py_messages(messages, check_ctxt, addons):
    """
    Dump messages from all py files in bl_ui, the extra custom py UI files,
    and the sources of the given addon modules.

    :param messages: dict {(context, message): [sources]} filled in-place.
    :param check_ctxt: optional dict of QC sets (see dump_messages), or None.
    :param addons: iterable of addon modules whose sources should be parsed too.
    """
    mod_dir = os.path.join(SOURCE_DIR, "release", "scripts", "startup", "bl_ui")

    files = [os.path.join(mod_dir, fn) for fn in sorted(os.listdir(mod_dir))
             if not fn.startswith("_") if fn.endswith("py")]

    # Dummy Cycles has its py addon in its own dir!
    files += CUSTOM_PY_UI_FILES

    # Add all addons we support in main translation file!
    for mod in addons:
        fn = mod.__file__
        if os.path.basename(fn) == "__init__.py":
            mod_dir = os.path.dirname(fn)
            # NOTE: os.listdir() returns bare names — they must be joined with
            # mod_dir both for the isfile() check and for the stored paths
            # (the original code tested/stored bare names relative to the CWD,
            # so addon package files were silently never collected).
            files += [os.path.join(mod_dir, fn) for fn in sorted(os.listdir(mod_dir))
                      if os.path.isfile(os.path.join(mod_dir, fn)) and os.path.splitext(fn)[1] == ".py"]
        else:
            files.append(fn)

    dump_py_messages_from_files(messages, check_ctxt, files)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
##### Main functions! #####
 | 
			
		||||
 | 
			
		||||
def dump_messages(do_messages, do_checks):
    """
    Extract all translatable messages (RNA + py UI code) and optionally write
    them out to FILE_NAME_MESSAGES.

    :param do_messages: when True, write the gathered messages to disk.
    :param do_checks: when True, run QC checks and print their warnings.
    """
    # Keep insertion order when OrderedDict is available (plain dict otherwise).
    msgs = getattr(collections, 'OrderedDict', dict)()

    # Placeholder entry for the default context, removed once dumping is done.
    msgs[(CONTEXT_DEFAULT, "")] = []

    # Enable all wanted addons.
    # For now, enable all official addons, before extracting msgids.
    addons = enable_addons(support={"OFFICIAL"})

    check_ctxt = None
    if do_checks:
        # One set of warnings per QC category.
        check_ctxt = {key: set() for key in ("multi_rnatip", "multi_lines",
                                             "py_in_rna", "not_capitalized",
                                             "end_point", "undoc_ops")}

    # get strings from RNA
    dump_messages_rna(msgs, check_ctxt)

    # get strings from UI layout definitions text="..." args
    dump_py_messages(msgs, check_ctxt, addons)

    del msgs[(CONTEXT_DEFAULT, "")]

    print_warnings(check_ctxt, msgs)

    if not do_messages:
        return

    print("Writing messages…")
    num_written = 0
    num_filtered = 0
    with open(FILE_NAME_MESSAGES, 'w', encoding="utf8") as fh:
        for (ctx, key), srcs in msgs.items():
            # filter out junk values
            if filter_message(key):
                num_filtered += 1
                continue

            # Remove newlines in key and values!
            fh.write("\n".join(MSG_COMMENT_PREFIX + src.replace("\n", "") for src in srcs))
            fh.write("\n")
            if ctx:
                fh.write(MSG_CONTEXT_PREFIX + ctx.replace("\n", "") + "\n")
            fh.write(key.replace("\n", "") + "\n")
            num_written += 1

    print("Written {} messages to: {} ({} were filtered out)."
          "".format(num_written, FILE_NAME_MESSAGES, num_filtered))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def dump_addon_messages(module_name, messages_formats, do_checks):
    """
    Extract translatable messages belonging to a single addon, by diffing the
    RNA messages found with the addon enabled against those found with it
    disabled, then optionally write them to FILE_NAME_MESSAGES.

    :param module_name: module name of the addon to process.
    :param messages_formats: write-toggle for the output file.
    :param do_checks: when True, run QC checks and print their warnings.
    """
    import sys

    # NOTE(review): the original body referenced an undefined 'do_messages';
    # assuming 'messages_formats' is the write-toggle — TODO confirm with callers.
    do_messages = bool(messages_formats)

    messages = getattr(collections, 'OrderedDict', dict)()

    messages[(CONTEXT_DEFAULT, "")] = []
    minus_messages = copy.deepcopy(messages)

    check_ctxt = None
    minus_check_ctxt = None
    if do_checks:
        check_ctxt = {"multi_rnatip": set(),
                      "multi_lines": set(),
                      "py_in_rna": set(),
                      "not_capitalized": set(),
                      "end_point": set(),
                      "undoc_ops": set()}
        minus_check_ctxt = copy.deepcopy(check_ctxt)

    # Get current addon state (loaded or not):
    was_loaded = addon_utils.check(module_name)[1]

    # Enable our addon and get strings from RNA.
    enable_addons(addons={module_name})
    dump_messages_rna(messages, check_ctxt)

    # Now disable our addon, and rescan RNA.
    enable_addons(addons={module_name}, disable=True)
    dump_messages_rna(minus_messages, minus_check_ctxt)

    # Restore previous state if needed!
    if was_loaded:
        enable_addons(addons={module_name})

    # and make the diff!
    # (was: 'if k == ...' / 'del messages[k]' with an undefined 'k' -> NameError)
    for key in minus_messages:
        if key == (CONTEXT_DEFAULT, ""):
            continue
        # Guard: a key seen with the addon disabled is not guaranteed to also
        # have been seen with it enabled.
        if key in messages:
            del messages[key]

    if check_ctxt:
        for key in check_ctxt:
            for warning in minus_check_ctxt[key]:
                # discard() instead of remove(): the warning may legitimately
                # be absent from the addon-enabled scan.
                check_ctxt[key].discard(warning)

    # and we are done with those!
    del minus_messages
    del minus_check_ctxt

    # get strings from UI layout definitions text="..." args
    # (was: call to undefined 'dump_messages_pytext'; reuse dump_py_messages
    # with the addon's module, consistent with dump_messages()).
    addon_mod = sys.modules.get(module_name)
    dump_py_messages(messages, check_ctxt, [addon_mod] if addon_mod else [])

    del messages[(CONTEXT_DEFAULT, "")]

    # (was: bare 'print_warnings' reference — the function was never called)
    print_warnings(check_ctxt, messages)

    if do_messages:
        print("Writing messages…")
        num_written = 0
        num_filtered = 0
        with open(FILE_NAME_MESSAGES, 'w', encoding="utf8") as message_file:
            for (ctx, key), value in messages.items():
                # filter out junk values
                if filter_message(key):
                    num_filtered += 1
                    continue

                # Remove newlines in key and values!
                # (was: undefined COMMENT_PREFIX/CONTEXT_PREFIX; use the same
                # prefixes as dump_messages() for consistency)
                message_file.write("\n".join(MSG_COMMENT_PREFIX + msgsrc.replace("\n", "") for msgsrc in value))
                message_file.write("\n")
                if ctx:
                    message_file.write(MSG_CONTEXT_PREFIX + ctx.replace("\n", "") + "\n")
                message_file.write(key.replace("\n", "") + "\n")
                num_written += 1

        print("Written {} messages to: {} ({} were filtered out)."
              "".format(num_written, FILE_NAME_MESSAGES, num_filtered))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def main():
    """
    Command-line entry point; must be run from inside Blender, e.g.:
        blender --background --python <this script> -- [options]
    Only the arguments after the '--' separator are parsed here.
    """
    try:
        import bpy
    except ImportError:
        print("This script must run from inside blender")
        return

    import sys
    back_argv = sys.argv
    # Get rid of Blender args!
    sys.argv = sys.argv[sys.argv.index("--") + 1:]
    try:
        import argparse
        parser = argparse.ArgumentParser(description="Process UI messages from inside Blender.")
        # store_false with default=True: passing the flag *disables* the feature.
        parser.add_argument('-c', '--no_checks', default=True, action="store_false",
                            help="No checks over UI messages.")
        parser.add_argument('-m', '--no_messages', default=True, action="store_false",
                            help="No export of UI messages.")
        parser.add_argument('-o', '--output', help="Output messages file path.")
        args = parser.parse_args()

        if args.output:
            global FILE_NAME_MESSAGES
            FILE_NAME_MESSAGES = args.output

        dump_messages(do_messages=args.no_messages, do_checks=args.no_checks)
    finally:
        # Always restore Blender's argv, even if parsing or dumping raised
        # (the original left sys.argv mangled on any exception).
        sys.argv = back_argv
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    print("\n\n *** Running {} *** \n".format(__file__))
 | 
			
		||||
    main()
 | 
			
		||||
							
								
								
									
										96
									
								
								release/scripts/modules/bl_i18n_utils/languages_menu_utils.py
									
									
									
									
									
										Executable file
									
								
							
							
						
						
									
										96
									
								
								release/scripts/modules/bl_i18n_utils/languages_menu_utils.py
									
									
									
									
									
										Executable file
									
								
							@@ -0,0 +1,96 @@
 | 
			
		||||
# ***** BEGIN GPL LICENSE BLOCK *****
 | 
			
		||||
#
 | 
			
		||||
# This program is free software; you can redistribute it and/or
 | 
			
		||||
# modify it under the terms of the GNU General Public License
 | 
			
		||||
# as published by the Free Software Foundation; either version 2
 | 
			
		||||
# of the License, or (at your option) any later version.
 | 
			
		||||
#
 | 
			
		||||
# This program is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU General Public License
 | 
			
		||||
# along with this program; if not, write to the Free Software Foundation,
 | 
			
		||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 | 
			
		||||
#
 | 
			
		||||
# ***** END GPL LICENSE BLOCK *****
 | 
			
		||||
 | 
			
		||||
# <pep8 compliant>
 | 
			
		||||
 | 
			
		||||
# Update "languages" text file used by Blender at runtime to build translations menu.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# Status codes for a language entry in the generated menu file.
OK = 0         # Translation available and advanced enough to be listed.
MISSING = 1    # No po/stats entry for this language at all.
TOOLOW = 2     # Exists, but completeness is below IMPORT_MIN_LEVEL.
FORBIDDEN = 3  # Explicitly excluded via IMPORT_LANGUAGES_SKIP.

# Human-readable note written next to commented-out (non-OK) entries.
FLAG_MESSAGES = {
    OK: "",
    MISSING: "No translation yet!",
    TOOLOW: "Not enough advanced to be included...",
    FORBIDDEN: "Explicitly forbidden!",
}


def gen_menu_file(stats, settings):
    """
    Generate the 'languages' file used by Blender's i18n system to build its
    translations menu.

    :param stats: dict mapping a language uid (iso code) to its completeness
                  level (a float, higher means more complete).
    :param settings: object providing LANGUAGES, LANGUAGES_CATEGORIES,
                     IMPORT_LANGUAGES_SKIP, IMPORT_MIN_LEVEL, TRUNK_MO_DIR
                     and LANGUAGES_FILE.
    """
    # First, match all entries in LANGUAGES to a lang in stats, if possible!
    tmp = []
    for uid_num, label, uid in settings.LANGUAGES:
        if uid in stats:
            if uid in settings.IMPORT_LANGUAGES_SKIP:
                tmp.append((stats[uid], uid_num, label, uid, FORBIDDEN))
            else:
                tmp.append((stats[uid], uid_num, label, uid, OK))
        else:
            tmp.append((0.0, uid_num, label, uid, MISSING))
    stats = tmp

    # Highest completeness limit first; stats sorted the same way so a single
    # forward pass can bucket languages into categories.
    limits = sorted(settings.LANGUAGES_CATEGORIES, key=lambda it: it[0], reverse=True)
    idx = 0
    stats = sorted(stats, key=lambda it: it[0], reverse=True)
    langs_cats = [[] for _i in range(len(limits))]
    highest_uid = 0
    for lvl, uid_num, label, uid, flag in stats:
        # Advance to the matching category. This was a single 'if', which
        # both mis-filed languages when a whole category had to be skipped
        # and could push idx past the last category; 'while' with a bound
        # check fixes both issues.
        while idx < (len(limits) - 1) and lvl < limits[idx][0]:
            # Sub-sort languages by iso-codes.
            langs_cats[idx].sort(key=lambda it: it[2])
            idx += 1
        if lvl < settings.IMPORT_MIN_LEVEL and flag == OK:
            flag = TOOLOW
        langs_cats[idx].append((uid_num, label, uid, flag))
        if abs(uid_num) > highest_uid:
            highest_uid = abs(uid_num)
    # Sub-sort last group of languages by iso-codes!
    langs_cats[idx].sort(key=lambda it: it[2])

    data_lines = [
        "# File used by Blender to know which languages (translations) are available, ",
        "# and to generate translation menu.",
        "#",
        "# File format:",
        "# ID:MENULABEL:ISOCODE",
        "# ID must be unique, except for 0 value (marks categories for menu).",
        "# Line starting with a # are comments!",
        "#",
        "# Automatically generated by bl_i18n_utils/update_languages_menu.py script.",
        "# Highest ID currently in use: {}".format(highest_uid),
    ]
    for cat, langs_cat in zip(limits, langs_cats):
        data_lines.append("#")
        # Write "category menu label"...
        if langs_cat:
            data_lines.append("0:{}:".format(cat[1]))
        else:
            # Do not write the category if it has no language!
            data_lines.append("# Void category! #0:{}:".format(cat[1]))
        # ...and all matching language entries!
        for uid_num, label, uid, flag in langs_cat:
            if flag == OK:
                data_lines.append("{}:{}:{}".format(uid_num, label, uid))
            else:
                # Non-existing, commented entry!
                data_lines.append("# {} #{}:{}:{}".format(FLAG_MESSAGES[flag], uid_num, label, uid))

    # Explicit utf8: labels contain non-ASCII text, and the platform default
    # encoding is not guaranteed to handle it.
    with open(os.path.join(settings.TRUNK_MO_DIR, settings.LANGUAGES_FILE), 'w', encoding="utf8") as f:
        f.write("\n".join(data_lines))
 | 
			
		||||
@@ -36,18 +36,6 @@ import sys
 | 
			
		||||
import ctypes
 | 
			
		||||
import re
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    import settings
 | 
			
		||||
    import utils
 | 
			
		||||
except:
 | 
			
		||||
    from . import (settings, utils)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
FRIBIDI_LIB = settings.FRIBIDI_LIB
 | 
			
		||||
 | 
			
		||||
###### Import C library and recreate "defines". #####
 | 
			
		||||
fbd = ctypes.CDLL(FRIBIDI_LIB)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
#define FRIBIDI_MASK_NEUTRAL	0x00000040L	/* Is neutral */
 | 
			
		||||
FRIBIDI_PAR_ON = 0x00000040
 | 
			
		||||
@@ -80,12 +68,9 @@ FRIBIDI_FLAG_REMOVE_SPECIALS = 0x00040000
 | 
			
		||||
FRIBIDI_FLAG_SHAPE_ARAB_PRES = 0x00000100
 | 
			
		||||
FRIBIDI_FLAG_SHAPE_ARAB_LIGA = 0x00000200
 | 
			
		||||
 | 
			
		||||
FRIBIDI_FLAGS_DEFAULT = FRIBIDI_FLAG_SHAPE_MIRRORING | \
 | 
			
		||||
                        FRIBIDI_FLAG_REORDER_NSM | \
 | 
			
		||||
                        FRIBIDI_FLAG_REMOVE_SPECIALS
 | 
			
		||||
FRIBIDI_FLAGS_DEFAULT = FRIBIDI_FLAG_SHAPE_MIRRORING | FRIBIDI_FLAG_REORDER_NSM | FRIBIDI_FLAG_REMOVE_SPECIALS
 | 
			
		||||
 | 
			
		||||
FRIBIDI_FLAGS_ARABIC = FRIBIDI_FLAG_SHAPE_ARAB_PRES | \
 | 
			
		||||
                       FRIBIDI_FLAG_SHAPE_ARAB_LIGA
 | 
			
		||||
FRIBIDI_FLAGS_ARABIC = FRIBIDI_FLAG_SHAPE_ARAB_PRES | FRIBIDI_FLAG_SHAPE_ARAB_LIGA
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
MENU_DETECT_REGEX = re.compile("%x\\d+\\|")
 | 
			
		||||
@@ -158,11 +143,13 @@ def protect_format_seq(msg):
 | 
			
		||||
    return "".join(ret)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def log2vis(msgs):
 | 
			
		||||
def log2vis(msgs, settings):
 | 
			
		||||
    """
 | 
			
		||||
    Globally mimics deprecated fribidi_log2vis.
 | 
			
		||||
    msgs should be an iterable of messages to rtl-process.
 | 
			
		||||
    """
 | 
			
		||||
    fbd = ctypes.CDLL(settings.FRIBIDI_LIB)
 | 
			
		||||
 | 
			
		||||
    for msg in msgs:
 | 
			
		||||
        msg = protect_format_seq(msg)
 | 
			
		||||
 | 
			
		||||
@@ -206,52 +193,3 @@ def log2vis(msgs):
 | 
			
		||||
#        print(*(ord(c) for c in fbc_str))
 | 
			
		||||
 | 
			
		||||
        yield fbc_str.value
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
##### Command line stuff. #####
 | 
			
		||||
def main():
 | 
			
		||||
    import argparse
 | 
			
		||||
    parser = argparse.ArgumentParser(description="" \
 | 
			
		||||
                    "Preprocesses right-to-left languages.\n" \
 | 
			
		||||
                    "You can use it either standalone, or through " \
 | 
			
		||||
                    "import_po_from_branches or update_trunk.\n\n" \
 | 
			
		||||
                    "Note: This has been tested on Linux, not 100% it will " \
 | 
			
		||||
                    "work nicely on Windows or OsX.\n" \
 | 
			
		||||
                    "Note: This uses ctypes, as there is no py3 binding for " \
 | 
			
		||||
                    "fribidi currently. This implies you only need the " \
 | 
			
		||||
                    "compiled C library to run it.\n" \
 | 
			
		||||
                    "Note: It handles some formating/escape codes (like " \
 | 
			
		||||
                    "\\\", %s, %x12, %.4f, etc.), protecting them from ugly " \
 | 
			
		||||
                    "(evil) fribidi, which seems completely unaware of such " \
 | 
			
		||||
                    "things (as unicode is...).")
 | 
			
		||||
    parser.add_argument('dst', metavar='dst.po',
 | 
			
		||||
                        help="The dest po into which write the " \
 | 
			
		||||
                             "pre-processed messages.")
 | 
			
		||||
    parser.add_argument('src', metavar='src.po',
 | 
			
		||||
                        help="The po's to pre-process messages.")
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
 | 
			
		||||
    msgs, state, u1 = utils.parse_messages(args.src)
 | 
			
		||||
    if state["is_broken"]:
 | 
			
		||||
        print("Source po is BROKEN, aborting.")
 | 
			
		||||
        return 1
 | 
			
		||||
 | 
			
		||||
    keys = []
 | 
			
		||||
    trans = []
 | 
			
		||||
    for key, val in msgs.items():
 | 
			
		||||
        keys.append(key)
 | 
			
		||||
        trans.append("".join(val["msgstr_lines"]))
 | 
			
		||||
    trans = log2vis(trans)
 | 
			
		||||
    for key, trn in zip(keys, trans):
 | 
			
		||||
        # Mono-line for now...
 | 
			
		||||
        msgs[key]["msgstr_lines"] = [trn]
 | 
			
		||||
 | 
			
		||||
    utils.write_messages(args.dst, msgs, state["comm_msg"], state["fuzzy_msg"])
 | 
			
		||||
 | 
			
		||||
    print("RTL pre-process completed.")
 | 
			
		||||
    return 0
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    print("\n\n *** Running {} *** \n".format(__file__))
 | 
			
		||||
    sys.exit(main())
 | 
			
		||||
@@ -24,8 +24,12 @@
 | 
			
		||||
# XXX This is a template, most values should be OK, but some you’ll have to
 | 
			
		||||
#     edit (most probably, BLENDER_EXEC and SOURCE_DIR).
 | 
			
		||||
 | 
			
		||||
import os.path
 | 
			
		||||
 | 
			
		||||
import json
 | 
			
		||||
import os
 | 
			
		||||
import sys
 | 
			
		||||
 | 
			
		||||
import bpy
 | 
			
		||||
 | 
			
		||||
###############################################################################
 | 
			
		||||
# MISC
 | 
			
		||||
@@ -86,15 +90,24 @@ LANGUAGES = (
 | 
			
		||||
    (40, "Hindi (मानक हिन्दी)", "hi_IN"),
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
# Default context, in py!
 | 
			
		||||
DEFAULT_CONTEXT = bpy.app.translations.contexts.default
 | 
			
		||||
 | 
			
		||||
# Name of language file used by Blender to generate translations' menu.
 | 
			
		||||
LANGUAGES_FILE = "languages"
 | 
			
		||||
 | 
			
		||||
# The min level of completeness for a po file to be imported from /branches
 | 
			
		||||
# into /trunk, as a percentage. -1 means "import everything".
 | 
			
		||||
IMPORT_MIN_LEVEL = -1
 | 
			
		||||
# The min level of completeness for a po file to be imported from /branches into /trunk, as a percentage.
 | 
			
		||||
IMPORT_MIN_LEVEL = 0.0
 | 
			
		||||
 | 
			
		||||
# Languages in /branches we do not want to import in /trunk currently...
 | 
			
		||||
IMPORT_LANGUAGES_SKIP = {'am', 'bg', 'fi', 'el', 'et', 'ne', 'pl', 'ro', 'uz', 'uz@cyrillic'}
 | 
			
		||||
IMPORT_LANGUAGES_SKIP = {
 | 
			
		||||
    'am_ET', 'bg_BG', 'fi_FI', 'el_GR', 'et_EE', 'ne_NP', 'pl_PL', 'ro_RO', 'uz_UZ', 'uz_UZ@cyrillic',
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
# Languages that need RTL pre-processing.
 | 
			
		||||
IMPORT_LANGUAGES_RTL = {
 | 
			
		||||
    'ar_EG', 'fa_IR', 'he_IL',
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
# The comment prefix used in generated messages.txt file.
 | 
			
		||||
MSG_COMMENT_PREFIX = "#~ "
 | 
			
		||||
@@ -111,6 +124,9 @@ PO_COMMENT_PREFIX_SOURCE = "#: "
 | 
			
		||||
# The comment prefix used to mark sources of msgids, in po's.
 | 
			
		||||
PO_COMMENT_PREFIX_SOURCE_CUSTOM = "#. :src: "
 | 
			
		||||
 | 
			
		||||
# The general "generated" comment prefix, in po's.
 | 
			
		||||
PO_COMMENT_PREFIX_GENERATED = "#. "
 | 
			
		||||
 | 
			
		||||
# The comment prefix used to comment entries in po's.
 | 
			
		||||
PO_COMMENT_PREFIX_MSG= "#~ "
 | 
			
		||||
 | 
			
		||||
@@ -127,16 +143,16 @@ PO_MSGID = "msgid "
 | 
			
		||||
PO_MSGSTR = "msgstr "
 | 
			
		||||
 | 
			
		||||
# The 'header' key of po files.
 | 
			
		||||
PO_HEADER_KEY = ("", "")
 | 
			
		||||
PO_HEADER_KEY = (DEFAULT_CONTEXT, "")
 | 
			
		||||
 | 
			
		||||
PO_HEADER_MSGSTR = (
 | 
			
		||||
    "Project-Id-Version: Blender {blender_ver} (r{blender_rev})\\n\n"
 | 
			
		||||
    "Project-Id-Version: {blender_ver} (r{blender_rev})\\n\n"
 | 
			
		||||
    "Report-Msgid-Bugs-To: \\n\n"
 | 
			
		||||
    "POT-Creation-Date: {time}\\n\n"
 | 
			
		||||
    "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n\n"
 | 
			
		||||
    "Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n\n"
 | 
			
		||||
    "Language-Team: LANGUAGE <LL@li.org>\\n\n"
 | 
			
		||||
    "Language: {iso}\\n\n"
 | 
			
		||||
    "Language: {uid}\\n\n"
 | 
			
		||||
    "MIME-Version: 1.0\\n\n"
 | 
			
		||||
    "Content-Type: text/plain; charset=UTF-8\\n\n"
 | 
			
		||||
    "Content-Transfer-Encoding: 8bit\n"
 | 
			
		||||
@@ -154,8 +170,8 @@ PO_HEADER_COMMENT = (
 | 
			
		||||
 | 
			
		||||
TEMPLATE_ISO_ID = "__TEMPLATE__"
 | 
			
		||||
 | 
			
		||||
# Default context.
 | 
			
		||||
CONTEXT_DEFAULT = ""
 | 
			
		||||
# Num buttons report their label with a trailing ': '...
 | 
			
		||||
NUM_BUTTON_SUFFIX = ": "
 | 
			
		||||
 | 
			
		||||
# Undocumented operator placeholder string.
 | 
			
		||||
UNDOC_OPS_STR = "(undocumented operator)"
 | 
			
		||||
@@ -241,11 +257,6 @@ PYGETTEXT_KEYWORDS = (() +
 | 
			
		||||
          for it in ("BLF_I18N_MSGID_MULTI_CTXT",))
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
ESCAPE_RE = (
 | 
			
		||||
    (r'((?<!\\)"|(?<!\\)\\(?!\\|"))', r"\\\1"),
 | 
			
		||||
    ('\t', r"\\t"),
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
# Should po parser warn when finding a first letter not capitalized?
 | 
			
		||||
WARN_MSGID_NOT_CAPITALIZED = True
 | 
			
		||||
 | 
			
		||||
@@ -291,40 +302,42 @@ WARN_MSGID_NOT_CAPITALIZED_ALLOWED = {
 | 
			
		||||
}
 | 
			
		||||
WARN_MSGID_NOT_CAPITALIZED_ALLOWED |= set(lng[2] for lng in LANGUAGES)
 | 
			
		||||
 | 
			
		||||
WARN_MSGID_END_POINT_ALLOWED = {
 | 
			
		||||
    "Numpad .",
 | 
			
		||||
    "Circle|Alt .",
 | 
			
		||||
    "Temp. Diff.",
 | 
			
		||||
    "Float Neg. Exp.",
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
PARSER_CACHE_HASH = 'sha1'
 | 
			
		||||
 | 
			
		||||
PARSER_TEMPLATE_ID = "__POT__"
 | 
			
		||||
PARSER_PY_ID = "__PY__"
 | 
			
		||||
 | 
			
		||||
PARSER_PY_MARKER_BEGIN = "\n# ##### BEGIN AUTOGENERATED I18N SECTION #####\n"
 | 
			
		||||
PARSER_PY_MARKER_END = "\n# ##### END AUTOGENERATED I18N SECTION #####\n"
 | 
			
		||||
 | 
			
		||||
PARSER_MAX_FILE_SIZE = 2**24  # in bytes, i.e. 16 Mb.
 | 
			
		||||
 | 
			
		||||
###############################################################################
 | 
			
		||||
# PATHS
 | 
			
		||||
###############################################################################
 | 
			
		||||
 | 
			
		||||
# The tools path, should be OK.
 | 
			
		||||
TOOLS_DIR = os.path.join(os.path.dirname(__file__))
 | 
			
		||||
 | 
			
		||||
# The Python3 executable.You’ll likely have to edit it in your user_settings.py
 | 
			
		||||
# if you’re under Windows.
 | 
			
		||||
PYTHON3_EXEC = "python3"
 | 
			
		||||
 | 
			
		||||
# The Blender executable!
 | 
			
		||||
# This is just an example, you’ll most likely have to edit it in your user_settings.py!
 | 
			
		||||
BLENDER_EXEC = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..", "blender"))
 | 
			
		||||
# This is just an example, you’ll have to edit it in your user_settings.py!
 | 
			
		||||
BLENDER_EXEC = os.path.abspath(os.path.join("foo", "bar", "blender"))
 | 
			
		||||
# check for blender.bin
 | 
			
		||||
if not os.path.exists(BLENDER_EXEC):
 | 
			
		||||
    if os.path.exists(BLENDER_EXEC + ".bin"):
 | 
			
		||||
        BLENDER_EXEC = BLENDER_EXEC + ".bin"
 | 
			
		||||
 | 
			
		||||
# The xgettext tool. You’ll likely have to edit it in your user_settings.py if you’re under Windows.
 | 
			
		||||
GETTEXT_XGETTEXT_EXECUTABLE = "xgettext"
 | 
			
		||||
 | 
			
		||||
# The gettext msgmerge tool. You’ll likely have to edit it in your user_settings.py if you’re under Windows.
 | 
			
		||||
GETTEXT_MSGMERGE_EXECUTABLE = "msgmerge"
 | 
			
		||||
 | 
			
		||||
# The gettext msgfmt "compiler". You’ll likely have to edit it in your user_settings.py if you’re under Windows.
 | 
			
		||||
GETTEXT_MSGFMT_EXECUTABLE = "msgfmt"
 | 
			
		||||
 | 
			
		||||
# The svn binary... You’ll likely have to edit it in your user_settings.py if you’re under Windows.
 | 
			
		||||
SVN_EXECUTABLE = "svn"
 | 
			
		||||
 | 
			
		||||
# The FriBidi C compiled library (.so under Linux, .dll under windows...).
 | 
			
		||||
# You’ll likely have to edit it in your user_settings.py if you’re under Windows., e.g. using the included one:
 | 
			
		||||
#     FRIBIDI_LIB = os.path.join(TOOLS_DIR, "libfribidi.dll")
 | 
			
		||||
@@ -334,53 +347,63 @@ FRIBIDI_LIB = "libfribidi.so.0"
 | 
			
		||||
RTL_PREPROCESS_FILE = "is_rtl"
 | 
			
		||||
 | 
			
		||||
# The Blender source root path.
 | 
			
		||||
# This is just an example, you’ll most likely have to override it in your user_settings.py!
 | 
			
		||||
SOURCE_DIR = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..", "..", "..", "blender_msgs"))
 | 
			
		||||
# This is just an example, you’ll have to override it in your user_settings.py!
 | 
			
		||||
SOURCE_DIR = os.path.abspath(os.path.join("blender"))
 | 
			
		||||
 | 
			
		||||
# The bf-translation repository (you'll likely have to override this in your user_settings.py).
 | 
			
		||||
I18N_DIR = os.path.abspath(os.path.join(TOOLS_DIR, "..", "..", "..", "..", "..", "..", "i18n"))
 | 
			
		||||
# The bf-translation repository (you'll have to override this in your user_settings.py).
 | 
			
		||||
I18N_DIR = os.path.abspath(os.path.join("i18n"))
 | 
			
		||||
 | 
			
		||||
# The /branches path (overriden in bf-translation's i18n_override_settings.py).
 | 
			
		||||
BRANCHES_DIR = os.path.join(I18N_DIR, "branches")
 | 
			
		||||
# The /branches path (relative to I18N_DIR).
 | 
			
		||||
REL_BRANCHES_DIR = os.path.join("branches")
 | 
			
		||||
 | 
			
		||||
# The /trunk path (overriden in bf-translation's i18n_override_settings.py).
 | 
			
		||||
TRUNK_DIR = os.path.join(I18N_DIR, "trunk")
 | 
			
		||||
# The /trunk path (relative to I18N_DIR).
 | 
			
		||||
REL_TRUNK_DIR = os.path.join("trunk")
 | 
			
		||||
 | 
			
		||||
# The /trunk/po path (overriden in bf-translation's i18n_override_settings.py).
 | 
			
		||||
TRUNK_PO_DIR = os.path.join(TRUNK_DIR, "po")
 | 
			
		||||
# The /trunk/po path (relative to I18N_DIR).
 | 
			
		||||
REL_TRUNK_PO_DIR = os.path.join(REL_TRUNK_DIR, "po")
 | 
			
		||||
 | 
			
		||||
# The /trunk/mo path (overriden in bf-translation's i18n_override_settings.py).
 | 
			
		||||
TRUNK_MO_DIR = os.path.join(TRUNK_DIR, "locale")
 | 
			
		||||
# The /trunk/mo path (relative to I18N_DIR).
 | 
			
		||||
REL_TRUNK_MO_DIR = os.path.join(REL_TRUNK_DIR, "locale")
 | 
			
		||||
 | 
			
		||||
# The file storing Blender-generated messages.
 | 
			
		||||
FILE_NAME_MESSAGES = os.path.join(TRUNK_PO_DIR, "messages.txt")
 | 
			
		||||
# The Blender source path to check for i18n macros (relative to SOURCE_DIR).
 | 
			
		||||
REL_POTFILES_SOURCE_DIR = os.path.join("source")
 | 
			
		||||
 | 
			
		||||
# The Blender source path to check for i18n macros.
 | 
			
		||||
POTFILES_SOURCE_DIR = os.path.join(SOURCE_DIR, "source")
 | 
			
		||||
# The template messages file (relative to I18N_DIR).
 | 
			
		||||
REL_FILE_NAME_POT = os.path.join(REL_BRANCHES_DIR, DOMAIN + ".pot")
 | 
			
		||||
 | 
			
		||||
# The "source" file storing which files should be processed by xgettext, used to create FILE_NAME_POTFILES
 | 
			
		||||
FILE_NAME_SRC_POTFILES = os.path.join(TRUNK_PO_DIR, "_POTFILES.in")
 | 
			
		||||
# Mo root datapath.
 | 
			
		||||
REL_MO_PATH_ROOT = os.path.join(REL_TRUNK_DIR, "locale")
 | 
			
		||||
 | 
			
		||||
# The final (generated) file storing which files should be processed by xgettext.
 | 
			
		||||
FILE_NAME_POTFILES = os.path.join(TRUNK_PO_DIR, "POTFILES.in")
 | 
			
		||||
# Mo path generator for a given language.
 | 
			
		||||
REL_MO_PATH_TEMPLATE = os.path.join(REL_MO_PATH_ROOT, "{}", "LC_MESSAGES")
 | 
			
		||||
 | 
			
		||||
# The template messages file.
 | 
			
		||||
FILE_NAME_POT = os.path.join(TRUNK_PO_DIR, ".".join((DOMAIN, "pot")))
 | 
			
		||||
# Mo path generator for a given language (relative to any "locale" dir).
 | 
			
		||||
MO_PATH_ROOT_RELATIVE = os.path.join("locale")
 | 
			
		||||
MO_PATH_TEMPLATE_RELATIVE = os.path.join(MO_PATH_ROOT_RELATIVE, "{}", "LC_MESSAGES")
 | 
			
		||||
 | 
			
		||||
# Other py files that should be searched for ui strings, relative to SOURCE_DIR.
 | 
			
		||||
# Needed for Cycles, currently...
 | 
			
		||||
CUSTOM_PY_UI_FILES = [
 | 
			
		||||
# Mo file name.
 | 
			
		||||
MO_FILE_NAME = DOMAIN + ".mo"
 | 
			
		||||
 | 
			
		||||
# Where to search for py files that may contain ui strings (relative to SOURCE_DIR).
 | 
			
		||||
REL_CUSTOM_PY_UI_FILES = [
 | 
			
		||||
    os.path.join("release", "scripts", "startup", "bl_ui"),
 | 
			
		||||
    os.path.join("intern", "cycles", "blender", "addon", "ui.py"),
 | 
			
		||||
    os.path.join("release", "scripts", "modules", "rna_prop_ui.py"),
 | 
			
		||||
]
 | 
			
		||||
 | 
			
		||||
# An optional text file listing files to force include/exclude from py_xgettext process.
 | 
			
		||||
SRC_POTFILES = ""
 | 
			
		||||
 | 
			
		||||
# A cache storing validated msgids, to avoid re-spellchecking them.
 | 
			
		||||
SPELL_CACHE = os.path.join("/tmp", ".spell_cache")
 | 
			
		||||
 | 
			
		||||
# Threshold defining whether a new msgid is similar enough with an old one to reuse its translation...
 | 
			
		||||
SIMILAR_MSGID_THRESHOLD = 0.75
 | 
			
		||||
 | 
			
		||||
# Additional import paths to add to sys.path (';' separated)...
 | 
			
		||||
INTERN_PY_SYS_PATHS = ""
 | 
			
		||||
 | 
			
		||||
# Custom override settings must be one dir above i18n tools itself!
 | 
			
		||||
import sys
 | 
			
		||||
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
 | 
			
		||||
try:
 | 
			
		||||
    from bl_i18n_override_settings import *
 | 
			
		||||
@@ -392,3 +415,105 @@ try:
 | 
			
		||||
    from user_settings import *
 | 
			
		||||
except ImportError:  # If no user_settings available, it’s no error!
 | 
			
		||||
    pass
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
for p in set(INTERN_PY_SYS_PATHS.split(";")):
 | 
			
		||||
    if p:
 | 
			
		||||
        sys.path.append(p)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# The settings class itself!
 | 
			
		||||
def _do_get(ref, path):
 | 
			
		||||
    return os.path.normpath(os.path.join(ref, path))
 | 
			
		||||
 | 
			
		||||
def _do_set(ref, path):
 | 
			
		||||
    path = os.path.normpath(path)
 | 
			
		||||
    # If given path is absolute, make it relative to current ref one (else we consider it is already the case!)
 | 
			
		||||
    if os.path.isabs(path):
 | 
			
		||||
        return os.path.relpath(path, ref)
 | 
			
		||||
    else:
 | 
			
		||||
        return path
 | 
			
		||||
 | 
			
		||||
def _gen_get_set_path(ref, name):
 | 
			
		||||
    def _get(self):
 | 
			
		||||
        return _do_get(getattr(self, ref), getattr(self, name))
 | 
			
		||||
    def _set(self, value):
 | 
			
		||||
        setattr(self, name, _do_set(getattr(self, ref), value))
 | 
			
		||||
    return _get, _set
 | 
			
		||||
 | 
			
		||||
def _gen_get_set_paths(ref, name):
 | 
			
		||||
    def _get(self):
 | 
			
		||||
        return [_do_get(getattr(self, ref), p) for p in getattr(self, name)]
 | 
			
		||||
    def _set(self, value):
 | 
			
		||||
        setattr(self, name, [_do_set(getattr(self, ref), p) for p in value])
 | 
			
		||||
    return _get, _set
 | 
			
		||||
 | 
			
		||||
class I18nSettings:
 | 
			
		||||
    """
 | 
			
		||||
    Class allowing persistence of our settings!
 | 
			
		||||
    Saved in JSon format, so settings should be JSon'able objects!
 | 
			
		||||
    """
 | 
			
		||||
    _settings = None
 | 
			
		||||
 | 
			
		||||
    def __new__(cls, *args, **kwargs):
 | 
			
		||||
        # Addon preferences are singleton by definition, so is this class!
 | 
			
		||||
        if not I18nSettings._settings:
 | 
			
		||||
            cls._settings = super(I18nSettings, cls).__new__(cls)
 | 
			
		||||
            cls._settings.__dict__ = {uid: data for uid, data in globals().items() if not uid.startswith("_")}
 | 
			
		||||
        return I18nSettings._settings
 | 
			
		||||
 | 
			
		||||
    def from_json(self, string):
 | 
			
		||||
        data = dict(json.loads(string))
 | 
			
		||||
        # Special case... :/
 | 
			
		||||
        if "INTERN_PY_SYS_PATHS" in data:
 | 
			
		||||
            self.PY_SYS_PATHS = data["INTERN_PY_SYS_PATHS"]
 | 
			
		||||
        self.__dict__.update(data)
 | 
			
		||||
 | 
			
		||||
    def to_json(self):
 | 
			
		||||
        # Only save the diff from default i18n_settings!
 | 
			
		||||
        glob = globals()
 | 
			
		||||
        export_dict = {uid: val for uid, val in self.__dict__.items() if glob.get(uid) != val}
 | 
			
		||||
        return json.dumps(export_dict)
 | 
			
		||||
 | 
			
		||||
    def load(self, fname, reset=False):
 | 
			
		||||
        if reset:
 | 
			
		||||
            self.__dict__ = {uid: data for uid, data in globals().items() if not uid.startswith("_")}
 | 
			
		||||
        if isinstance(fname, str):
 | 
			
		||||
            if not os.path.isfile(fname):
 | 
			
		||||
                return
 | 
			
		||||
            with open(fname) as f:
 | 
			
		||||
                self.from_json(f.read())
 | 
			
		||||
        # Else assume fname is already a file(like) object!
 | 
			
		||||
        else:
 | 
			
		||||
            self.from_json(fname.read())
 | 
			
		||||
 | 
			
		||||
    def save(self, fname):
 | 
			
		||||
        if isinstance(fname, str):
 | 
			
		||||
            with open(fname, 'w') as f:
 | 
			
		||||
                f.write(self.to_json())
 | 
			
		||||
        # Else assume fname is already a file(like) object!
 | 
			
		||||
        else:
 | 
			
		||||
            fname.write(self.to_json())
 | 
			
		||||
 | 
			
		||||
    BRANCHES_DIR = property(*(_gen_get_set_path("I18N_DIR", "REL_BRANCHES_DIR")))
 | 
			
		||||
    TRUNK_DIR = property(*(_gen_get_set_path("I18N_DIR", "REL_TRUNK_DIR")))
 | 
			
		||||
    TRUNK_PO_DIR = property(*(_gen_get_set_path("I18N_DIR", "REL_TRUNK_PO_DIR")))
 | 
			
		||||
    TRUNK_MO_DIR = property(*(_gen_get_set_path("I18N_DIR", "REL_TRUNK_MO_DIR")))
 | 
			
		||||
    POTFILES_SOURCE_DIR = property(*(_gen_get_set_path("SOURCE_DIR", "REL_POTFILES_SOURCE_DIR")))
 | 
			
		||||
    FILE_NAME_POT = property(*(_gen_get_set_path("I18N_DIR", "REL_FILE_NAME_POT")))
 | 
			
		||||
    MO_PATH_ROOT = property(*(_gen_get_set_path("I18N_DIR", "REL_MO_PATH_ROOT")))
 | 
			
		||||
    MO_PATH_TEMPLATE = property(*(_gen_get_set_path("I18N_DIR", "REL_MO_PATH_TEMPLATE")))
 | 
			
		||||
    CUSTOM_PY_UI_FILES = property(*(_gen_get_set_paths("SOURCE_DIR", "REL_CUSTOM_PY_UI_FILES")))
 | 
			
		||||
 | 
			
		||||
    def _get_py_sys_paths(self):
 | 
			
		||||
        return self.INTERN_PY_SYS_PATHS
 | 
			
		||||
    def _set_py_sys_paths(self, val):
 | 
			
		||||
        old_paths = set(self.INTERN_PY_SYS_PATHS.split(";")) - {""}
 | 
			
		||||
        new_paths = set(val.split(";")) - {""}
 | 
			
		||||
        for p in old_paths - new_paths:
 | 
			
		||||
            if p in sys.path:
 | 
			
		||||
                sys.path.remove(p)
 | 
			
		||||
        for p in new_paths - old_paths:
 | 
			
		||||
            sys.path.append(p)
 | 
			
		||||
        self.INTERN_PY_SYS_PATHS = val
 | 
			
		||||
    PY_SYS_PATHS = property(_get_py_sys_paths, _set_py_sys_paths)
 | 
			
		||||
 
 | 
			
		||||
@@ -18,21 +18,19 @@
 | 
			
		||||
 | 
			
		||||
# <pep8 compliant>
 | 
			
		||||
 | 
			
		||||
import enchant
 | 
			
		||||
import os
 | 
			
		||||
import pickle
 | 
			
		||||
import re
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
_valid_before = "(?<=[\\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)"
 | 
			
		||||
_valid_after = "(?=[\\s'\"`.!?,;:])|(?=[/-]\\s*[a-zA-Z])|(?=$)"
 | 
			
		||||
_valid_words = "(?:{})(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:{})".format(_valid_before, _valid_after)
 | 
			
		||||
_reg = re.compile(_valid_words)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def split_words(text):
 | 
			
		||||
    return [w for w in _reg.findall(text) if w]
 | 
			
		||||
 | 
			
		||||
class SpellChecker():
 | 
			
		||||
    """
 | 
			
		||||
    A basic spell checker.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    # These must be all lower case for comparisons
 | 
			
		||||
dict_uimsgs = {
 | 
			
		||||
    uimsgs = {
 | 
			
		||||
        # OK words
 | 
			
		||||
        "aren",  # aren't
 | 
			
		||||
        "betweens",  # yuck! in-betweens!
 | 
			
		||||
@@ -533,3 +531,48 @@ dict_uimsgs = {
 | 
			
		||||
        "xna",
 | 
			
		||||
        "xvid",
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    _valid_before = "(?<=[\\s*'\"`])|(?<=[a-zA-Z][/-])|(?<=^)"
 | 
			
		||||
    _valid_after = "(?=[\\s'\"`.!?,;:])|(?=[/-]\\s*[a-zA-Z])|(?=$)"
 | 
			
		||||
    _valid_words = "(?:{})(?:(?:[A-Z]+[a-z]*)|[A-Z]*|[a-z]*)(?:{})".format(_valid_before, _valid_after)
 | 
			
		||||
    _split_words = re.compile(_valid_words).findall
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def split_words(cls, text):
 | 
			
		||||
        return [w for w in cls._split_words(text) if w]
 | 
			
		||||
 | 
			
		||||
    def __init__(self, settings, lang="en_US"):
 | 
			
		||||
        self.settings = settings
 | 
			
		||||
        self.dict_spelling = enchant.Dict(lang)
 | 
			
		||||
        self.cache = set(self.uimsgs)
 | 
			
		||||
 | 
			
		||||
        cache = self.settings.SPELL_CACHE
 | 
			
		||||
        if cache and os.path.exists(cache):
 | 
			
		||||
            with open(cache, 'rb') as f:
 | 
			
		||||
                self.cache |= set(pickle.load(f))
 | 
			
		||||
 | 
			
		||||
    def __del__(self):
 | 
			
		||||
        cache = self.settings.SPELL_CACHE
 | 
			
		||||
        if cache and os.path.exists(cache):
 | 
			
		||||
            with open(cache, 'wb') as f:
 | 
			
		||||
                pickle.dump(self.cache, f)
 | 
			
		||||
 | 
			
		||||
    def check(self, txt):
 | 
			
		||||
        ret = []
 | 
			
		||||
 | 
			
		||||
        if txt in self.cache:
 | 
			
		||||
            return ret
 | 
			
		||||
 | 
			
		||||
        for w in self.split_words(txt):
 | 
			
		||||
            w_lower = w.lower()
 | 
			
		||||
            if w_lower in self.cache:
 | 
			
		||||
                continue
 | 
			
		||||
            if not self.dict_spelling.check(w):
 | 
			
		||||
                ret.append((w, self.dict_spelling.suggest(w)))
 | 
			
		||||
            else:
 | 
			
		||||
                self.cache.add(w_lower)
 | 
			
		||||
 | 
			
		||||
        if not ret:
 | 
			
		||||
            self.cache.add(txt)
 | 
			
		||||
 | 
			
		||||
        return ret
 | 
			
		||||
 
 | 
			
		||||
@@ -1,148 +0,0 @@
 | 
			
		||||
#!/usr/bin/python3
 | 
			
		||||
 | 
			
		||||
# ***** BEGIN GPL LICENSE BLOCK *****
 | 
			
		||||
#
 | 
			
		||||
# This program is free software; you can redistribute it and/or
 | 
			
		||||
# modify it under the terms of the GNU General Public License
 | 
			
		||||
# as published by the Free Software Foundation; either version 2
 | 
			
		||||
# of the License, or (at your option) any later version.
 | 
			
		||||
#
 | 
			
		||||
# This program is distributed in the hope that it will be useful,
 | 
			
		||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
 | 
			
		||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 | 
			
		||||
# GNU General Public License for more details.
 | 
			
		||||
#
 | 
			
		||||
# You should have received a copy of the GNU General Public License
 | 
			
		||||
# along with this program; if not, write to the Free Software Foundation,
 | 
			
		||||
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 | 
			
		||||
#
 | 
			
		||||
# ***** END GPL LICENSE BLOCK *****
 | 
			
		||||
 | 
			
		||||
# <pep8 compliant>
 | 
			
		||||
 | 
			
		||||
# Update "languages" text file used by Blender at runtime to build translations menu.
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import sys
 | 
			
		||||
import shutil
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    import settings
 | 
			
		||||
    import utils
 | 
			
		||||
except:
 | 
			
		||||
    from . import (settings, utils)
 | 
			
		||||
 | 
			
		||||
TRUNK_PO_DIR = settings.TRUNK_PO_DIR
 | 
			
		||||
TRUNK_MO_DIR = settings.TRUNK_MO_DIR
 | 
			
		||||
 | 
			
		||||
LANGUAGES_CATEGORIES = settings.LANGUAGES_CATEGORIES
 | 
			
		||||
LANGUAGES = settings.LANGUAGES
 | 
			
		||||
LANGUAGES_FILE = settings.LANGUAGES_FILE
 | 
			
		||||
 | 
			
		||||
OK = 0
 | 
			
		||||
MISSING = 1
 | 
			
		||||
TOOLOW = 2
 | 
			
		||||
FORBIDDEN = 3
 | 
			
		||||
FLAG_MESSAGES = {
 | 
			
		||||
    OK: "",
 | 
			
		||||
    MISSING: "No translation yet!",
 | 
			
		||||
    TOOLOW: "Not enough advanced to be included...",
 | 
			
		||||
    FORBIDDEN: "Explicitly forbidden!",
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
def find_matching_po(languages, stats, forbidden):
 | 
			
		||||
    """Match languages defined in LANGUAGES setting to relevant po, if possible!"""
 | 
			
		||||
    ret = []
 | 
			
		||||
    for uid, label, org_key, in languages:
 | 
			
		||||
        key = org_key
 | 
			
		||||
        if key not in stats:
 | 
			
		||||
            # Try to simplify the key (eg from es_ES to es).
 | 
			
		||||
            if '_' in org_key:
 | 
			
		||||
                key = org_key[0:org_key.index('_')]
 | 
			
		||||
            # For stuff like sr_SR@latin -> sr@latin...
 | 
			
		||||
            if '@' in org_key:
 | 
			
		||||
                key = key + org_key[org_key.index('@'):]
 | 
			
		||||
        if key in stats:
 | 
			
		||||
            if key in forbidden:
 | 
			
		||||
                ret.append((stats[key], uid, label, org_key, FORBIDDEN))
 | 
			
		||||
            else:
 | 
			
		||||
                ret.append((stats[key], uid, label, org_key, OK))
 | 
			
		||||
        else:
 | 
			
		||||
            ret.append((0.0, uid, label, org_key, MISSING))
 | 
			
		||||
    return ret
 | 
			
		||||
 | 
			
		||||
def main():
 | 
			
		||||
    import argparse
 | 
			
		||||
    parser = argparse.ArgumentParser(description="Update 'languages' text file used by Blender at runtime to build "
 | 
			
		||||
                                                 "translations menu.")
 | 
			
		||||
    parser.add_argument('-m', '--min_translation', type=int, default=-100,
 | 
			
		||||
                        help="Minimum level of translation, as a percentage (translations below this are commented out "
 | 
			
		||||
                             "in menu).")
 | 
			
		||||
    parser.add_argument('langs', metavar='ISO_code', nargs='*',
 | 
			
		||||
                        help="Unconditionally exclude those languages from the menu.")
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
 | 
			
		||||
    ret = 0
 | 
			
		||||
    min_trans = args.min_translation / 100.0
 | 
			
		||||
    forbidden = set(args.langs)
 | 
			
		||||
    # 'DEFAULT' and en_US are always valid, fully-translated "languages"!
 | 
			
		||||
    stats = {"DEFAULT": 1.0, "en_US": 1.0}
 | 
			
		||||
 | 
			
		||||
    # Get the "done level" of each po in trunk...
 | 
			
		||||
    for po in os.listdir(TRUNK_PO_DIR):
 | 
			
		||||
        if po.endswith(".po") and not po.endswith("_raw.po"):
 | 
			
		||||
            lang = os.path.basename(po)[:-3]
 | 
			
		||||
            msgs = utils.I18nMessages(kind='PO', src=os.path.join(TRUNK_PO_DIR, po))
 | 
			
		||||
            stats[lang] = msgs.nbr_trans_msgs / msgs.nbr_msgs
 | 
			
		||||
 | 
			
		||||
    # Generate languages file used by Blender's i18n system.
 | 
			
		||||
    # First, match all entries in LANGUAGES to a lang in stats, if possible!
 | 
			
		||||
    stats = find_matching_po(LANGUAGES, stats, forbidden)
 | 
			
		||||
    limits = sorted(LANGUAGES_CATEGORIES, key=lambda it: it[0], reverse=True)
 | 
			
		||||
    idx = 0
 | 
			
		||||
    stats = sorted(stats, key=lambda it: it[0], reverse=True)
 | 
			
		||||
    langs_cats = [[] for i in range(len(limits))]
 | 
			
		||||
    highest_uid = 0
 | 
			
		||||
    for prop, uid, label, key, flag in stats:
 | 
			
		||||
        if prop < limits[idx][0]:
 | 
			
		||||
            # Sub-sort languages by iso-codes.
 | 
			
		||||
            langs_cats[idx].sort(key=lambda it: it[2])
 | 
			
		||||
            idx += 1
 | 
			
		||||
        if prop < min_trans and flag == OK:
 | 
			
		||||
            flag = TOOLOW
 | 
			
		||||
        langs_cats[idx].append((uid, label, key, flag))
 | 
			
		||||
        if abs(uid) > highest_uid:
 | 
			
		||||
            highest_uid = abs(uid)
 | 
			
		||||
    # Sub-sort last group of languages by iso-codes!
 | 
			
		||||
    langs_cats[idx].sort(key=lambda it: it[2])
 | 
			
		||||
    with open(os.path.join(TRUNK_MO_DIR, LANGUAGES_FILE), 'w', encoding="utf-8") as f:
 | 
			
		||||
        f.write("# File used by Blender to know which languages (translations) are available, \n")
 | 
			
		||||
        f.write("# and to generate translation menu.\n")
 | 
			
		||||
        f.write("#\n")
 | 
			
		||||
        f.write("# File format:\n")
 | 
			
		||||
        f.write("# ID:MENULABEL:ISOCODE\n")
 | 
			
		||||
        f.write("# ID must be unique, except for 0 value (marks categories for menu).\n")
 | 
			
		||||
        f.write("# Line starting with a # are comments!\n")
 | 
			
		||||
        f.write("#\n")
 | 
			
		||||
        f.write("# Automatically generated by bl_i18n_utils/update_languages_menu.py script.\n")
 | 
			
		||||
        f.write("# Highest ID currently in use: {}\n".format(highest_uid))
 | 
			
		||||
        for cat, langs_cat in zip(limits, langs_cats):
 | 
			
		||||
            f.write("#\n")
 | 
			
		||||
            # Write "category menu label"...
 | 
			
		||||
            if langs_cat:
 | 
			
		||||
                f.write("0:{}::\n".format(cat[1]))
 | 
			
		||||
            else:
 | 
			
		||||
                # Do not write the category if it has no language!
 | 
			
		||||
                f.write("# Void category! #0:{}:\n".format(cat[1]))
 | 
			
		||||
            # ...and all matching language entries!
 | 
			
		||||
            for uid, label, key, flag in langs_cat:
 | 
			
		||||
                if flag == OK:
 | 
			
		||||
                    f.write("{}:{}:{}\n".format(uid, label, key))
 | 
			
		||||
                else:
 | 
			
		||||
                    # Non-existing, commented entry!
 | 
			
		||||
                    f.write("# {} #{}:{}:{}\n".format(FLAG_MESSAGES[flag], uid, label, key))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    print("\n\n *** Running {} *** \n".format(__file__))
 | 
			
		||||
    sys.exit(main())
 | 
			
		||||
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							@@ -35,6 +35,7 @@ __all__ = (
 | 
			
		||||
    "register_module",
 | 
			
		||||
    "register_manual_map",
 | 
			
		||||
    "unregister_manual_map",
 | 
			
		||||
    "make_rna_paths",
 | 
			
		||||
    "manual_map",
 | 
			
		||||
    "resource_path",
 | 
			
		||||
    "script_path_user",
 | 
			
		||||
@@ -640,3 +641,29 @@ def manual_map():
 | 
			
		||||
            continue
 | 
			
		||||
 | 
			
		||||
        yield prefix, url_manual_mapping
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# Build an RNA path from struct/property/enum names.
 | 
			
		||||
def make_rna_paths(struct_name, prop_name, enum_name):
 | 
			
		||||
    """
 | 
			
		||||
    Create RNA "paths" from given names.
 | 
			
		||||
 | 
			
		||||
    :arg struct_name: Name of a RNA struct (like e.g. "Scene").
 | 
			
		||||
    :type struct_name: string
 | 
			
		||||
    :arg prop_name: Name of a RNA struct's property.
 | 
			
		||||
    :type prop_name: string
 | 
			
		||||
    :arg enum_name: Name of a RNA enum identifier.
 | 
			
		||||
    :type enum_name: string
 | 
			
		||||
    :return: A triple of three "RNA paths" (most_complete_path, "struct.prop", "struct.prop:'enum'").
 | 
			
		||||
             If no enum_name is given, the third element will always be void.
 | 
			
		||||
    :rtype: tuple of strings
 | 
			
		||||
    """
 | 
			
		||||
    src = src_rna = src_enum = ""
 | 
			
		||||
    if struct_name:
 | 
			
		||||
        if prop_name:
 | 
			
		||||
            src = src_rna = ".".join((struct_name, prop_name))
 | 
			
		||||
            if enum_name:
 | 
			
		||||
                src = src_enum = "{}:'{}'".format(src_rna, enum_name)
 | 
			
		||||
        else:
 | 
			
		||||
            src = src_rna = struct_name
 | 
			
		||||
    return src, src_rna, src_enum
 | 
			
		||||
 
 | 
			
		||||
		Reference in New Issue
	
	Block a user