Spelling fixes in comments and descriptions, patch by luzpaz.

Differential Revision: https://developer.blender.org/D3668
2018-09-03 16:49:08 +02:00
parent c6bbe6c5aa
commit 4da2acae3a
72 changed files with 132 additions and 132 deletions

View File

@@ -881,7 +881,7 @@ class pyBluePrintCirclesShader(StrokeShader):
# times as possible.
#
# This works because the phases and directions are only
-# dependant on the stroke length, and the chance that
+# dependent on the stroke length, and the chance that
# stroke.resample() above produces strokes of the same length
# is quite high.
#

View File

@@ -352,7 +352,7 @@ def enable(module_name, *, default_set=False, persistent=False, handle_error=Non
mod.__time__ = os.path.getmtime(mod.__file__)
mod.__addon_enabled__ = False
except Exception as ex:
-# if the addon doesn't exist, dont print full traceback
+# if the addon doesn't exist, don't print full traceback
if type(ex) is ImportError and ex.name == module_name:
print("addon not found:", repr(module_name))
else:
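
The hunk above relies on ImportError.name to tell a missing addon apart from an import failure inside the addon. A minimal sketch of that check outside Blender's addon_utils (the helper name is illustrative, not the real API):

import importlib

def try_import_addon(module_name):
    # Only silence the traceback when the missing module is the addon itself;
    # an ImportError raised *inside* the addon still propagates in full.
    try:
        return importlib.import_module(module_name)
    except ImportError as ex:
        if ex.name == module_name:
            print("addon not found:", repr(module_name))
            return None
        raise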

View File

@@ -34,7 +34,7 @@ IS_TESTING = False
def drepr(string):
# is there a less crappy way to do this in python?, re.escape also escapes
-# single quotes strings so cant use it.
+# single quotes strings so can't use it.
return '"%s"' % repr(string)[1:-1].replace("\"", "\\\"").replace("\\'", "'")

View File

@@ -248,7 +248,7 @@ def dump_rna_messages(msgs, reports, settings, verbose=False):
# Now here is the *ugly* hack!
# Unfortunately, all classes we want to access are not available from bpy.types (OperatorProperties subclasses
# are not here, as they have the same name as matching Operator ones :( ). So we use __subclasses__() calls
-# to walk through all rna hierachy.
+# to walk through all rna hierarchy.
# But unregistered classes remain listed by relevant __subclasses__() calls (be it a Py or BPY/RNA bug),
# and obviously the matching RNA struct exists no more, so trying to access their data (even the identifier)
# quickly leads to segfault!
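
The __subclasses__() walk described in this hunk boils down to a recursive traversal; a generic sketch (helper name made up, not the actual extraction code):

def iter_rna_subclasses(cls):
    # Recursively yield every subclass reachable from a base class -- this is
    # how types hidden from bpy.types (e.g. OperatorProperties subclasses)
    # are reached. Unregistered classes can still show up here, hence the
    # defensive checks mentioned above.
    for sub in cls.__subclasses__():
        yield sub
        yield from iter_rna_subclasses(sub)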
@@ -498,7 +498,7 @@ def dump_py_messages_from_files(msgs, reports, files, settings):
def extract_strings_split(node):
"""
Returns a list args as returned by 'extract_strings()', but split into groups based on separate_nodes, this way
expressions like ("A" if test else "B") wont be merged but "A" + "B" will.
expressions like ("A" if test else "B") won't be merged but "A" + "B" will.
"""
estr_ls = []
nds_ls = []

View File

@@ -262,7 +262,7 @@ PYGETTEXT_KEYWORDS = (() +
# Check printf mismatches between msgid and msgstr.
CHECK_PRINTF_FORMAT = (
r"(?!<%)(?:%%)*%" # Begining, with handling for crazy things like '%%%%%s'
r"(?!<%)(?:%%)*%" # Beginning, with handling for crazy things like '%%%%%s'
r"[-+#0]?" # Flags (note: do not add the ' ' (space) flag here, generates too much false positives!)
r"(?:\*|[0-9]+)?" # Width
r"(?:\.(?:\*|[0-9]+))?" # Precision

View File

@@ -457,7 +457,7 @@ class I18nMessages:
def check(self, fix=False):
"""
Check consistency between messages and their keys!
-Check messages using format stuff are consistant between msgid and msgstr!
+Check messages using format stuff are consistent between msgid and msgstr!
If fix is True, tries to fix the issues.
Return a list of found errors (empty if everything went OK!).
"""

View File

@@ -28,7 +28,7 @@
# Windows or OsX.
# This uses ctypes, as there is no py3 binding for fribidi currently.
# This implies you only need the compiled C library to run it.
-# Finally, note that it handles some formating/escape codes (like
+# Finally, note that it handles some formatting/escape codes (like
# \", %s, %x12, %.4f, etc.), protecting them from ugly (evil) fribidi,
# which seems completely unaware of such things (as unicode is...).
@@ -79,7 +79,7 @@ MENU_DETECT_REGEX = re.compile("%x\\d+\\|")
##### Kernel processing funcs. #####
def protect_format_seq(msg):
"""
-Find some specific escaping/formating sequences (like \", %s, etc.,
+Find some specific escaping/formatting sequences (like \", %s, etc.,
and protect them from any modification!
"""
# LRM = "\u200E"
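
The protection idea can be illustrated without fribidi at all; a simplified sketch using placeholder substitution (the names and regex are illustrative -- the real protect_format_seq() handles a wider set of sequences and uses bidi control characters):

import re

_SEQ = re.compile(r'\\"|%\.?\d*[A-Za-z]')

def protect(msg):
    # Swap escape/format sequences for a sentinel before any reshaping...
    seqs = _SEQ.findall(msg)
    return _SEQ.sub("\x00", msg), seqs

def restore(msg, seqs):
    # ...and put them back, in their original order, afterwards.
    for s in seqs:
        msg = msg.replace("\x00", s, 1)
    return msg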

View File

@@ -22,7 +22,7 @@
# This module can get render info without running from inside blender.
#
-# This struct wont change according to Ton.
+# This struct won't change according to Ton.
# Note that the size differs on 32/64bit
#
# typedef struct BHead {

View File

@@ -301,7 +301,7 @@ def resolve_ncase(path):
if f_iter_nocase:
return _os.path.join(dirpath, f_iter_nocase) + suffix, True
else:
-# cant find the right one, just return the path as is.
+# can't find the right one, just return the path as is.
return path, False
ncase_path, found = _ncase_path_found(path)
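
The fallback above is the tail end of a per-component case-insensitive lookup; the core idea in isolation (hypothetical helper, not bpy.path.resolve_ncase itself):

import os

def find_nocase(dirpath, filename):
    # Compare directory entries case-insensitively; when nothing matches,
    # return the path unchanged and flag the miss, as the snippet above does.
    f_lower = filename.lower()
    for f in os.listdir(dirpath):
        if f.lower() == f_lower:
            return os.path.join(dirpath, f), True
    return os.path.join(dirpath, filename), False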

View File

@@ -32,7 +32,7 @@ __all__ = (
def mesh_linked_uv_islands(mesh):
"""
-Splits the mesh into connected polygons, use this for seperating cubes from
+Splits the mesh into connected polygons, use this for separating cubes from
other mesh elements within 1 mesh datablock.
:arg mesh: the mesh used to group with.
@@ -92,7 +92,7 @@ def mesh_linked_uv_islands(mesh):
def mesh_linked_tessfaces(mesh):
"""
-Splits the mesh into connected faces, use this for seperating cubes from
+Splits the mesh into connected faces, use this for separating cubes from
other mesh elements within 1 mesh datablock.
:arg mesh: the mesh used to group with.
@@ -182,7 +182,7 @@ def edge_loops_from_tessfaces(mesh, tessfaces=None, seams=()):
Edge loops defined by faces
Takes me.tessfaces or a list of faces and returns the edge loops
-These edge loops are the edges that sit between quads, so they dont touch
+These edge loops are the edges that sit between quads, so they don't touch
1 quad, note: not connected will make 2 edge loops,
both only containing 2 edges.
@@ -252,7 +252,7 @@ def edge_loops_from_tessfaces(mesh, tessfaces=None, seams=()):
i = ed_adj.index(context_loop[-2])
context_loop.append(ed_adj[not i])
-# Dont look at this again
+# Don't look at this again
del ed_adj[:]
return edge_loops
@@ -377,7 +377,7 @@ def ngon_tessellate(from_data, indices, fix_loops=True):
else:
"""
-Seperate this loop into multiple loops be finding edges that are
+Separate this loop into multiple loops be finding edges that are
used twice. This is used by lightwave LWO files a lot
"""

View File

@@ -37,7 +37,7 @@ changes have been made:
- limit list of modules to prefix in case of "from w"
- sorted modules
- added sphinx documentation
-- complete() returns a blank list of the module isnt found
+- complete() returns a blank list of the module isn't found
"""

View File

@@ -68,7 +68,7 @@ def get_console(console_id):
# check if clearing the namespace is needed to avoid a memory leak.
# the window manager is normally loaded with new blend files
# so this is a reasonable way to deal with namespace clearing.
-# bpy.data hashing is reset by undo so cant be used.
+# bpy.data hashing is reset by undo so can't be used.
hash_prev = getattr(get_console, "consoles_namespace_hash", 0)
if hash_prev != hash_next:

View File

@@ -83,7 +83,7 @@ def float_as_string(f):
def get_py_class_from_rna(rna_type):
""" Get's the Python type for a class which isn't necessarily added to ``bpy.types``.
""" Gets the Python type for a class which isn't necessarily added to ``bpy.types``.
"""
identifier = rna_type.identifier
py_class = getattr(bpy.types, identifier, None)

View File

@@ -32,7 +32,7 @@ def build_property_typemap(skip_classes, skip_typemap):
if issubclass(cls, skip_classes):
continue
-# # to support skip-save we cant get all props
+# # to support skip-save we can't get all props
# properties = cls.bl_rna.properties.keys()
properties = []
for prop_id, prop in cls.bl_rna.properties.items():
@@ -149,7 +149,7 @@ def rna2xml(fw=print_ln,
subvalue_rna = value.path_resolve(prop, False)
if type(subvalue_rna).__name__ == "bpy_prop_array":
# check if this is a 0-1 color (rgb, rgba)
-# in that case write as a hexidecimal
+# in that case write as a hexadecimal
prop_rna = value.bl_rna.properties[prop]
if (prop_rna.subtype == 'COLOR_GAMMA' and
prop_rna.hard_min == 0.0 and
@@ -274,7 +274,7 @@ def xml2rna(root_xml,
tp_name = 'STR'
elif hasattr(subvalue, "__len__"):
if value_xml.startswith("#"):
-# read hexidecimal value as float array
+# read hexadecimal value as float array
value_xml_split = value_xml[1:]
value_xml_coerce = [int(value_xml_split[i:i + 2], 16) /
255 for i in range(0, len(value_xml_split), 2)]
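
A worked example of the hexadecimal round-trip used in both hunks above: a 0-1 color is written as bytes and read back by dividing each byte by 255.

value_xml_split = "ff8000"   # example value, as it appears after the leading '#'
value_xml_coerce = [int(value_xml_split[i:i + 2], 16) / 255
                    for i in range(0, len(value_xml_split), 2)]
# -> [1.0, 0.5019607843137255, 0.0]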

View File

@@ -473,7 +473,7 @@ class ShapeTransfer(Operator):
objects = [ob for ob in context.selected_editable_objects
if ob != ob_act]
-if 1: # swap from/to, means we cant copy to many at once.
+if 1: # swap from/to, means we can't copy to many at once.
if len(objects) != 1:
self.report({'ERROR'},
("Expected one other selected "

View File

@@ -52,7 +52,7 @@ def extend(obj, operator, EXTEND_MODE):
# our own local walker
def walk_face_init(faces, f_act):
-# first tag all faces True (so we dont uvmap them)
+# first tag all faces True (so we don't uvmap them)
for f in bm.faces:
f.tag = True
# then tag faces arg False

View File

@@ -450,7 +450,7 @@ def lightmap_uvpack(meshes,
max_int_dimension = int(((side_len / float_to_int_factor)) / PREF_BOX_DIV)
ok = True
else:
-max_int_dimension = 0.0 # wont be used
+max_int_dimension = 0.0 # won't be used
ok = False
# RECURSIVE pretty face grouping

View File

@@ -271,7 +271,7 @@ def optiRotateUvIsland(faces):
# orient them vertically (could be an option)
minx, miny, maxx, maxy = boundsIsland(faces)
w, h = maxx - minx, maxy - miny
-# use epsilon so we dont randomly rotate (almost) perfect squares.
+# use epsilon so we don't randomly rotate (almost) perfect squares.
if h + 0.00001 < w:
from math import pi
angle = pi / 2.0
@@ -357,7 +357,7 @@ def mergeUvIslands(islandList):
BREAK = True
break
-# Now we have 2 islands, if the efficiency of the islands lowers theres an
+# Now we have 2 islands, if the efficiency of the islands lowers there's an
# increasing likely hood that we can fit merge into the bigger UV island.
# this ensures a tight fit.
@@ -625,7 +625,7 @@ def packIslands(islandList):
h = SMALL_NUM
"""Save the offset to be applied later,
-we could apply to the UVs now and allign them to the bottom left hand area
+we could apply to the UVs now and align them to the bottom left hand area
of the UV coords like the box packer imagines they are
but, its quicker just to remember their offset and
apply the packing and offset in 1 pass """
@@ -873,7 +873,7 @@ def main(context,
# This while only gathers projection vecs, faces are assigned later on.
while 1:
-# If theres none there then start with the largest face
+# If there's none there then start with the largest face
# add all the faces that are close.
for fIdx in range(len(tempMeshFaces) - 1, -1, -1):

View File

@@ -98,7 +98,7 @@ def operator_path_is_undo(context, data_path):
# note that if we have data paths that use strings this could fail
# luckily we don't do this!
#
-# When we cant find the data owner assume no undo is needed.
+# When we can't find the data owner assume no undo is needed.
data_path_head = data_path.rpartition(".")[0]
if not data_path_head:
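
The owner lookup above hinges on splitting the data path at its last dot; in isolation (the example path is illustrative):

data_path = "scene.render.resolution_x"          # e.g. taken from a UI button
data_path_head = data_path.rpartition(".")[0]    # -> "scene.render"
# An empty head means there is no resolvable owner, and per the comment
# above the code then assumes no undo push is needed.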

View File

@@ -944,7 +944,7 @@ class CLIP_PT_stabilization(CLIP_PT_reconstruction_panel, Panel):
sub.menu('CLIP_MT_stabilize_2d_specials', text="",
icon='DOWNARROW_HLT')
-# Usually we don't hide things from iterface, but here every pixel of
+# Usually we don't hide things from interface, but here every pixel of
# vertical space is precious.
if stab.use_stabilize_rotation:
box.label(text="Tracks For Rotation / Scale")

View File

@@ -1294,7 +1294,7 @@ class INFO_MT_add(Menu):
def draw(self, context):
layout = self.layout
-# note, don't use 'EXEC_SCREEN' or operators wont get the 'v3d' context.
+# note, don't use 'EXEC_SCREEN' or operators won't get the 'v3d' context.
# Note: was EXEC_AREA, but this context does not have the 'rv3d', which prevents
# "align_view" to work on first call (see [#32719]).

View File

@@ -95,7 +95,7 @@ def main():
help="Render an image to the specified path",
)
-args = parser.parse_args(argv) # In this example we wont use the args
+args = parser.parse_args(argv) # In this example we won't use the args
if not argv:
parser.print_help()
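
The file this hunk comes from follows Blender's background-job pattern: everything after a lone "--" on the command line is passed through untouched for the script itself. A condensed standalone sketch (the --render argument name is taken from the help text shown above):

import argparse
import sys

# Blender leaves arguments after "--" alone, so slice them off for argparse.
argv = sys.argv
argv = argv[argv.index("--") + 1:] if "--" in argv else []

parser = argparse.ArgumentParser()
parser.add_argument("--render", help="Render an image to the specified path")
args = parser.parse_args(argv)
if not argv:
    parser.print_help()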