Cleanup: spelling
This commit is contained in:
@@ -194,7 +194,7 @@ string OpenCLDevice::get_build_options(const DeviceRequestedFeatures &requested_
|
||||
DeviceRequestedFeatures features(requested_features);
|
||||
enable_default_features(features);
|
||||
|
||||
/* Always turn off baking at this point. Baking is only usefull when building the bake kernel.
|
||||
/* Always turn off baking at this point. Baking is only useful when building the bake kernel.
|
||||
* this also makes sure that the kernels that are build during baking can be reused
|
||||
* when not doing any baking. */
|
||||
features.use_baking = false;
|
||||
|
||||
@@ -746,7 +746,7 @@ bool OpenCLInfo::device_supported(const string &platform_name, const cl_device_i
|
||||
}
|
||||
VLOG(3) << "OpenCL driver version " << driver_major << "." << driver_minor;
|
||||
|
||||
/* It is possible tyo have Iris GPU on AMD/Apple OpenCL framework
|
||||
/* It is possible to have Iris GPU on AMD/Apple OpenCL framework
|
||||
* (aka, it will not be on Intel framework). This isn't supported
|
||||
* and needs an explicit blacklist.
|
||||
*/
|
||||
|
||||
@@ -283,8 +283,8 @@ vector<int> distribute_threads_on_nodes(const int num_threads)
|
||||
}
|
||||
++current_node_index;
|
||||
}
|
||||
/* Second pass: keep scheduling threads to each node one by one, uniformly
|
||||
* fillign them in.
|
||||
/* Second pass: keep scheduling threads to each node one by one,
|
||||
* uniformly filling them in.
|
||||
* This is where things becomes tricky to predict for the maximum
|
||||
* performance: on the one hand this avoids too much threading overhead on
|
||||
* few nodes, but for the final performance having all the overhead on one
|
||||
|
||||
@@ -166,8 +166,8 @@ static inline Eigen::Vector3d SphericalRangeParameters(const Eigen::Matrix3d &R)
|
||||
// singularity at pi
|
||||
if (fabs(num) < IK_EPSILON)
|
||||
// TODO: this does now rotation of size pi over z axis, but could
|
||||
// be any axis, how to deal with this i'm not sure, maybe don't
|
||||
// enforce limits at all then
|
||||
// be any axis, how to deal with this I'm not sure, maybe don't
|
||||
// enforce limits at all then.
|
||||
return Eigen::Vector3d(0.0, tau, 1.0);
|
||||
|
||||
num = 1.0 / sqrt(num);
|
||||
|
||||
@@ -2226,7 +2226,7 @@ static bool ob_parcurve(Object *ob, Object *par, float mat[4][4])
|
||||
* dependency cycles. We can't correct anything from here, since that would
|
||||
* cause a threading conflicts.
|
||||
*
|
||||
* TODO(sergey): Somce of the legit looking cases like T56619 need to be
|
||||
* TODO(sergey): Some of the legit looking cases like T56619 need to be
|
||||
* looked into, and maybe curve cache (and other dependencies) are to be
|
||||
* evaluated prior to conversion. */
|
||||
if (par->runtime.curve_cache == NULL) {
|
||||
|
||||
@@ -69,7 +69,7 @@ static int last_studiolight_id = 0;
|
||||
|
||||
/*
|
||||
* Disable this option so caches are not loaded from disk
|
||||
* Do not checkin with this commented out
|
||||
* Do not check in with this commented out.
|
||||
*/
|
||||
#define STUDIOLIGHT_LOAD_CACHED_FILES
|
||||
|
||||
|
||||
@@ -264,7 +264,7 @@ static void do_versions_nodetree_multi_file_output_format_2_62_1(Scene *sce, bNo
|
||||
|
||||
/* if z buffer is saved, change the image type to multilayer exr.
|
||||
* XXX this is slightly messy, Z buffer was ignored before for anything but EXR and IRIS ...
|
||||
* i'm just assuming here that IRIZ means IRIS with z buffer ...
|
||||
* I'm just assuming here that IRIZ means IRIS with z buffer ...
|
||||
*/
|
||||
if (old_data && ELEM(old_data->im_format.imtype, R_IMF_IMTYPE_IRIZ, R_IMF_IMTYPE_OPENEXR)) {
|
||||
char sockpath[FILE_MAX];
|
||||
|
||||
@@ -111,9 +111,9 @@ void TextureBaseOperation::executePixelSampled(float output[4],
|
||||
float u = (x - cx) / this->getWidth() * 2;
|
||||
float v = (y - cy) / this->getHeight() * 2;
|
||||
|
||||
/* When no interpolation/filtering happens in multitex() foce nearest interpolation.
|
||||
/* When no interpolation/filtering happens in multitex() force nearest interpolation.
|
||||
* We do it here because (a) we can't easily say multitex() that we want nearest
|
||||
* interpolation and (b) in such configuration multitex() sinply floor's the value
|
||||
* interpolation and (b) in such configuration multitex() simply floor's the value
|
||||
* which often produces artifacts.
|
||||
*/
|
||||
if (m_texture != NULL && (m_texture->imaflag & TEX_INTERPOL) == 0) {
|
||||
|
||||
@@ -4633,8 +4633,8 @@ void ui_draw_but(const bContext *C, ARegion *ar, uiStyle *style, uiBut *but, rct
|
||||
else {
|
||||
/* with menu arrows */
|
||||
|
||||
/* we could use a flag for this, but for now just check size,
|
||||
* add updown arrows if there is room. */
|
||||
/* We could use a flag for this, but for now just check size,
|
||||
* add up/down arrows if there is room. */
|
||||
if ((!but->str[0] && but->icon && (BLI_rcti_size_x(rect) < BLI_rcti_size_y(rect) + 2)) ||
|
||||
/* disable for brushes also */
|
||||
(but->flag & UI_BUT_ICON_PREVIEW)) {
|
||||
|
||||
@@ -338,7 +338,7 @@ static PTCacheEdit *pe_get_current(Depsgraph *depsgraph, Scene *scene, Object *o
|
||||
}
|
||||
|
||||
/* Don't consider inactive or render dependency graphs, since they might be evaluated for a
|
||||
* different number of childrem. or have different pointer to evaluated particle system or
|
||||
* different number of children, or have different pointer to evaluated particle system or
|
||||
* modifier which will also cause troubles. */
|
||||
if (edit && DEG_is_active(depsgraph)) {
|
||||
edit->pid = *pid;
|
||||
|
||||
@@ -442,14 +442,14 @@ static bool outliner_collection_is_isolated(Scene *scene,
|
||||
else if (BKE_collection_has_collection(collection_ensure, (Collection *)collection_ensure_cmp) ||
|
||||
BKE_collection_has_collection((Collection *)collection_ensure_cmp, collection_ensure)) {
|
||||
/* This collection is either a parent or a child of the collection.
|
||||
* We expect it to be set "visble" already. */
|
||||
* We expect it to be set "visible" already. */
|
||||
if (value != value_cmp) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
else {
|
||||
/* This collection is neither a parent nor a child of the collection.
|
||||
* We expect it to be "invisble". */
|
||||
* We expect it to be "invisible". */
|
||||
if (value == value_cmp) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -3962,7 +3962,7 @@ static float p_smooth_median_edge_length(PChart *chart)
|
||||
float median;
|
||||
int i;
|
||||
|
||||
/* ok, so i'm lazy */
|
||||
/* ok, so I'm lazy */
|
||||
for (i = 0, e = chart->edges; e; e = e->nextlink, i++) {
|
||||
lengths[i] = p_edge_length(e);
|
||||
}
|
||||
|
||||
@@ -59,7 +59,7 @@ class GeomCleaner {
|
||||
* Output of sorted vertices.
|
||||
* A vertex v1 precedes another one v2 in this array
|
||||
* if v1.x<v2.x, or v1.x=v2.x && v1.y < v2.y or v1.x=v2.y && v1.y=v2.y && v1.z < v2.z.
|
||||
* The array is organized as a 3-float serie giving the vertices coordinates: XYZXYZXYZ...
|
||||
* The array is organized as a 3-float series giving the vertices coordinates: XYZXYZXYZ...
|
||||
* oIndices
|
||||
* Output corresponding to the iIndices array but reorganized in
|
||||
* order to match the sorted vertex array.
|
||||
@@ -86,7 +86,7 @@ class GeomCleaner {
|
||||
* The size of iIndices array
|
||||
* oVertices
|
||||
* The vertex array, result of the compression.
|
||||
* The array is organized as a 3-float serie giving the vertices coordinates: XYZXYZXYZ...
|
||||
* The array is organized as a 3-float series giving the vertices coordinates: XYZXYZXYZ...
|
||||
* oVSize
|
||||
* The size of oVertices.
|
||||
* oIndices
|
||||
@@ -114,7 +114,7 @@ class GeomCleaner {
|
||||
* The size of iIndices array
|
||||
* oVertices
|
||||
* The vertex array, result of the sorting-compression.
|
||||
* The array is organized as a 3-float serie giving the vertices coordinates: XYZXYZXYZ...
|
||||
* The array is organized as a 3-float series giving the vertices coordinates: XYZXYZXYZ...
|
||||
* oVSize
|
||||
* The size of oVertices.
|
||||
* oIndices
|
||||
@@ -143,7 +143,7 @@ class GeomCleaner {
|
||||
* The size of iIndices array
|
||||
* oVertices
|
||||
* The vertex array, result of the sorting-compression.
|
||||
* The array is organized as a 3-float serie giving the vertices coordinates: XYZXYZXYZ...
|
||||
* The array is organized as a 3-float series giving the vertices coordinates: XYZXYZXYZ...
|
||||
* oVSize
|
||||
* The size of oVertices.
|
||||
* oIndices
|
||||
|
||||
@@ -45,10 +45,10 @@ class NodeTransform : public NodeGroup {
|
||||
{
|
||||
}
|
||||
|
||||
/*! multiplys the current matrix by the x, y, z translation matrix. */
|
||||
/*! multiplies the current matrix by the x, y, z translation matrix. */
|
||||
void Translate(real x, real y, real z);
|
||||
|
||||
/*! multiplys the current matrix by a rotation matrix
|
||||
/*! multiplies the current matrix by a rotation matrix
|
||||
* iAngle
|
||||
* The rotation angle
|
||||
* x, y, z
|
||||
|
||||
@@ -208,7 +208,7 @@ class GetDirectionalViewMapDensityF1D : public UnaryFunction1D<double> {
|
||||
|
||||
// GetSteerableViewMapDensityF1D
|
||||
/*! Returns the density of the viewmap for a given Interface1D. The density of each FEdge is
|
||||
* evaluated in the proper steerable ViewMap depending on its oorientation.
|
||||
* evaluated in the proper steerable ViewMap depending on its orientation.
|
||||
*/
|
||||
class GetSteerableViewMapDensityF1D : public UnaryFunction1D<double> {
|
||||
private:
|
||||
|
||||
@@ -101,7 +101,7 @@ class SpatialNoiseShader : public StrokeShader {
|
||||
};
|
||||
|
||||
/*! [ Geometry Shader ].
|
||||
* Smoothes the stroke.
|
||||
* Smooths the stroke.
|
||||
* (Moves the vertices to make the stroke smoother).
|
||||
* Uses curvature flow to converge towards a curve of constant curvature. The diffusion method we
|
||||
* use is anisotropic to prevent the diffusion across corners. \see \htmlonly <a
|
||||
|
||||
@@ -515,8 +515,8 @@ class PolygonalizationShader : public StrokeShader {
|
||||
/*! Builds the shader.
|
||||
* \param iError:
|
||||
* The error we want our polygonal approximation to have with respect to the original
|
||||
* geometry. The smaller, the closer the new stroke to the orinal one. This error corresponds to
|
||||
* the maximum distance between the new stroke and the old one.
|
||||
* geometry. The smaller, the closer the new stroke to the original one.
|
||||
* This error corresponds to the maximum distance between the new stroke and the old one.
|
||||
*/
|
||||
PolygonalizationShader(float iError) : StrokeShader()
|
||||
{
|
||||
|
||||
@@ -623,7 +623,7 @@ bool rna_PoseChannel_constraints_override_apply(Main *UNUSED(bmain),
|
||||
bPoseChannel *pchan_src = (bPoseChannel *)ptr_src->data;
|
||||
|
||||
/* Remember that insertion operations are defined and stored in correct order, which means that
|
||||
* even if we insert several items in a row, we alays insert first one, then second one, etc.
|
||||
* even if we insert several items in a row, we always insert first one, then second one, etc.
|
||||
* So we should always find 'anchor' constraint in both _src *and* _dst> */
|
||||
bConstraint *con_anchor = NULL;
|
||||
if (opop->subitem_local_name && opop->subitem_local_name[0]) {
|
||||
|
||||
@@ -307,7 +307,7 @@ static void smooth_iter__length_weight(CorrectiveSmoothModifierData *csmd,
|
||||
/* fast-path */
|
||||
for (i = 0; i < numVerts; i++) {
|
||||
struct SmoothingData_Weighted *sd = &smooth_data[i];
|
||||
/* Divide by sum of all neighbour distances (weighted) and amount of neighbors,
|
||||
/* Divide by sum of all neighbor distances (weighted) and amount of neighbors,
|
||||
* (mean average). */
|
||||
const float div = sd->edge_length_sum * vertex_edge_count[i];
|
||||
if (div > eps) {
|
||||
|
||||
@@ -2534,7 +2534,7 @@ static PyObject *Matrix_mul(PyObject *m1, PyObject *m2)
|
||||
return NULL;
|
||||
}
|
||||
/*------------------------obj *= obj------------------------------
|
||||
* Inplace element-wise multiplication */
|
||||
* In place element-wise multiplication */
|
||||
static PyObject *Matrix_imul(PyObject *m1, PyObject *m2)
|
||||
{
|
||||
float scalar;
|
||||
@@ -2567,7 +2567,7 @@ static PyObject *Matrix_imul(PyObject *m1, PyObject *m2)
|
||||
mul_vn_vn(mat1->matrix, mat2->matrix, mat1->num_col * mat1->num_row);
|
||||
#else
|
||||
PyErr_Format(PyExc_TypeError,
|
||||
"Inplace element-wise multiplication: "
|
||||
"In place element-wise multiplication: "
|
||||
"not supported between '%.200s' and '%.200s' types",
|
||||
Py_TYPE(m1)->tp_name,
|
||||
Py_TYPE(m2)->tp_name);
|
||||
@@ -2580,7 +2580,7 @@ static PyObject *Matrix_imul(PyObject *m1, PyObject *m2)
|
||||
}
|
||||
else {
|
||||
PyErr_Format(PyExc_TypeError,
|
||||
"Inplace element-wise multiplication: "
|
||||
"In place element-wise multiplication: "
|
||||
"not supported between '%.200s' and '%.200s' types",
|
||||
Py_TYPE(m1)->tp_name,
|
||||
Py_TYPE(m2)->tp_name);
|
||||
@@ -2668,7 +2668,7 @@ static PyObject *Matrix_matmul(PyObject *m1, PyObject *m2)
|
||||
return NULL;
|
||||
}
|
||||
/*------------------------obj @= obj------------------------------
|
||||
* inplace matrix multiplication */
|
||||
* In place matrix multiplication */
|
||||
static PyObject *Matrix_imatmul(PyObject *m1, PyObject *m2)
|
||||
{
|
||||
MatrixObject *mat1 = NULL, *mat2 = NULL;
|
||||
@@ -2715,7 +2715,7 @@ static PyObject *Matrix_imatmul(PyObject *m1, PyObject *m2)
|
||||
}
|
||||
else {
|
||||
PyErr_Format(PyExc_TypeError,
|
||||
"Inplace matrix multiplication: "
|
||||
"In place matrix multiplication: "
|
||||
"not supported between '%.200s' and '%.200s' types",
|
||||
Py_TYPE(m1)->tp_name,
|
||||
Py_TYPE(m2)->tp_name);
|
||||
|
||||
@@ -935,7 +935,7 @@ static PyObject *Quaternion_imul(PyObject *q1, PyObject *q2)
|
||||
mul_vn_vn(quat1->quat, quat2->quat, QUAT_SIZE);
|
||||
#else
|
||||
PyErr_Format(PyExc_TypeError,
|
||||
"Inplace element-wise multiplication: "
|
||||
"In place element-wise multiplication: "
|
||||
"not supported between '%.200s' and '%.200s' types",
|
||||
Py_TYPE(q1)->tp_name,
|
||||
Py_TYPE(q2)->tp_name);
|
||||
@@ -1040,7 +1040,7 @@ static PyObject *Quaternion_imatmul(PyObject *q1, PyObject *q2)
|
||||
}
|
||||
else {
|
||||
PyErr_Format(PyExc_TypeError,
|
||||
"Inplace quaternion multiplication: "
|
||||
"In place quaternion multiplication: "
|
||||
"not supported between '%.200s' and '%.200s' types",
|
||||
Py_TYPE(q1)->tp_name,
|
||||
Py_TYPE(q2)->tp_name);
|
||||
|
||||
@@ -1834,7 +1834,7 @@ static PyObject *Vector_imul(PyObject *v1, PyObject *v2)
|
||||
mul_vn_vn(vec1->vec, vec2->vec, vec1->size);
|
||||
#else
|
||||
PyErr_Format(PyExc_TypeError,
|
||||
"Inplace element-wise multiplication: "
|
||||
"In place element-wise multiplication: "
|
||||
"not supported between '%.200s' and '%.200s' types",
|
||||
Py_TYPE(v1)->tp_name,
|
||||
Py_TYPE(v2)->tp_name);
|
||||
@@ -1847,7 +1847,7 @@ static PyObject *Vector_imul(PyObject *v1, PyObject *v2)
|
||||
}
|
||||
else {
|
||||
PyErr_Format(PyExc_TypeError,
|
||||
"Inplace element-wise multiplication: "
|
||||
"In place element-wise multiplication: "
|
||||
"not supported between '%.200s' and '%.200s' types",
|
||||
Py_TYPE(v1)->tp_name,
|
||||
Py_TYPE(v2)->tp_name);
|
||||
@@ -1925,7 +1925,7 @@ static PyObject *Vector_matmul(PyObject *v1, PyObject *v2)
|
||||
static PyObject *Vector_imatmul(PyObject *v1, PyObject *v2)
|
||||
{
|
||||
PyErr_Format(PyExc_TypeError,
|
||||
"Inplace vector multiplication: "
|
||||
"In place vector multiplication: "
|
||||
"not supported between '%.200s' and '%.200s' types",
|
||||
Py_TYPE(v1)->tp_name,
|
||||
Py_TYPE(v2)->tp_name);
|
||||
|
||||
@@ -947,7 +947,7 @@ static void alpha_clip_aniso(
|
||||
float alphaclip;
|
||||
rctf rf;
|
||||
|
||||
/* TXF apha: we're doing the same alphaclip here as boxsample, but i'm doubting
|
||||
/* TXF alpha: we're doing the same alpha-clip here as box-sample, but I'm doubting
|
||||
* if this is actually correct for the all the filtering algorithms .. */
|
||||
|
||||
if (!(extflag == TXC_REPT || extflag == TXC_EXTD)) {
|
||||
|
||||
Reference in New Issue
Block a user