Build: replace Blender specific DEBUG by standard NDEBUG #115774

Merged
Brecht Van Lommel merged 2 commits from brecht/blender:ndebug into main 2023-12-06 16:05:26 +01:00
89 changed files with 166 additions and 167 deletions

View File

@ -55,7 +55,9 @@ endif()
# global compile definitions since add_definitions() adds for all.
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS
$<$<CONFIG:Debug>:DEBUG;_DEBUG>
# Visual Studio define for debug, enable on all platforms.
$<$<CONFIG:Debug>:_DEBUG>
# Standard C define to disable asserts.
$<$<CONFIG:Release>:NDEBUG>
$<$<CONFIG:MinSizeRel>:NDEBUG>
$<$<CONFIG:RelWithDebInfo>:NDEBUG>

View File

@ -1311,7 +1311,7 @@ int curve_fit_cubic_to_points_refit_db(
#ifdef USE_CORNER_DETECT
if (use_corner) {
#ifdef DEBUG
#ifndef NDEBUG
for (uint i = 0; i < knots_len; i++) {
assert(knots[i].heap_node == NULL);
}

View File

@ -206,7 +206,7 @@ static void rt_node_free(RangeTreeUInt *rt, Node *node);
#ifdef USE_BTREE
#ifdef DEBUG
#ifndef NDEBUG
static bool rb_is_balanced_root(const Node *root);
#endif
@ -238,7 +238,7 @@ static int key_cmp(uint key1, uint key2)
/* removed from the tree */
static void rb_node_invalidate(Node *node)
{
#ifdef DEBUG
#ifndef NDEBUG
node->left = NULL;
node->right = NULL;
node->color = false;
@ -481,7 +481,7 @@ static Node *rb_get_or_lower_recursive(Node *n, const uint key)
}
}
#ifdef DEBUG
#ifndef NDEBUG
static bool rb_is_balanced_recursive(const Node *node, int black)
{
@ -511,7 +511,7 @@ static bool rb_is_balanced_root(const Node *root)
return rb_is_balanced_recursive(root, black);
}
#endif // DEBUG
#endif // NDEBUG
/* End BTree API */
@ -703,7 +703,7 @@ RangeTreeUInt *range_tree_uint_alloc(uint min, uint max)
void range_tree_uint_free(RangeTreeUInt *rt)
{
#ifdef DEBUG
#ifndef NDEBUG
#ifdef USE_BTREE
assert(rb_is_balanced_root(rt->root));
#endif

View File

@ -259,7 +259,7 @@ string HIPDevice::compile_kernel(const uint kernel_features, const char *name, c
# else
options.append("Wno-parentheses-equality -Wno-unused-value --hipcc-func-supp -O3 -ffast-math");
# endif
# ifdef _DEBUG
# ifndef NDEBUG
options.append(" -save-temps");
# endif
options.append(" --amdgpu-target=").append(arch);

View File

@ -545,7 +545,7 @@ void OneapiDevice::usm_free(void *usm_ptr)
void OneapiDevice::check_usm(SyclQueue *queue_, const void *usm_ptr, bool allow_host = false)
{
# ifdef _DEBUG
# ifndef NDEBUG
sycl::queue *queue = reinterpret_cast<sycl::queue *>(queue_);
sycl::info::device_type device_type =
queue->get_device().get_info<sycl::info::device::device_type>();

View File

@ -10,7 +10,7 @@
#pragma once
#ifdef _MSC_VER
# ifdef DEBUG
# ifdef _DEBUG
/* Suppress STL-MSVC debug info warning. */
# pragma warning(disable : 4786)
# endif

View File

@ -159,7 +159,7 @@ GHOST_TSuccess GHOST_DisplayManagerX11::setCurrentDisplaySetting(
fprintf(stderr, "Error: XF86VidMode extension missing!\n");
return GHOST_kFailure;
}
# ifdef DEBUG
# ifndef NDEBUG
printf("Using XFree86-VidModeExtension Version %d.%d\n", majorVersion, minorVersion);
# endif
@ -199,7 +199,7 @@ GHOST_TSuccess GHOST_DisplayManagerX11::setCurrentDisplaySetting(
}
if (best_fit != -1) {
# ifdef DEBUG
# ifndef NDEBUG
printf("Switching to video mode %dx%d %dx%d %d\n",
vidmodes[best_fit]->hdisplay,
vidmodes[best_fit]->vdisplay,

View File

@ -345,7 +345,7 @@ int BLF_load_default(bool unique);
int BLF_load_mono_default(bool unique);
void BLF_load_font_stack(void);
#ifdef DEBUG
#ifndef NDEBUG
void BLF_state_print(int fontid);
#endif

View File

@ -1017,7 +1017,7 @@ float BLF_character_to_curves(int fontid,
return blf_character_to_curves(font, unicode, nurbsbase, scale);
}
#ifdef DEBUG
#ifndef NDEBUG
void BLF_state_print(int fontid)
{
FontBLF *font = blf_get(fontid);

View File

@ -765,7 +765,7 @@ static FT_UInt blf_glyph_index_from_charcode(FontBLF **font, const uint charcode
}
}
#ifdef DEBUG
#ifndef NDEBUG
printf("Unicode character U+%04X not found in loaded fonts. \n", charcode);
#endif

View File

@ -882,7 +882,7 @@ static void where_am_i(char *program_filepath,
/* Remove "/./" and "/../" so string comparisons can be used on the path. */
BLI_path_normalize_native(program_filepath);
# if defined(DEBUG)
# ifndef NDEBUG
if (!STREQ(program_name, program_filepath)) {
CLOG_INFO(&LOG, 2, "guessing '%s' == '%s'", program_name, program_filepath);
}

View File

@ -681,7 +681,7 @@ void MutableAttributeAccessor::remove_anonymous()
/**
* Debug utility that checks whether the #finish function of an #AttributeWriter has been called.
*/
#ifdef DEBUG
#ifndef NDEBUG
struct FinishCallChecker {
std::string name;
bool finish_called = false;
@ -700,7 +700,7 @@ GAttributeWriter MutableAttributeAccessor::lookup_for_write(const AttributeIDRef
{
GAttributeWriter attribute = fn_->lookup_for_write(owner_, attribute_id);
/* Check that the #finish method is called in debug builds. */
#ifdef DEBUG
#ifndef NDEBUG
if (attribute) {
auto checker = std::make_shared<FinishCallChecker>();
checker->name = attribute_id.name();

View File

@ -1217,7 +1217,7 @@ BVHTree *BKE_bvhtree_from_mesh_get(BVHTreeFromMesh *data,
bvhcache_insert(*bvh_cache_p, data->tree, bvh_cache_type);
bvhcache_unlock(*bvh_cache_p, lock_started);
#ifdef DEBUG
#ifndef NDEBUG
if (data->tree != nullptr) {
if (BLI_bvhtree_get_tree_type(data->tree) != tree_type) {
printf("tree_type %d obtained instead of %d\n",
@ -1307,7 +1307,7 @@ BVHTree *BKE_bvhtree_from_editmesh_get(BVHTreeFromEditMesh *data,
bvhcache_unlock(*bvh_cache_p, lock_started);
}
#ifdef DEBUG
#ifndef NDEBUG
if (data->tree != nullptr) {
if (BLI_bvhtree_get_tree_type(data->tree) != tree_type) {
printf("tree_type %d obtained instead of %d\n",

View File

@ -72,7 +72,7 @@ CurvesGeometry::CurvesGeometry(const int point_num, const int curve_num)
MEM_malloc_arrayN(this->curve_num + 1, sizeof(int), __func__));
this->runtime->curve_offsets_sharing_info = implicit_sharing::info_for_mem_free(
this->curve_offsets);
#ifdef DEBUG
#ifndef NDEBUG
this->offsets_for_write().fill(-1);
#endif
/* Set common values for convenience. */

View File

@ -535,7 +535,7 @@ void BKE_mesh_face_offsets_ensure_alloc(Mesh *mesh)
mesh->runtime->face_offsets_sharing_info = blender::implicit_sharing::info_for_mem_free(
mesh->face_offset_indices);
#ifdef DEBUG
#ifndef NDEBUG
/* Fill offsets with obviously bad values to simplify finding missing initialization. */
mesh->face_offsets_for_write().fill(-1);
#endif

View File

@ -74,7 +74,7 @@ Mesh *BKE_mesh_wrapper_from_editmesh(BMEditMesh *em,
me->edit_mesh->is_shallow_copy = true;
/* Make sure we crash if these are ever used. */
#ifdef DEBUG
#ifndef NDEBUG
me->totvert = INT_MAX;
me->totedge = INT_MAX;
me->faces_num = INT_MAX;

View File

@ -512,7 +512,7 @@ class NodeTreeMainUpdater {
result.interface_changed = true;
}
#ifdef DEBUG
#ifndef NDEBUG
/* Check the uniqueness of node identifiers. */
Set<int32_t> node_identifiers;
const Span<const bNode *> nodes = ntree.all_nodes();

View File

@ -1121,7 +1121,7 @@ bool BKE_paint_ensure(ToolSettings *ts, Paint **r_paint)
(Paint *)ts->uvsculpt,
(Paint *)ts->curves_sculpt,
(Paint *)&ts->imapaint));
#ifdef DEBUG
#ifndef NDEBUG
Paint paint_test = **r_paint;
BKE_paint_runtime_init(ts, *r_paint);
/* Swap so debug doesn't hide errors when release fails. */

View File

@ -379,7 +379,7 @@ WorkSpaceLayout *BKE_workspace_layout_add(Main *bmain,
WorkSpaceLayout *layout = MEM_cnew<WorkSpaceLayout>(__func__);
BLI_assert(!workspaces_is_screen_used(bmain, screen));
#ifndef DEBUG
#ifdef NDEBUG
UNUSED_VARS(bmain);
#endif
layout->screen = screen;

View File

@ -12,7 +12,7 @@
*/
/* only validate array-bounds in debug mode */
#ifdef DEBUG
#ifndef NDEBUG
# define STACK_DECLARE(stack) unsigned int _##stack##_index, _##stack##_num_alloc
# define STACK_INIT(stack, stack_num) \
((void)stack, \

View File

@ -49,7 +49,7 @@
/* Setting zero so we can catch bugs in BLI_task/KDOPBVH.
* TODO(sergey): Deduplicate the limits with PBVH from BKE.
*/
#ifdef DEBUG
#ifndef NDEBUG
# define KDOPBVH_THREAD_LEAF_THRESHOLD 0
#else
# define KDOPBVH_THREAD_LEAF_THRESHOLD 1024

View File

@ -22,7 +22,7 @@ namespace blender::index_mask {
template<typename T> void build_reverse_map(const IndexMask &mask, MutableSpan<T> r_map)
{
#ifdef DEBUG
#ifndef NDEBUG
/* Catch errors with asserts in debug builds. */
r_map.fill(-1);
#endif

View File

@ -37,7 +37,7 @@ struct KDTree {
uint nodes_len;
uint root;
int max_node_index;
#ifdef DEBUG
#ifndef NDEBUG
bool is_balanced; /* ensure we call balance first */
uint nodes_len_capacity; /* max size of the tree */
#endif
@ -97,7 +97,7 @@ KDTree *BLI_kdtree_nd_(new)(uint nodes_len_capacity)
tree->root = KD_NODE_ROOT_IS_INIT;
tree->max_node_index = -1;
#ifdef DEBUG
#ifndef NDEBUG
tree->is_balanced = false;
tree->nodes_len_capacity = nodes_len_capacity;
#endif
@ -120,7 +120,7 @@ void BLI_kdtree_nd_(insert)(KDTree *tree, int index, const float co[KD_DIMS])
{
KDTreeNode *node = &tree->nodes[tree->nodes_len++];
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(tree->nodes_len <= tree->nodes_len_capacity);
#endif
@ -133,7 +133,7 @@ void BLI_kdtree_nd_(insert)(KDTree *tree, int index, const float co[KD_DIMS])
node->d = 0;
tree->max_node_index = MAX2(tree->max_node_index, index);
#ifdef DEBUG
#ifndef NDEBUG
tree->is_balanced = false;
#endif
}
@ -205,7 +205,7 @@ void BLI_kdtree_nd_(balance)(KDTree *tree)
tree->root = kdtree_balance(tree->nodes, tree->nodes_len, 0, 0);
#ifdef DEBUG
#ifndef NDEBUG
tree->is_balanced = true;
#endif
}
@ -236,7 +236,7 @@ int BLI_kdtree_nd_(find_nearest)(const KDTree *tree,
float min_dist, cur_dist;
uint stack_len_capacity, cur = 0;
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(tree->is_balanced == true);
#endif
@ -346,7 +346,7 @@ int BLI_kdtree_nd_(find_nearest_cb)(
float min_dist = FLT_MAX, cur_dist;
uint stack_len_capacity, cur = 0;
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(tree->is_balanced == true);
#endif
@ -487,7 +487,7 @@ int BLI_kdtree_nd_(find_nearest_n_with_len_squared_cb)(
uint stack_len_capacity, cur = 0;
uint i, nearest_len = 0;
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(tree->is_balanced == true);
#endif
@ -652,7 +652,7 @@ int BLI_kdtree_nd_(range_search_with_len_squared_cb)(
uint stack_len_capacity, cur = 0;
uint nearest_len = 0, nearest_len_capacity = 0;
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(tree->is_balanced == true);
#endif
@ -746,7 +746,7 @@ void BLI_kdtree_nd_(range_search_cb)(
float range_sq = range * range, dist_sq;
uint stack_len_capacity, cur = 0;
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(tree->is_balanced == true);
#endif
@ -978,7 +978,7 @@ static int kdtree_node_cmp_deduplicate(const void *n0_p, const void *n1_p)
*/
int BLI_kdtree_nd_(deduplicate)(KDTree *tree)
{
#ifdef DEBUG
#ifndef NDEBUG
tree->is_balanced = false;
#endif
qsort(tree->nodes, (size_t)tree->nodes_len, sizeof(*tree->nodes), kdtree_node_cmp_deduplicate);

View File

@ -3589,7 +3589,7 @@ static bool barycentric_weights(const float v1[3],
wtot = w[0] + w[1] + w[2];
#ifdef DEBUG /* Avoid floating point exception when debugging. */
#ifndef NDEBUG /* Avoid floating point exception when debugging. */
if (wtot != 0.0f)
#endif
{
@ -3686,7 +3686,7 @@ bool barycentric_coords_v2(
const float x3 = v3[0], y3 = v3[1];
const float det = (y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3);
#ifdef DEBUG /* Avoid floating point exception when debugging. */
#ifndef NDEBUG /* Avoid floating point exception when debugging. */
if (det != 0.0f)
#endif
{
@ -3711,7 +3711,7 @@ void barycentric_weights_v2(
w[2] = cross_tri_v2(v1, v2, co);
wtot = w[0] + w[1] + w[2];
#ifdef DEBUG /* Avoid floating point exception when debugging. */
#ifndef NDEBUG /* Avoid floating point exception when debugging. */
if (wtot != 0.0f)
#endif
{
@ -3734,7 +3734,7 @@ void barycentric_weights_v2_clamped(
w[2] = max_ff(cross_tri_v2(v1, v2, co), 0.0f);
wtot = w[0] + w[1] + w[2];
#ifdef DEBUG /* Avoid floating point exception when debugging. */
#ifndef NDEBUG /* Avoid floating point exception when debugging. */
if (wtot != 0.0f)
#endif
{
@ -3757,7 +3757,7 @@ void barycentric_weights_v2_persp(
w[2] = cross_tri_v2(v1, v2, co) / v3[3];
wtot = w[0] + w[1] + w[2];
#ifdef DEBUG /* Avoid floating point exception when debugging. */
#ifndef NDEBUG /* Avoid floating point exception when debugging. */
if (wtot != 0.0f)
#endif
{
@ -3849,7 +3849,7 @@ void barycentric_weights_v2_quad(const float v1[2],
wtot = w[0] + w[1] + w[2] + w[3];
#ifdef DEBUG /* Avoid floating point exception when debugging. */
#ifndef NDEBUG /* Avoid floating point exception when debugging. */
if (wtot != 0.0f)
#endif
{

View File

@ -17,7 +17,7 @@
/******************************** Quaternions ********************************/
/* used to test if a quat is not normalized (only used for debug prints) */
#ifdef DEBUG
#ifndef NDEBUG
# define QUAT_EPSILON 0.0001
#endif
@ -216,7 +216,7 @@ static void quat_to_mat3_no_error(float m[3][3], const float q[4])
void quat_to_mat3(float m[3][3], const float q[4])
{
#ifdef DEBUG
#ifndef NDEBUG
float f;
if (!((f = dot_qtqt(q, q)) == 0.0f || (fabsf(f - 1.0f) < (float)QUAT_EPSILON))) {
fprintf(stderr,
@ -232,7 +232,7 @@ void quat_to_mat4(float m[4][4], const float q[4])
{
double q0, q1, q2, q3, qda, qdb, qdc, qaa, qab, qac, qbb, qbc, qcc;
#ifdef DEBUG
#ifndef NDEBUG
if (!((q0 = dot_qtqt(q, q)) == 0.0 || (fabs(q0 - 1.0) < QUAT_EPSILON))) {
fprintf(stderr,
"Warning! quat_to_mat4() called with non-normalized: size %.8f *** report a bug ***\n",
@ -1065,7 +1065,7 @@ void quat_to_axis_angle(float axis[3], float *angle, const float q[4])
{
float ha, si;
#ifdef DEBUG
#ifndef NDEBUG
if (!((ha = dot_qtqt(q, q)) == 0.0f || (fabsf(ha - 1.0f) < (float)QUAT_EPSILON))) {
fprintf(stderr,
"Warning! quat_to_axis_angle() called with non-normalized: size %.8f *** report a bug "

View File

@ -463,7 +463,7 @@ static void pf_coord_remove(PolyFill *pf, PolyIndex *pi)
if (UNLIKELY(pf->indices == pi)) {
pf->indices = pi->next;
}
#ifdef DEBUG
#ifndef NDEBUG
pi->index = (uint32_t)-1;
pi->next = pi->prev = NULL;
#endif

View File

@ -842,7 +842,7 @@ uint BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const float n
BLI_assert(!nor_proj || len_squared_v3(nor_proj) > FLT_EPSILON);
#ifdef DEBUG
#ifndef NDEBUG
for (eve = sf_ctx->fillvertbase.first; eve; eve = eve->next) {
/* these values used to be set,
* however they should always be zero'd so check instead */
@ -984,7 +984,7 @@ uint BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const float n
}
if (eed) {
/* otherwise it's impossible to be sure you can clear vertices */
#ifdef DEBUG
#ifndef NDEBUG
printf("No vertices with 250 edges allowed!\n");
#endif
return 0;
@ -1027,7 +1027,7 @@ uint BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const float n
eed->v1->edge_count++;
eed->v2->edge_count++;
}
#ifdef DEBUG
#ifndef NDEBUG
/* ensure we're right! */
for (eed = sf_ctx->filledgebase.first; eed; eed = eed->next) {
BLI_assert(eed->v1->edge_count != 1);

View File

@ -664,9 +664,5 @@ typedef bool (*BMLoopPairFilterFunc)(const BMLoop *, const BMLoop *, void *user_
#define BM_LOOP_RADIAL_MAX 10000
#define BM_NGON_MAX 100000
/* setting zero so we can catch bugs in OpenMP/BMesh */
#ifdef DEBUG
# define BM_OMP_LIMIT 0
#else
# define BM_OMP_LIMIT 10000
#endif
/* Minimum number of elements before using threading. */
#define BM_THREAD_LIMIT 10000

View File

@ -76,7 +76,7 @@ void BM_face_copy_shared(BMesh *bm, BMFace *f, BMLoopFilterFunc filter_fn, void
BMLoop *l_first;
BMLoop *l_iter;
#ifdef DEBUG
#ifndef NDEBUG
l_iter = l_first = BM_FACE_FIRST_LOOP(f);
do {
BLI_assert(BM_ELEM_API_FLAG_TEST(l_iter, _FLAG_OVERLAP) == 0);

View File

@ -360,7 +360,7 @@ int BM_iter_mesh_count_flag(const char itype, BMesh *bm, const char hflag, const
* allow adding but not removing, this isn't _totally_ safe since
* you could add/remove within the same loop, but catches common cases
*/
#ifdef DEBUG
#ifndef NDEBUG
# define USE_IMMUTABLE_ASSERT
#endif

View File

@ -73,7 +73,7 @@ extern const char bm_iter_itype_htype_map[BM_ITYPE_MAX];
/* a version of BM_ITER_MESH which keeps the next item in storage
* so we can delete the current item, see bug #36923. */
#ifdef DEBUG
#ifndef NDEBUG
# define BM_ITER_MESH_MUTABLE(ele, ele_next, iter, bm, itype) \
for (BM_CHECK_TYPE_ELEM_ASSIGN(ele) = BM_iter_new(iter, bm, itype, NULL); \
ele ? ((void)((iter)->count = BM_iter_mesh_count(itype, bm)), \

View File

@ -381,7 +381,7 @@ static void bm_mesh_select_mode_flush_vert_to_edge(BMesh *bm)
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.use_threading = bm->totedge >= BM_OMP_LIMIT;
settings.use_threading = bm->totedge >= BM_THREAD_LIMIT;
settings.userdata_chunk = &chunk_data;
settings.userdata_chunk_size = sizeof(chunk_data);
settings.func_reduce = bm_mesh_select_mode_flush_reduce_fn;
@ -397,7 +397,7 @@ static void bm_mesh_select_mode_flush_edge_to_face(BMesh *bm)
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.use_threading = bm->totface >= BM_OMP_LIMIT;
settings.use_threading = bm->totface >= BM_THREAD_LIMIT;
settings.userdata_chunk = &chunk_data;
settings.userdata_chunk_size = sizeof(chunk_data);
settings.func_reduce = bm_mesh_select_mode_flush_reduce_fn;
@ -1156,7 +1156,7 @@ void BM_select_history_merge_from_targetmap(
BMesh *bm, GHash *vert_map, GHash *edge_map, GHash *face_map, const bool use_chain)
{
#ifdef DEBUG
#ifndef NDEBUG
LISTBASE_FOREACH (BMEditSelection *, ese, &bm->selected) {
BLI_assert(BM_ELEM_API_FLAG_TEST(ese->ele, _FLAG_OVERLAP) == 0);
}

View File

@ -335,7 +335,7 @@ void bmesh_edit_end(BMesh *bm, BMOpTypeFlag type_flag)
void BM_mesh_elem_index_ensure_ex(BMesh *bm, const char htype, int elem_offset[4])
{
#ifdef DEBUG
#ifndef NDEBUG
BM_ELEM_INDEX_VALIDATE(bm, "Should Never Fail!", __func__);
#endif
@ -514,7 +514,7 @@ void BM_mesh_elem_index_validate(
}
#if 0 /* mostly annoying, even in debug mode */
# ifdef DEBUG
# ifndef NDEBUG
if (is_any_error == 0) {
fprintf(stderr, "Valid Index Success: at %s, %s, '%s', '%s'\n", location, func, msg_a, msg_b);
}

View File

@ -212,7 +212,7 @@ static void bm_mesh_verts_calc_normals(BMesh *bm,
TaskParallelSettings settings;
BLI_parallel_mempool_settings_defaults(&settings);
settings.use_threading = bm->totvert >= BM_OMP_LIMIT;
settings.use_threading = bm->totvert >= BM_THREAD_LIMIT;
if (vcos == nullptr) {
BM_iter_parallel(bm, BM_VERTS_OF_MESH, bm_vert_calc_normals_cb, nullptr, &settings);
@ -242,7 +242,7 @@ void BM_mesh_normals_update_ex(BMesh *bm, const BMeshNormalsUpdate_Params *param
/* Calculate all face normals. */
TaskParallelSettings settings;
BLI_parallel_mempool_settings_defaults(&settings);
settings.use_threading = bm->totedge >= BM_OMP_LIMIT;
settings.use_threading = bm->totedge >= BM_THREAD_LIMIT;
BM_iter_parallel(bm, BM_FACES_OF_MESH, bm_face_calc_normals_cb, nullptr, &settings);
}
@ -1356,7 +1356,7 @@ static void bm_mesh_loops_calc_normals(BMesh *bm,
const bool do_rebuild,
const float split_angle_cos)
{
if (bm->totloop < BM_OMP_LIMIT) {
if (bm->totloop < BM_THREAD_LIMIT) {
bm_mesh_loops_calc_normals__single_threaded(bm,
vcos,
fnos,

View File

@ -49,7 +49,7 @@ BLI_INLINE void bmesh_calc_tessellation_for_face_impl(BMLoop *(*looptris)[3],
MemArena **pf_arena_p,
const bool face_normal)
{
#ifdef DEBUG
#ifndef NDEBUG
/* The face normal is used for projecting faces into 2D space for tessellation.
* Invalid normals may result in invalid tessellation.
* Either `face_normal` should be true or normals should be updated first. */

View File

@ -9,7 +9,7 @@
*/
/* debug builds only */
#ifdef DEBUG
#ifndef NDEBUG
# include "BLI_map.hh"
# include "BLI_ordered_edge.hh"

View File

@ -132,7 +132,7 @@ void BMO_op_init(BMesh *bm, BMOperator *op, const int flag, const char *opname)
{
int opcode = BMO_opcode_from_opname(opname);
#ifdef DEBUG
#ifndef NDEBUG
BM_ELEM_INDEX_VALIDATE(bm, "pre bmo", opname);
#else
(void)bm;
@ -185,7 +185,7 @@ void BMO_op_finish(BMesh *bm, BMOperator *op)
BLI_memarena_free(op->arena);
#ifdef DEBUG
#ifndef NDEBUG
BM_ELEM_INDEX_VALIDATE(bm, "post bmo", bmo_opdefines[op->type]->opname);
/* avoid accidental re-use */

View File

@ -492,7 +492,7 @@ bool BM_face_split_edgenet(BMesh *bm,
BLI_assert(BM_ELEM_API_FLAG_TEST(f, FACE_NET) == 0);
BM_ELEM_API_FLAG_ENABLE(f, FACE_NET);
#ifdef DEBUG
#ifndef NDEBUG
for (i = 0; i < edge_net_len; i++) {
BLI_assert(BM_ELEM_API_FLAG_TEST(edge_net[i], EDGE_NET) == 0);
BLI_assert(BM_edge_in_face(edge_net[i], f) == false);
@ -1637,7 +1637,7 @@ finally:
if (use_partial_connect) {
/* Sanity check: ensure we don't have connecting edges before splicing begins. */
# ifdef DEBUG
# ifndef NDEBUG
{
struct TempVertPair *tvp = temp_vert_pairs.list;
do {

View File

@ -1828,7 +1828,7 @@ BMFace *BM_face_exists_overlap(BMVert **varr, const int len)
BMFace *f_overlap = nullptr;
LinkNode *f_lnk = nullptr;
#ifdef DEBUG
#ifndef NDEBUG
/* check flag isn't already set */
for (i = 0; i < len; i++) {
BM_ITER_ELEM (f, &viter, varr[i], BM_FACES_OF_VERT) {
@ -1866,7 +1866,7 @@ bool BM_face_exists_overlap_subset(BMVert **varr, const int len)
bool is_overlap = false;
LinkNode *f_lnk = nullptr;
#ifdef DEBUG
#ifndef NDEBUG
/* check flag isn't already set */
for (int i = 0; i < len; i++) {
BLI_assert(BM_ELEM_API_FLAG_TEST(varr[i], _FLAG_OVERLAP) == 0);
@ -2112,7 +2112,7 @@ int BM_mesh_calc_face_groups(BMesh *bm,
{
/* NOTE: almost duplicate of #BM_mesh_calc_edge_groups, keep in sync. */
#ifdef DEBUG
#ifndef NDEBUG
int group_index_len = 1;
#else
int group_index_len = 32;
@ -2266,7 +2266,7 @@ int BM_mesh_calc_edge_groups(BMesh *bm,
{
/* NOTE: almost duplicate of #BM_mesh_calc_face_groups, keep in sync. */
#ifdef DEBUG
#ifndef NDEBUG
int group_index_len = 1;
#else
int group_index_len = 32;

View File

@ -475,7 +475,7 @@ static void bm_grid_fill(BMesh *bm,
const uint ytot = uint(BM_edgeloop_length_get(estore_rail_a));
// BMVert *v;
uint i;
#ifdef DEBUG
#ifndef NDEBUG
uint x, y;
#endif
LinkData *el;
@ -523,7 +523,7 @@ static void bm_grid_fill(BMesh *bm,
for (el = static_cast<LinkData *>(lb_rail_b->first), i = 0; el; el = el->next, i++) {
v_grid[(xtot * i) + (xtot - 1)] = static_cast<BMVert *>(el->data);
}
#ifdef DEBUG
#ifndef NDEBUG
for (x = 1; x < xtot - 1; x++) {
for (y = 1; y < ytot - 1; y++) {
BLI_assert(v_grid[(y * xtot) + x] == nullptr);

View File

@ -300,7 +300,7 @@ static bool apply_mesh_output_to_bmesh(BMesh *bm, IMesh &m_out, bool keep_hidden
BMIter iter;
BMFace *bmf = static_cast<BMFace *>(BM_iter_new(&iter, bm, BM_FACES_OF_MESH, nullptr));
while (bmf != nullptr) {
# ifdef DEBUG
# ifndef NDEBUG
iter.count = BM_iter_mesh_count(BM_FACES_OF_MESH, bm);
# endif
BMFace *bmf_next = static_cast<BMFace *>(BM_iter_step(&iter));
@ -318,7 +318,7 @@ static bool apply_mesh_output_to_bmesh(BMesh *bm, IMesh &m_out, bool keep_hidden
}
BMVert *bmv = static_cast<BMVert *>(BM_iter_new(&iter, bm, BM_VERTS_OF_MESH, nullptr));
while (bmv != nullptr) {
# ifdef DEBUG
# ifndef NDEBUG
iter.count = BM_iter_mesh_count(BM_VERTS_OF_MESH, bm);
# endif
BMVert *bmv_next = static_cast<BMVert *>(BM_iter_step(&iter));

View File

@ -1086,7 +1086,7 @@ bool BM_mesh_intersect(BMesh *bm,
const bool isect_tri_tri_no_shared = (boolean_mode != BMESH_ISECT_BOOLEAN_NONE);
int flag = BVH_OVERLAP_USE_THREADING | BVH_OVERLAP_RETURN_PAIRS;
# ifdef DEBUG
# ifndef NDEBUG
/* The overlap result must match that obtained in Release to succeed
* in the `bmesh_boolean` test. */
if (looptris_tot < 1024) {

View File

@ -237,7 +237,7 @@ float Light::point_radiance_get(const ::Light *la)
void Light::debug_draw()
{
#ifdef DEBUG
#ifndef NDEBUG
drw_debug_sphere(_position, influence_radius_max, float4(0.8f, 0.3f, 0.0f, 1.0f));
#endif
}

View File

@ -52,7 +52,7 @@ ShaderModule::ShaderModule()
shader = nullptr;
}
#ifdef DEBUG
#ifndef NDEBUG
/* Ensure all shaders are described. */
for (auto i : IndexRange(MAX_SHADER_TYPE)) {
const char *name = static_shader_create_info_name_get(eShaderType(i));

View File

@ -345,7 +345,7 @@ GPU_SHADER_CREATE_INFO(eevee_surf_occupancy)
* Variations that are only there to test shaders at compile time.
* \{ */
#ifdef DEBUG
#ifndef NDEBUG
/* Stub functions defined by the material evaluation. */
GPU_SHADER_CREATE_INFO(eevee_material_stub)

View File

@ -38,7 +38,7 @@ ShaderModule::ShaderModule()
shader = nullptr;
}
#ifdef DEBUG
#ifndef NDEBUG
/* Ensure all shaders are described. */
for (auto i : IndexRange(MAX_SHADER_TYPE)) {
const char *name = static_shader_create_info_name_get(eShaderType(i));

View File

@ -88,7 +88,7 @@ struct SelectMap {
/** Mapping between internal IDs and `object->runtime->select_id`. */
Vector<uint> select_id_map;
#ifdef DEBUG
#ifndef NDEBUG
/** Debug map containing a copy of the object name. */
Vector<std::string> map_names;
#endif
@ -113,7 +113,7 @@ struct SelectMap {
uint object_id = ob_ref.object->runtime->select_id;
uint id = select_id_map.append_and_get_index(object_id | sub_object_id);
#ifdef DEBUG
#ifndef NDEBUG
map_names.append(ob_ref.object->id.name);
#endif
return {id};
@ -153,7 +153,7 @@ struct SelectMap {
info_buf.push_update();
select_id_map.clear();
#ifdef DEBUG
#ifndef NDEBUG
map_names.clear();
#endif
}

View File

@ -641,7 +641,7 @@ static bool workbench_render_framebuffers_init()
GPU_framebuffer_check_valid(dfbl->depth_only_fb, nullptr);
}
#ifdef DEBUG
#ifdef _DEBUG
/* This is just to ease GPU debugging when the frame delimiter is set to Finish */
# define GPU_FINISH_DELIMITER() GPU_finish()
#else

View File

@ -172,7 +172,7 @@ class UniformCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
protected:
GPUUniformBuf *ubo_;
#ifdef DEBUG
#ifndef NDEBUG
const char *name_ = typeid(T).name();
#else
const char *name_ = "UniformBuffer";
@ -215,7 +215,7 @@ class StorageCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
protected:
GPUStorageBuf *ssbo_;
#ifdef DEBUG
#ifndef NDEBUG
const char *name_ = typeid(T).name();
#else
const char *name_ = "StorageBuffer";

View File

@ -916,7 +916,7 @@ void DRW_curve_batch_cache_create_requested(Object *ob, const Scene *scene)
curve_render_data_free(rdata);
#ifdef DEBUG
#ifndef NDEBUG
/* Make sure all requested batches have been setup. */
for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], (GPUPrimType)0));

View File

@ -1299,7 +1299,7 @@ static void drw_add_attributes_vbo(GPUBatch *batch,
}
}
#ifdef DEBUG
#ifndef NDEBUG
/* Sanity check function to test if all requested batches are available. */
static void drw_mesh_batch_cache_check_available(TaskGraph *task_graph, Mesh *me)
{
@ -1350,13 +1350,13 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph *task_graph,
/* Early out */
if (cache.batch_requested == 0) {
<