Build: replace Blender specific DEBUG by standard NDEBUG #115774

Merged
Brecht Van Lommel merged 2 commits from brecht/blender:ndebug into main 2023-12-06 16:05:26 +01:00
89 changed files with 166 additions and 167 deletions

View File

@ -55,7 +55,9 @@ endif()
# global compile definitions since add_definitions() adds for all.
set_property(DIRECTORY APPEND PROPERTY COMPILE_DEFINITIONS
$<$<CONFIG:Debug>:DEBUG;_DEBUG>
# Visual Studio define for debug, enable on all platforms.
$<$<CONFIG:Debug>:_DEBUG>
# Standard C define to disable asserts.
$<$<CONFIG:Release>:NDEBUG>
$<$<CONFIG:MinSizeRel>:NDEBUG>
$<$<CONFIG:RelWithDebInfo>:NDEBUG>

View File

@ -1311,7 +1311,7 @@ int curve_fit_cubic_to_points_refit_db(
#ifdef USE_CORNER_DETECT
if (use_corner) {
#ifdef DEBUG
#ifndef NDEBUG
for (uint i = 0; i < knots_len; i++) {
assert(knots[i].heap_node == NULL);
}

View File

@ -206,7 +206,7 @@ static void rt_node_free(RangeTreeUInt *rt, Node *node);
#ifdef USE_BTREE
#ifdef DEBUG
#ifndef NDEBUG
static bool rb_is_balanced_root(const Node *root);
#endif
@ -238,7 +238,7 @@ static int key_cmp(uint key1, uint key2)
/* removed from the tree */
static void rb_node_invalidate(Node *node)
{
#ifdef DEBUG
#ifndef NDEBUG
node->left = NULL;
node->right = NULL;
node->color = false;
@ -481,7 +481,7 @@ static Node *rb_get_or_lower_recursive(Node *n, const uint key)
}
}
#ifdef DEBUG
#ifndef NDEBUG
static bool rb_is_balanced_recursive(const Node *node, int black)
{
@ -511,7 +511,7 @@ static bool rb_is_balanced_root(const Node *root)
return rb_is_balanced_recursive(root, black);
}
#endif // DEBUG
#endif // NDEBUG
/* End BTree API */
@ -703,7 +703,7 @@ RangeTreeUInt *range_tree_uint_alloc(uint min, uint max)
void range_tree_uint_free(RangeTreeUInt *rt)
{
#ifdef DEBUG
#ifndef NDEBUG
#ifdef USE_BTREE
assert(rb_is_balanced_root(rt->root));
#endif

View File

@ -259,7 +259,7 @@ string HIPDevice::compile_kernel(const uint kernel_features, const char *name, c
# else
options.append("Wno-parentheses-equality -Wno-unused-value --hipcc-func-supp -O3 -ffast-math");
# endif
# ifdef _DEBUG
# ifndef NDEBUG
options.append(" -save-temps");
# endif
options.append(" --amdgpu-target=").append(arch);

View File

@ -545,7 +545,7 @@ void OneapiDevice::usm_free(void *usm_ptr)
void OneapiDevice::check_usm(SyclQueue *queue_, const void *usm_ptr, bool allow_host = false)
{
# ifdef _DEBUG
# ifndef NDEBUG
sycl::queue *queue = reinterpret_cast<sycl::queue *>(queue_);
sycl::info::device_type device_type =
queue->get_device().get_info<sycl::info::device::device_type>();

View File

@ -10,7 +10,7 @@
#pragma once
#ifdef _MSC_VER
# ifdef DEBUG
# ifdef _DEBUG
/* Suppress STL-MSVC debug info warning. */
# pragma warning(disable : 4786)
# endif

View File

@ -159,7 +159,7 @@ GHOST_TSuccess GHOST_DisplayManagerX11::setCurrentDisplaySetting(
fprintf(stderr, "Error: XF86VidMode extension missing!\n");
return GHOST_kFailure;
}
# ifdef DEBUG
# ifndef NDEBUG
printf("Using XFree86-VidModeExtension Version %d.%d\n", majorVersion, minorVersion);
# endif
@ -199,7 +199,7 @@ GHOST_TSuccess GHOST_DisplayManagerX11::setCurrentDisplaySetting(
}
if (best_fit != -1) {
# ifdef DEBUG
# ifndef NDEBUG
printf("Switching to video mode %dx%d %dx%d %d\n",
vidmodes[best_fit]->hdisplay,
vidmodes[best_fit]->vdisplay,

View File

@ -345,7 +345,7 @@ int BLF_load_default(bool unique);
int BLF_load_mono_default(bool unique);
void BLF_load_font_stack(void);
#ifdef DEBUG
#ifndef NDEBUG
void BLF_state_print(int fontid);
#endif

View File

@ -1017,7 +1017,7 @@ float BLF_character_to_curves(int fontid,
return blf_character_to_curves(font, unicode, nurbsbase, scale);
}
#ifdef DEBUG
#ifndef NDEBUG
void BLF_state_print(int fontid)
{
FontBLF *font = blf_get(fontid);

View File

@ -765,7 +765,7 @@ static FT_UInt blf_glyph_index_from_charcode(FontBLF **font, const uint charcode
}
}
#ifdef DEBUG
#ifndef NDEBUG
printf("Unicode character U+%04X not found in loaded fonts. \n", charcode);
#endif

View File

@ -882,7 +882,7 @@ static void where_am_i(char *program_filepath,
/* Remove "/./" and "/../" so string comparisons can be used on the path. */
BLI_path_normalize_native(program_filepath);
# if defined(DEBUG)
# ifndef NDEBUG
if (!STREQ(program_name, program_filepath)) {
CLOG_INFO(&LOG, 2, "guessing '%s' == '%s'", program_name, program_filepath);
}

View File

@ -681,7 +681,7 @@ void MutableAttributeAccessor::remove_anonymous()
/**
* Debug utility that checks whether the #finish function of an #AttributeWriter has been called.
*/
#ifdef DEBUG
#ifndef NDEBUG
struct FinishCallChecker {
std::string name;
bool finish_called = false;
@ -700,7 +700,7 @@ GAttributeWriter MutableAttributeAccessor::lookup_for_write(const AttributeIDRef
{
GAttributeWriter attribute = fn_->lookup_for_write(owner_, attribute_id);
/* Check that the #finish method is called in debug builds. */
#ifdef DEBUG
#ifndef NDEBUG
if (attribute) {
auto checker = std::make_shared<FinishCallChecker>();
checker->name = attribute_id.name();

View File

@ -1217,7 +1217,7 @@ BVHTree *BKE_bvhtree_from_mesh_get(BVHTreeFromMesh *data,
bvhcache_insert(*bvh_cache_p, data->tree, bvh_cache_type);
bvhcache_unlock(*bvh_cache_p, lock_started);
#ifdef DEBUG
#ifndef NDEBUG
if (data->tree != nullptr) {
if (BLI_bvhtree_get_tree_type(data->tree) != tree_type) {
printf("tree_type %d obtained instead of %d\n",
@ -1307,7 +1307,7 @@ BVHTree *BKE_bvhtree_from_editmesh_get(BVHTreeFromEditMesh *data,
bvhcache_unlock(*bvh_cache_p, lock_started);
}
#ifdef DEBUG
#ifndef NDEBUG
if (data->tree != nullptr) {
if (BLI_bvhtree_get_tree_type(data->tree) != tree_type) {
printf("tree_type %d obtained instead of %d\n",

View File

@ -72,7 +72,7 @@ CurvesGeometry::CurvesGeometry(const int point_num, const int curve_num)
MEM_malloc_arrayN(this->curve_num + 1, sizeof(int), __func__));
this->runtime->curve_offsets_sharing_info = implicit_sharing::info_for_mem_free(
this->curve_offsets);
#ifdef DEBUG
#ifndef NDEBUG
this->offsets_for_write().fill(-1);
#endif
/* Set common values for convenience. */

View File

@ -535,7 +535,7 @@ void BKE_mesh_face_offsets_ensure_alloc(Mesh *mesh)
mesh->runtime->face_offsets_sharing_info = blender::implicit_sharing::info_for_mem_free(
mesh->face_offset_indices);
#ifdef DEBUG
#ifndef NDEBUG
/* Fill offsets with obviously bad values to simplify finding missing initialization. */
mesh->face_offsets_for_write().fill(-1);
#endif

View File

@ -74,7 +74,7 @@ Mesh *BKE_mesh_wrapper_from_editmesh(BMEditMesh *em,
me->edit_mesh->is_shallow_copy = true;
/* Make sure we crash if these are ever used. */
#ifdef DEBUG
#ifndef NDEBUG
me->totvert = INT_MAX;
me->totedge = INT_MAX;
me->faces_num = INT_MAX;

View File

@ -512,7 +512,7 @@ class NodeTreeMainUpdater {
result.interface_changed = true;
}
#ifdef DEBUG
#ifndef NDEBUG
/* Check the uniqueness of node identifiers. */
Set<int32_t> node_identifiers;
const Span<const bNode *> nodes = ntree.all_nodes();

View File

@ -1121,7 +1121,7 @@ bool BKE_paint_ensure(ToolSettings *ts, Paint **r_paint)
(Paint *)ts->uvsculpt,
(Paint *)ts->curves_sculpt,
(Paint *)&ts->imapaint));
#ifdef DEBUG
#ifndef NDEBUG
Paint paint_test = **r_paint;
BKE_paint_runtime_init(ts, *r_paint);
/* Swap so debug doesn't hide errors when release fails. */

View File

@ -379,7 +379,7 @@ WorkSpaceLayout *BKE_workspace_layout_add(Main *bmain,
WorkSpaceLayout *layout = MEM_cnew<WorkSpaceLayout>(__func__);
BLI_assert(!workspaces_is_screen_used(bmain, screen));
#ifndef DEBUG
#ifdef NDEBUG
UNUSED_VARS(bmain);
#endif
layout->screen = screen;

View File

@ -12,7 +12,7 @@
*/
/* only validate array-bounds in debug mode */
#ifdef DEBUG
#ifndef NDEBUG
# define STACK_DECLARE(stack) unsigned int _##stack##_index, _##stack##_num_alloc
# define STACK_INIT(stack, stack_num) \
((void)stack, \

View File

@ -49,7 +49,7 @@
/* Setting zero so we can catch bugs in BLI_task/KDOPBVH.
* TODO(sergey): Deduplicate the limits with PBVH from BKE.
*/
#ifdef DEBUG
#ifndef NDEBUG
# define KDOPBVH_THREAD_LEAF_THRESHOLD 0
#else
# define KDOPBVH_THREAD_LEAF_THRESHOLD 1024

View File

@ -22,7 +22,7 @@ namespace blender::index_mask {
template<typename T> void build_reverse_map(const IndexMask &mask, MutableSpan<T> r_map)
{
#ifdef DEBUG
#ifndef NDEBUG
/* Catch errors with asserts in debug builds. */
r_map.fill(-1);
#endif

View File

@ -37,7 +37,7 @@ struct KDTree {
uint nodes_len;
uint root;
int max_node_index;
#ifdef DEBUG
#ifndef NDEBUG
bool is_balanced; /* ensure we call balance first */
uint nodes_len_capacity; /* max size of the tree */
#endif
@ -97,7 +97,7 @@ KDTree *BLI_kdtree_nd_(new)(uint nodes_len_capacity)
tree->root = KD_NODE_ROOT_IS_INIT;
tree->max_node_index = -1;
#ifdef DEBUG
#ifndef NDEBUG
tree->is_balanced = false;
tree->nodes_len_capacity = nodes_len_capacity;
#endif
@ -120,7 +120,7 @@ void BLI_kdtree_nd_(insert)(KDTree *tree, int index, const float co[KD_DIMS])
{
KDTreeNode *node = &tree->nodes[tree->nodes_len++];
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(tree->nodes_len <= tree->nodes_len_capacity);
#endif
@ -133,7 +133,7 @@ void BLI_kdtree_nd_(insert)(KDTree *tree, int index, const float co[KD_DIMS])
node->d = 0;
tree->max_node_index = MAX2(tree->max_node_index, index);
#ifdef DEBUG
#ifndef NDEBUG
tree->is_balanced = false;
#endif
}
@ -205,7 +205,7 @@ void BLI_kdtree_nd_(balance)(KDTree *tree)
tree->root = kdtree_balance(tree->nodes, tree->nodes_len, 0, 0);
#ifdef DEBUG
#ifndef NDEBUG
tree->is_balanced = true;
#endif
}
@ -236,7 +236,7 @@ int BLI_kdtree_nd_(find_nearest)(const KDTree *tree,
float min_dist, cur_dist;
uint stack_len_capacity, cur = 0;
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(tree->is_balanced == true);
#endif
@ -346,7 +346,7 @@ int BLI_kdtree_nd_(find_nearest_cb)(
float min_dist = FLT_MAX, cur_dist;
uint stack_len_capacity, cur = 0;
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(tree->is_balanced == true);
#endif
@ -487,7 +487,7 @@ int BLI_kdtree_nd_(find_nearest_n_with_len_squared_cb)(
uint stack_len_capacity, cur = 0;
uint i, nearest_len = 0;
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(tree->is_balanced == true);
#endif
@ -652,7 +652,7 @@ int BLI_kdtree_nd_(range_search_with_len_squared_cb)(
uint stack_len_capacity, cur = 0;
uint nearest_len = 0, nearest_len_capacity = 0;
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(tree->is_balanced == true);
#endif
@ -746,7 +746,7 @@ void BLI_kdtree_nd_(range_search_cb)(
float range_sq = range * range, dist_sq;
uint stack_len_capacity, cur = 0;
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(tree->is_balanced == true);
#endif
@ -978,7 +978,7 @@ static int kdtree_node_cmp_deduplicate(const void *n0_p, const void *n1_p)
*/
int BLI_kdtree_nd_(deduplicate)(KDTree *tree)
{
#ifdef DEBUG
#ifndef NDEBUG
tree->is_balanced = false;
#endif
qsort(tree->nodes, (size_t)tree->nodes_len, sizeof(*tree->nodes), kdtree_node_cmp_deduplicate);

View File

@ -3589,7 +3589,7 @@ static bool barycentric_weights(const float v1[3],
wtot = w[0] + w[1] + w[2];
#ifdef DEBUG /* Avoid floating point exception when debugging. */
#ifndef NDEBUG /* Avoid floating point exception when debugging. */
if (wtot != 0.0f)
#endif
{
@ -3686,7 +3686,7 @@ bool barycentric_coords_v2(
const float x3 = v3[0], y3 = v3[1];
const float det = (y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3);
#ifdef DEBUG /* Avoid floating point exception when debugging. */
#ifndef NDEBUG /* Avoid floating point exception when debugging. */
if (det != 0.0f)
#endif
{
@ -3711,7 +3711,7 @@ void barycentric_weights_v2(
w[2] = cross_tri_v2(v1, v2, co);
wtot = w[0] + w[1] + w[2];
#ifdef DEBUG /* Avoid floating point exception when debugging. */
#ifndef NDEBUG /* Avoid floating point exception when debugging. */
if (wtot != 0.0f)
#endif
{
@ -3734,7 +3734,7 @@ void barycentric_weights_v2_clamped(
w[2] = max_ff(cross_tri_v2(v1, v2, co), 0.0f);
wtot = w[0] + w[1] + w[2];
#ifdef DEBUG /* Avoid floating point exception when debugging. */
#ifndef NDEBUG /* Avoid floating point exception when debugging. */
if (wtot != 0.0f)
#endif
{
@ -3757,7 +3757,7 @@ void barycentric_weights_v2_persp(
w[2] = cross_tri_v2(v1, v2, co) / v3[3];
wtot = w[0] + w[1] + w[2];
#ifdef DEBUG /* Avoid floating point exception when debugging. */
#ifndef NDEBUG /* Avoid floating point exception when debugging. */
if (wtot != 0.0f)
#endif
{
@ -3849,7 +3849,7 @@ void barycentric_weights_v2_quad(const float v1[2],
wtot = w[0] + w[1] + w[2] + w[3];
#ifdef DEBUG /* Avoid floating point exception when debugging. */
#ifndef NDEBUG /* Avoid floating point exception when debugging. */
if (wtot != 0.0f)
#endif
{

View File

@ -17,7 +17,7 @@
/******************************** Quaternions ********************************/
/* used to test if a quat is not normalized (only used for debug prints) */
#ifdef DEBUG
#ifndef NDEBUG
# define QUAT_EPSILON 0.0001
#endif
@ -216,7 +216,7 @@ static void quat_to_mat3_no_error(float m[3][3], const float q[4])
void quat_to_mat3(float m[3][3], const float q[4])
{
#ifdef DEBUG
#ifndef NDEBUG
float f;
if (!((f = dot_qtqt(q, q)) == 0.0f || (fabsf(f - 1.0f) < (float)QUAT_EPSILON))) {
fprintf(stderr,
@ -232,7 +232,7 @@ void quat_to_mat4(float m[4][4], const float q[4])
{
double q0, q1, q2, q3, qda, qdb, qdc, qaa, qab, qac, qbb, qbc, qcc;
#ifdef DEBUG
#ifndef NDEBUG
if (!((q0 = dot_qtqt(q, q)) == 0.0 || (fabs(q0 - 1.0) < QUAT_EPSILON))) {
fprintf(stderr,
"Warning! quat_to_mat4() called with non-normalized: size %.8f *** report a bug ***\n",
@ -1065,7 +1065,7 @@ void quat_to_axis_angle(float axis[3], float *angle, const float q[4])
{
float ha, si;
#ifdef DEBUG
#ifndef NDEBUG
if (!((ha = dot_qtqt(q, q)) == 0.0f || (fabsf(ha - 1.0f) < (float)QUAT_EPSILON))) {
fprintf(stderr,
"Warning! quat_to_axis_angle() called with non-normalized: size %.8f *** report a bug "

View File

@ -463,7 +463,7 @@ static void pf_coord_remove(PolyFill *pf, PolyIndex *pi)
if (UNLIKELY(pf->indices == pi)) {
pf->indices = pi->next;
}
#ifdef DEBUG
#ifndef NDEBUG
pi->index = (uint32_t)-1;
pi->next = pi->prev = NULL;
#endif

View File

@ -842,7 +842,7 @@ uint BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const float n
BLI_assert(!nor_proj || len_squared_v3(nor_proj) > FLT_EPSILON);
#ifdef DEBUG
#ifndef NDEBUG
for (eve = sf_ctx->fillvertbase.first; eve; eve = eve->next) {
/* these values used to be set,
* however they should always be zero'd so check instead */
@ -984,7 +984,7 @@ uint BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const float n
}
if (eed) {
/* otherwise it's impossible to be sure you can clear vertices */
#ifdef DEBUG
#ifndef NDEBUG
printf("No vertices with 250 edges allowed!\n");
#endif
return 0;
@ -1027,7 +1027,7 @@ uint BLI_scanfill_calc_ex(ScanFillContext *sf_ctx, const int flag, const float n
eed->v1->edge_count++;
eed->v2->edge_count++;
}
#ifdef DEBUG
#ifndef NDEBUG
/* ensure we're right! */
for (eed = sf_ctx->filledgebase.first; eed; eed = eed->next) {
BLI_assert(eed->v1->edge_count != 1);

View File

@ -664,9 +664,5 @@ typedef bool (*BMLoopPairFilterFunc)(const BMLoop *, const BMLoop *, void *user_
#define BM_LOOP_RADIAL_MAX 10000
#define BM_NGON_MAX 100000
/* setting zero so we can catch bugs in OpenMP/BMesh */
#ifdef DEBUG
# define BM_OMP_LIMIT 0
#else
# define BM_OMP_LIMIT 10000
#endif
/* Minimum number of elements before using threading. */
#define BM_THREAD_LIMIT 10000

View File

@ -76,7 +76,7 @@ void BM_face_copy_shared(BMesh *bm, BMFace *f, BMLoopFilterFunc filter_fn, void
BMLoop *l_first;
BMLoop *l_iter;
#ifdef DEBUG
#ifndef NDEBUG
l_iter = l_first = BM_FACE_FIRST_LOOP(f);
do {
BLI_assert(BM_ELEM_API_FLAG_TEST(l_iter, _FLAG_OVERLAP) == 0);

View File

@ -360,7 +360,7 @@ int BM_iter_mesh_count_flag(const char itype, BMesh *bm, const char hflag, const
* allow adding but not removing, this isn't _totally_ safe since
* you could add/remove within the same loop, but catches common cases
*/
#ifdef DEBUG
#ifndef NDEBUG
# define USE_IMMUTABLE_ASSERT
#endif

View File

@ -73,7 +73,7 @@ extern const char bm_iter_itype_htype_map[BM_ITYPE_MAX];
/* a version of BM_ITER_MESH which keeps the next item in storage
* so we can delete the current item, see bug #36923. */
#ifdef DEBUG
#ifndef NDEBUG
# define BM_ITER_MESH_MUTABLE(ele, ele_next, iter, bm, itype) \
for (BM_CHECK_TYPE_ELEM_ASSIGN(ele) = BM_iter_new(iter, bm, itype, NULL); \
ele ? ((void)((iter)->count = BM_iter_mesh_count(itype, bm)), \

View File

@ -381,7 +381,7 @@ static void bm_mesh_select_mode_flush_vert_to_edge(BMesh *bm)
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.use_threading = bm->totedge >= BM_OMP_LIMIT;
settings.use_threading = bm->totedge >= BM_THREAD_LIMIT;
settings.userdata_chunk = &chunk_data;
settings.userdata_chunk_size = sizeof(chunk_data);
settings.func_reduce = bm_mesh_select_mode_flush_reduce_fn;
@ -397,7 +397,7 @@ static void bm_mesh_select_mode_flush_edge_to_face(BMesh *bm)
TaskParallelSettings settings;
BLI_parallel_range_settings_defaults(&settings);
settings.use_threading = bm->totface >= BM_OMP_LIMIT;
settings.use_threading = bm->totface >= BM_THREAD_LIMIT;
settings.userdata_chunk = &chunk_data;
settings.userdata_chunk_size = sizeof(chunk_data);
settings.func_reduce = bm_mesh_select_mode_flush_reduce_fn;
@ -1156,7 +1156,7 @@ void BM_select_history_merge_from_targetmap(
BMesh *bm, GHash *vert_map, GHash *edge_map, GHash *face_map, const bool use_chain)
{
#ifdef DEBUG
#ifndef NDEBUG
LISTBASE_FOREACH (BMEditSelection *, ese, &bm->selected) {
BLI_assert(BM_ELEM_API_FLAG_TEST(ese->ele, _FLAG_OVERLAP) == 0);
}

View File

@ -335,7 +335,7 @@ void bmesh_edit_end(BMesh *bm, BMOpTypeFlag type_flag)
void BM_mesh_elem_index_ensure_ex(BMesh *bm, const char htype, int elem_offset[4])
{
#ifdef DEBUG
#ifndef NDEBUG
BM_ELEM_INDEX_VALIDATE(bm, "Should Never Fail!", __func__);
#endif
@ -514,7 +514,7 @@ void BM_mesh_elem_index_validate(
}
#if 0 /* mostly annoying, even in debug mode */
# ifdef DEBUG
# ifndef NDEBUG
if (is_any_error == 0) {
fprintf(stderr, "Valid Index Success: at %s, %s, '%s', '%s'\n", location, func, msg_a, msg_b);
}

View File

@ -212,7 +212,7 @@ static void bm_mesh_verts_calc_normals(BMesh *bm,
TaskParallelSettings settings;
BLI_parallel_mempool_settings_defaults(&settings);
settings.use_threading = bm->totvert >= BM_OMP_LIMIT;
settings.use_threading = bm->totvert >= BM_THREAD_LIMIT;
if (vcos == nullptr) {
BM_iter_parallel(bm, BM_VERTS_OF_MESH, bm_vert_calc_normals_cb, nullptr, &settings);
@ -242,7 +242,7 @@ void BM_mesh_normals_update_ex(BMesh *bm, const BMeshNormalsUpdate_Params *param
/* Calculate all face normals. */
TaskParallelSettings settings;
BLI_parallel_mempool_settings_defaults(&settings);
settings.use_threading = bm->totedge >= BM_OMP_LIMIT;
settings.use_threading = bm->totedge >= BM_THREAD_LIMIT;
BM_iter_parallel(bm, BM_FACES_OF_MESH, bm_face_calc_normals_cb, nullptr, &settings);
}
@ -1356,7 +1356,7 @@ static void bm_mesh_loops_calc_normals(BMesh *bm,
const bool do_rebuild,
const float split_angle_cos)
{
if (bm->totloop < BM_OMP_LIMIT) {
if (bm->totloop < BM_THREAD_LIMIT) {
bm_mesh_loops_calc_normals__single_threaded(bm,
vcos,
fnos,

View File

@ -49,7 +49,7 @@ BLI_INLINE void bmesh_calc_tessellation_for_face_impl(BMLoop *(*looptris)[3],
MemArena **pf_arena_p,
const bool face_normal)
{
#ifdef DEBUG
#ifndef NDEBUG
/* The face normal is used for projecting faces into 2D space for tessellation.
* Invalid normals may result in invalid tessellation.
* Either `face_normal` should be true or normals should be updated first. */

View File

@ -9,7 +9,7 @@
*/
/* debug builds only */
#ifdef DEBUG
#ifndef NDEBUG
# include "BLI_map.hh"
# include "BLI_ordered_edge.hh"

View File

@ -132,7 +132,7 @@ void BMO_op_init(BMesh *bm, BMOperator *op, const int flag, const char *opname)
{
int opcode = BMO_opcode_from_opname(opname);
#ifdef DEBUG
#ifndef NDEBUG
BM_ELEM_INDEX_VALIDATE(bm, "pre bmo", opname);
#else
(void)bm;
@ -185,7 +185,7 @@ void BMO_op_finish(BMesh *bm, BMOperator *op)
BLI_memarena_free(op->arena);
#ifdef DEBUG
#ifndef NDEBUG
BM_ELEM_INDEX_VALIDATE(bm, "post bmo", bmo_opdefines[op->type]->opname);
/* avoid accidental re-use */

View File

@ -492,7 +492,7 @@ bool BM_face_split_edgenet(BMesh *bm,
BLI_assert(BM_ELEM_API_FLAG_TEST(f, FACE_NET) == 0);
BM_ELEM_API_FLAG_ENABLE(f, FACE_NET);
#ifdef DEBUG
#ifndef NDEBUG
for (i = 0; i < edge_net_len; i++) {
BLI_assert(BM_ELEM_API_FLAG_TEST(edge_net[i], EDGE_NET) == 0);
BLI_assert(BM_edge_in_face(edge_net[i], f) == false);
@ -1637,7 +1637,7 @@ finally:
if (use_partial_connect) {
/* Sanity check: ensure we don't have connecting edges before splicing begins. */
# ifdef DEBUG
# ifndef NDEBUG
{
struct TempVertPair *tvp = temp_vert_pairs.list;
do {

View File

@ -1828,7 +1828,7 @@ BMFace *BM_face_exists_overlap(BMVert **varr, const int len)
BMFace *f_overlap = nullptr;
LinkNode *f_lnk = nullptr;
#ifdef DEBUG
#ifndef NDEBUG
/* check flag isn't already set */
for (i = 0; i < len; i++) {
BM_ITER_ELEM (f, &viter, varr[i], BM_FACES_OF_VERT) {
@ -1866,7 +1866,7 @@ bool BM_face_exists_overlap_subset(BMVert **varr, const int len)
bool is_overlap = false;
LinkNode *f_lnk = nullptr;
#ifdef DEBUG
#ifndef NDEBUG
/* check flag isn't already set */
for (int i = 0; i < len; i++) {
BLI_assert(BM_ELEM_API_FLAG_TEST(varr[i], _FLAG_OVERLAP) == 0);
@ -2112,7 +2112,7 @@ int BM_mesh_calc_face_groups(BMesh *bm,
{
/* NOTE: almost duplicate of #BM_mesh_calc_edge_groups, keep in sync. */
#ifdef DEBUG
#ifndef NDEBUG
int group_index_len = 1;
#else
int group_index_len = 32;
@ -2266,7 +2266,7 @@ int BM_mesh_calc_edge_groups(BMesh *bm,
{
/* NOTE: almost duplicate of #BM_mesh_calc_face_groups, keep in sync. */
#ifdef DEBUG
#ifndef NDEBUG
int group_index_len = 1;
#else
int group_index_len = 32;

View File

@ -475,7 +475,7 @@ static void bm_grid_fill(BMesh *bm,
const uint ytot = uint(BM_edgeloop_length_get(estore_rail_a));
// BMVert *v;
uint i;
#ifdef DEBUG
#ifndef NDEBUG
uint x, y;
#endif
LinkData *el;
@ -523,7 +523,7 @@ static void bm_grid_fill(BMesh *bm,
for (el = static_cast<LinkData *>(lb_rail_b->first), i = 0; el; el = el->next, i++) {
v_grid[(xtot * i) + (xtot - 1)] = static_cast<BMVert *>(el->data);
}
#ifdef DEBUG
#ifndef NDEBUG
for (x = 1; x < xtot - 1; x++) {
for (y = 1; y < ytot - 1; y++) {
BLI_assert(v_grid[(y * xtot) + x] == nullptr);

View File

@ -300,7 +300,7 @@ static bool apply_mesh_output_to_bmesh(BMesh *bm, IMesh &m_out, bool keep_hidden
BMIter iter;
BMFace *bmf = static_cast<BMFace *>(BM_iter_new(&iter, bm, BM_FACES_OF_MESH, nullptr));
while (bmf != nullptr) {
# ifdef DEBUG
# ifndef NDEBUG
iter.count = BM_iter_mesh_count(BM_FACES_OF_MESH, bm);
# endif
BMFace *bmf_next = static_cast<BMFace *>(BM_iter_step(&iter));
@ -318,7 +318,7 @@ static bool apply_mesh_output_to_bmesh(BMesh *bm, IMesh &m_out, bool keep_hidden
}
BMVert *bmv = static_cast<BMVert *>(BM_iter_new(&iter, bm, BM_VERTS_OF_MESH, nullptr));
while (bmv != nullptr) {
# ifdef DEBUG
# ifndef NDEBUG
iter.count = BM_iter_mesh_count(BM_VERTS_OF_MESH, bm);
# endif
BMVert *bmv_next = static_cast<BMVert *>(BM_iter_step(&iter));

View File

@ -1086,7 +1086,7 @@ bool BM_mesh_intersect(BMesh *bm,
const bool isect_tri_tri_no_shared = (boolean_mode != BMESH_ISECT_BOOLEAN_NONE);
int flag = BVH_OVERLAP_USE_THREADING | BVH_OVERLAP_RETURN_PAIRS;
# ifdef DEBUG
# ifndef NDEBUG
/* The overlap result must match that obtained in Release to succeed
* in the `bmesh_boolean` test. */
if (looptris_tot < 1024) {

View File

@ -237,7 +237,7 @@ float Light::point_radiance_get(const ::Light *la)
void Light::debug_draw()
{
#ifdef DEBUG
#ifndef NDEBUG
drw_debug_sphere(_position, influence_radius_max, float4(0.8f, 0.3f, 0.0f, 1.0f));
#endif
}

View File

@ -52,7 +52,7 @@ ShaderModule::ShaderModule()
shader = nullptr;
}
#ifdef DEBUG
#ifndef NDEBUG
/* Ensure all shaders are described. */
for (auto i : IndexRange(MAX_SHADER_TYPE)) {
const char *name = static_shader_create_info_name_get(eShaderType(i));

View File

@ -345,7 +345,7 @@ GPU_SHADER_CREATE_INFO(eevee_surf_occupancy)
* Variations that are only there to test shaders at compile time.
* \{ */
#ifdef DEBUG
#ifndef NDEBUG
/* Stub functions defined by the material evaluation. */
GPU_SHADER_CREATE_INFO(eevee_material_stub)

View File

@ -38,7 +38,7 @@ ShaderModule::ShaderModule()
shader = nullptr;
}
#ifdef DEBUG
#ifndef NDEBUG
/* Ensure all shaders are described. */
for (auto i : IndexRange(MAX_SHADER_TYPE)) {
const char *name = static_shader_create_info_name_get(eShaderType(i));

View File

@ -88,7 +88,7 @@ struct SelectMap {
/** Mapping between internal IDs and `object->runtime->select_id`. */
Vector<uint> select_id_map;
#ifdef DEBUG
#ifndef NDEBUG
/** Debug map containing a copy of the object name. */
Vector<std::string> map_names;
#endif
@ -113,7 +113,7 @@ struct SelectMap {
uint object_id = ob_ref.object->runtime->select_id;
uint id = select_id_map.append_and_get_index(object_id | sub_object_id);
#ifdef DEBUG
#ifndef NDEBUG
map_names.append(ob_ref.object->id.name);
#endif
return {id};
@ -153,7 +153,7 @@ struct SelectMap {
info_buf.push_update();
select_id_map.clear();
#ifdef DEBUG
#ifndef NDEBUG
map_names.clear();
#endif
}

View File

@ -641,7 +641,7 @@ static bool workbench_render_framebuffers_init()
GPU_framebuffer_check_valid(dfbl->depth_only_fb, nullptr);
}
#ifdef DEBUG
#ifdef _DEBUG
/* This is just to ease GPU debugging when the frame delimiter is set to Finish */
# define GPU_FINISH_DELIMITER() GPU_finish()
#else

View File

@ -172,7 +172,7 @@ class UniformCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
protected:
GPUUniformBuf *ubo_;
#ifdef DEBUG
#ifndef NDEBUG
const char *name_ = typeid(T).name();
#else
const char *name_ = "UniformBuffer";
@ -215,7 +215,7 @@ class StorageCommon : public DataBuffer<T, len, false>, NonMovable, NonCopyable
protected:
GPUStorageBuf *ssbo_;
#ifdef DEBUG
#ifndef NDEBUG
const char *name_ = typeid(T).name();
#else
const char *name_ = "StorageBuffer";

View File

@ -916,7 +916,7 @@ void DRW_curve_batch_cache_create_requested(Object *ob, const Scene *scene)
curve_render_data_free(rdata);
#ifdef DEBUG
#ifndef NDEBUG
/* Make sure all requested batches have been setup. */
for (int i = 0; i < sizeof(cache->batch) / sizeof(void *); i++) {
BLI_assert(!DRW_batch_requested(((GPUBatch **)&cache->batch)[i], (GPUPrimType)0));

View File

@ -1299,7 +1299,7 @@ static void drw_add_attributes_vbo(GPUBatch *batch,
}
}
#ifdef DEBUG
#ifndef NDEBUG
/* Sanity check function to test if all requested batches are available. */
static void drw_mesh_batch_cache_check_available(TaskGraph *task_graph, Mesh *me)
{
@ -1350,13 +1350,13 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph *task_graph,
/* Early out */
if (cache.batch_requested == 0) {
#ifdef DEBUG
#ifndef NDEBUG
drw_mesh_batch_cache_check_available(task_graph, me);
#endif
return;
}
#ifdef DEBUG
#ifndef NDEBUG
/* Map the index of a buffer to a flag containing all batches that use it. */
Map<int, DRWBatchFlag> batches_that_use_buffer_local;
@ -1492,7 +1492,7 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph *task_graph,
/* Second chance to early out */
if ((batch_requested & ~cache.batch_ready) == 0) {
#ifdef DEBUG
#ifndef NDEBUG
drw_mesh_batch_cache_check_available(task_graph, me);
#endif
return;
@ -1815,7 +1815,7 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph *task_graph,
DRW_vbo_request(cache.batch.surface_viewer_attribute, &mbuflist->vbo.attr_viewer);
}
#ifdef DEBUG
#ifndef NDEBUG
auto assert_final_deps_valid = [&](const int buffer_index) {
BLI_assert(batches_that_use_buffer(buffer_index) ==
batches_that_use_buffer_local.lookup(buffer_index));
@ -1940,7 +1940,7 @@ void DRW_mesh_batch_cache_create_requested(TaskGraph *task_graph,
* based on the mode the correct one will be updated. Other option is to look into using
* drw_batch_cache_generate_requested_delayed. */
BLI_task_graph_work_and_wait(task_graph);
#ifdef DEBUG
#ifndef NDEBUG
drw_mesh_batch_cache_check_available(task_graph, me);
#endif
}

View File

@ -24,7 +24,7 @@
#include <iomanip>
#include <sstream>
#if defined(DEBUG) || defined(WITH_DRAW_DEBUG)
#if defined(_DEBUG) || defined(WITH_DRAW_DEBUG)
# define DRAW_DEBUG
#else
/* Uncomment to forcibly enable debug draw in release mode. */

View File

@ -47,7 +47,7 @@ void Manager::begin_sync()
acquired_textures.clear();
layer_attributes.clear();
#ifdef DEBUG
#ifndef NDEBUG
/* Detect uninitialized data. */
memset(matrix_buf.current().data(),
0xF0,
@ -132,7 +132,7 @@ void Manager::end_sync()
void Manager::debug_bind()
{
#ifdef DEBUG
#ifdef _DEBUG
if (DST.debug == nullptr) {
return;
}

View File

@ -123,7 +123,7 @@ static void drw_state_prepare_clean_for_draw(DRWManager *dst)
* where we don't re-use data by accident across different
* draw calls.
*/
#ifdef DEBUG
#ifndef NDEBUG
static void drw_state_ensure_not_reused(DRWManager *dst)
{
memset(dst, 0xff, offsetof(DRWManager, system_gpu_context));
@ -645,7 +645,7 @@ static void drw_manager_exit(DRWManager *dst)
}
dst->vmempool = nullptr;
dst->viewport = nullptr;
#ifdef DEBUG
#ifndef NDEBUG
/* Avoid accidental reuse. */
drw_state_ensure_not_reused(dst);
#endif

View File

@ -272,7 +272,7 @@ void View::compute_visibility(ObjectBoundsBuf &bounds, uint resource_len, bool d
culling_freeze_[0] = static_cast<ViewCullingData>(culling_[0]);
culling_freeze_.push_update();
}
#ifdef DEBUG
#ifdef _DEBUG
if (debug_freeze) {
float4x4 persmat = data_freeze_[0].winmat * data_freeze_[0].viewmat;
drw_debug_matrix_as_bbox(math::invert(persmat), float4(0, 1, 0, 1));

View File

@ -990,7 +990,7 @@ bool ED_operator_rv3d_user_region_poll(bContext *C);
*/
void ED_view3d_init_mats_rv3d(const Object *ob, RegionView3D *rv3d);
void ED_view3d_init_mats_rv3d_gl(const Object *ob, RegionView3D *rv3d);
#ifdef DEBUG
#ifndef NDEBUG
/**
* Ensure we correctly initialize.
*/

View File

@ -6382,7 +6382,7 @@ void UI_but_func_search_set(uiBut *but,
search_but->arg_free_fn = search_arg_free_fn;
if (search_exec_fn) {
#ifdef DEBUG
#ifndef NDEBUG
if (but->func) {
/* watch this, can be cause of much confusion, see: #47691 */
printf("%s: warning, overwriting button callback with search function callback!\n",

View File

@ -9222,7 +9222,7 @@ static bool ui_handle_button_activate_by_type(bContext *C, ARegion *region, uiBu
ui_handle_button_activate(C, region, but, BUTTON_ACTIVATE);
}
else {
#ifdef DEBUG
#ifndef NDEBUG
printf("%s: error, unhandled type: %d\n", __func__, but->type);
#endif
return false;

View File

@ -595,7 +595,7 @@ uiBlock *ui_popup_block_refresh(bContext *C,
BLI_assert(!handle->refresh || handle->can_refresh);
#ifdef DEBUG
#ifndef NDEBUG
wmEvent *event_back = window->eventstate;
wmEvent *event_last_back = window->event_last_handled;
#endif
@ -618,7 +618,7 @@ uiBlock *ui_popup_block_refresh(bContext *C,
BLI_assert(!block->endblock);
/* ensure we don't use mouse coords here! */
#ifdef DEBUG
#ifndef NDEBUG
window->eventstate = nullptr;
#endif
@ -783,7 +783,7 @@ uiBlock *ui_popup_block_refresh(bContext *C,
ED_region_update_rect(region);
#ifdef DEBUG
#ifndef NDEBUG
window->eventstate = event_back;
window->event_last_handled = event_last_back;
#endif

View File

@ -1689,7 +1689,7 @@ void EDBM_update(Mesh *mesh, const EDBMUpdate_Params *params)
em->bm->spacearr_dirty &= ~BM_SPACEARR_BMO_SET;
}
#ifdef DEBUG
#ifndef NDEBUG
{
LISTBASE_FOREACH (BMEditSelection *, ese, &em->bm->selected) {
BLI_assert(BM_elem_flag_test(ese->ele, BM_ELEM_SELECT));

View File

@ -154,7 +154,7 @@ void ED_view3d_init_mats_rv3d_gl(const Object *ob, RegionView3D *rv3d)
GPU_matrix_mul(ob->object_to_world);
}
#ifdef DEBUG
#ifndef NDEBUG
void ED_view3d_clear_mats_rv3d(RegionView3D *rv3d)
{
zero_m4(rv3d->viewmatob);

View File

@ -1029,7 +1029,7 @@ static void posttrans_mask_clean(Mask *mask)
}
}
#ifdef DEBUG
#ifndef NDEBUG
for (masklay_shape = static_cast<MaskLayerShape *>(masklay->splines_shapes.first);
masklay_shape;
masklay_shape = masklay_shape->next)
@ -1064,7 +1064,7 @@ static void posttrans_gpd_clean(bGPdata *gpd)
}
}
#ifdef DEBUG
#ifndef NDEBUG
for (gpf = static_cast<bGPDframe *>(gpl->frames.first); gpf; gpf = gpf->next) {
BLI_assert(!gpf->next || gpf->framenum < gpf->next->framenum);
}

View File

@ -310,7 +310,7 @@ void SnapData::register_result(SnapObjectContext *sctx,
sctx->ret.loc = math::transform_point(obmat, sctx->ret.loc);
sctx->ret.no = math::normalize(math::transform_direction(obmat, sctx->ret.no));
#ifdef DEBUG
#ifndef NDEBUG
/* Make sure this is only called once. */
r_nearest->index = -2;
#endif
@ -1206,7 +1206,7 @@ bool ED_transform_snap_object_project_ray_all(SnapObjectContext *sctx,
return false;
}
#ifdef DEBUG
#ifndef NDEBUG
float ray_depth_prev = sctx->ret.ray_depth_max;
#endif
if (raycastObjects(sctx)) {
@ -1214,7 +1214,7 @@ bool ED_transform_snap_object_project_ray_all(SnapObjectContext *sctx,
BLI_listbase_sort(r_hit_list, hit_depth_cmp);
}
/* meant to be readonly for 'all' hits, ensure it is */
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(ray_depth_prev == sctx->ret.ray_depth_max);
#endif
return true;

View File

@ -49,7 +49,7 @@
#include <atomic>
#include <thread>
#ifdef DEBUG
#ifndef NDEBUG
# define FN_LAZY_FUNCTION_DEBUG_THREADS
#endif

View File

@ -182,7 +182,7 @@ class ParamsBuilder {
void assert_current_param_type(ParamType param_type, StringRef expected_name = "")
{
UNUSED_VARS_NDEBUG(param_type, expected_name);
#ifdef DEBUG
#ifndef NDEBUG
int param_index = this->current_param_index();
if (expected_name != "") {
@ -198,7 +198,7 @@ class ParamsBuilder {
void assert_current_param_name(StringRef expected_name)
{
UNUSED_VARS_NDEBUG(expected_name);
#ifdef DEBUG
#ifndef NDEBUG
if (expected_name.is_empty()) {
return;
}
@ -334,7 +334,7 @@ class Params {
void assert_correct_param(int param_index, StringRef name, ParamType param_type)
{
UNUSED_VARS_NDEBUG(param_index, name, param_type);
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(builder_->signature_->params[param_index].type == param_type);
if (name.size() > 0) {
BLI_assert(builder_->signature_->params[param_index].name == name);
@ -345,7 +345,7 @@ class Params {
void assert_correct_param(int param_index, StringRef name, ParamCategory category)
{
UNUSED_VARS_NDEBUG(param_index, name, category);
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(builder_->signature_->params[param_index].type.category() == category);
if (name.size() > 0) {
BLI_assert(builder_->signature_->params[param_index].name == name);

View File

@ -927,7 +927,7 @@ class Executor {
this->with_locked_node(
node, node_state, current_task, local_data, [&](LockedNode &locked_node) {
#ifdef DEBUG
#ifndef NDEBUG
if (node_needs_execution) {
this->assert_expected_outputs_have_been_computed(locked_node, local_data);
}
@ -1128,7 +1128,7 @@ class Executor {
const int input_index = target_socket->index();
InputState &input_state = node_state.inputs[input_index];
const bool is_last_target = target_socket == targets.last();
#ifdef DEBUG
#ifndef NDEBUG
if (input_state.value != nullptr) {
if (self_.logger_ != nullptr) {
self_.logger_->dump_when_input_is_set_twice(*target_socket, from_socket, local_context);

View File

@ -300,7 +300,7 @@ bke::CurvesGeometry subdivide_curves(
* Storing the leading zero is unnecessary but makes the array a bit simpler to use by avoiding
* a check for the first segment, and because some existing utilities also use leading zeros. */
Array<int> all_point_offset_data(src_curves.points_num() + src_curves.curves_num());
#ifdef DEBUG
#ifndef NDEBUG
all_point_offset_data.fill(-1);
#endif
calculate_result_offsets(src_curves,

View File

@ -343,7 +343,7 @@ static void sample_interval_linear(const Span<T> src_data,
else {
dst_data[dst_index] = bke::attribute_math::mix2(
end_point.parameter, src_data[end_point.index], src_data[end_point.next_index]);
#ifdef DEBUG
#ifndef NDEBUG
++dst_index;
#endif
}
@ -381,7 +381,7 @@ static void sample_interval_catmull_rom(const Span<T> src_data,
}
else {
dst_data[dst_index] = interpolate_catmull_rom(src_data, end_point, src_cyclic);
#ifdef DEBUG
#ifndef NDEBUG
++dst_index;
#endif
}
@ -566,9 +566,9 @@ static void sample_interval_bezier(const Span<float3> src_positions,
dst_positions[dst_index] = end_point_insert.position;
dst_types_l[dst_index] = src_types_l[end_point.next_index];
dst_types_r[dst_index] = src_types_r[end_point.next_index];
#ifdef DEBUG
#ifndef NDEBUG
++dst_index;
#endif // DEBUG
#endif
}
BLI_assert(dst_index == dst_range.one_after_last());
}

View File

@ -129,7 +129,7 @@ typedef struct GPUVertBufRaw {
uint stride;
unsigned char *data;
unsigned char *data_init;
#ifdef DEBUG
#ifndef NDEBUG
/* Only for overflow check */
unsigned char *_data_end;
#endif
@ -139,7 +139,7 @@ GPU_INLINE void *GPU_vertbuf_raw_step(GPUVertBufRaw *a)
{
unsigned char *data = a->data;
a->data += a->stride;
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(data < a->_data_end);
#endif
return (void *)data;

View File

@ -64,7 +64,7 @@ inline GPUAttachmentType &operator--(GPUAttachmentType &a)
namespace blender {
namespace gpu {
#ifdef DEBUG
#ifndef NDEBUG
# define DEBUG_NAME_LEN 64
#else
# define DEBUG_NAME_LEN 16

View File

@ -512,7 +512,7 @@ void gpu_shader_create_info_init()
info->builtins_ |= gpu_shader_dependency_get_builtins(info->geometry_source_);
info->builtins_ |= gpu_shader_dependency_get_builtins(info->compute_source_);
#ifdef DEBUG
#ifndef NDEBUG
/* Automatically amend the create info for ease of use of the debug feature. */
if ((info->builtins_ & BuiltinBits::USE_DEBUG_DRAW) == BuiltinBits::USE_DEBUG_DRAW) {
info->additional_info("draw_debug_draw");

View File

@ -109,7 +109,7 @@ struct GPUSource {
if (source.find("'") != StringRef::not_found) {
char_literals_preprocess();
}
#ifdef DEBUG
#ifndef NDEBUG
if (source.find("drw_print") != StringRef::not_found) {
string_preprocess();
}
@ -205,7 +205,7 @@ struct GPUSource {
*/
void check_no_quotes()
{
#ifdef DEBUG
#ifndef NDEBUG
int64_t pos = -1;
do {
pos = source.find('"', pos + 1);

View File

@ -18,7 +18,7 @@ namespace gpu {
class VertBuf;
#ifdef DEBUG
#ifndef NDEBUG
# define DEBUG_NAME_LEN 64
#else
# define DEBUG_NAME_LEN 8

View File

@ -68,7 +68,7 @@ enum eGPUSamplerFormat {
ENUM_OPERATORS(eGPUSamplerFormat, GPU_SAMPLER_TYPE_UINT)
#ifdef DEBUG
#ifndef NDEBUG
# define DEBUG_NAME_LEN 64
#else
# define DEBUG_NAME_LEN 8

View File

@ -15,7 +15,7 @@ struct GPUUniformBuf;
namespace blender {
namespace gpu {
#ifdef DEBUG
#ifndef NDEBUG
# define DEBUG_NAME_LEN 64
#else
# define DEBUG_NAME_LEN 8

View File

@ -273,7 +273,7 @@ void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *verts_, uint a_idx, GPUVertBufRaw
access->stride = format->stride;
access->data = (uchar *)verts->data + a->offset;
access->data_init = access->data;
#ifdef DEBUG
#ifndef NDEBUG
access->_data_end = access->data_init + size_t(verts->vertex_alloc * format->stride);
#endif
}

View File

@ -206,6 +206,7 @@ void gpu::MTLTexture::bake_mip_swizzle_view()
BLI_assert(range_len > 0);
BLI_assert(mip_texture_base_level_ < texture_.mipmapLevelCount);
BLI_assert(mip_texture_base_layer_ < max_slices);
UNUSED_VARS_NDEBUG(max_slices);
mip_swizzle_view_ = [texture_
newTextureViewWithPixelFormat:texture_view_pixel_format
textureType:texture_view_texture_type

View File

@ -61,7 +61,7 @@
#define ARG_LIST_CALL(...) VA_NARGS_CALL_OVERLOAD(_VA_ARG_LIST_CALL, __VA_ARGS__)
/* clang-format on */
#ifdef DEBUG
#ifndef NDEBUG
# define GL_CHECK_RESOURCES(info) debug::check_gl_resources(info)
#else
# define GL_CHECK_RESOURCES(info)

View File

@ -93,7 +93,7 @@ void GLStorageBuf::bind(int slot)
slot_ = slot;
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, slot_, ssbo_id_);
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(slot < 16);
/* TODO */
// GLContext::get()->bound_ssbo_slots |= 1 << slot;
@ -109,7 +109,7 @@ void GLStorageBuf::bind_as(GLenum target)
void GLStorageBuf::unbind()
{
#ifdef DEBUG
#ifndef NDEBUG
/* NOTE: This only unbinds the last bound slot. */
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, slot_, 0);
/* Hope that the context did not change. */

View File

@ -117,7 +117,7 @@ void GLUniformBuf::bind(int slot)
slot_ = slot;
glBindBufferBase(GL_UNIFORM_BUFFER, slot_, ubo_id_);
#ifdef DEBUG
#ifndef NDEBUG
BLI_assert(slot < 16);
GLContext::get()->bound_ubo_slots |= 1 << slot;
#endif
@ -138,7 +138,7 @@ void GLUniformBuf::bind_as_ssbo(int slot)
void GLUniformBuf::unbind()
{
#ifdef DEBUG
#ifndef NDEBUG
/* NOTE: This only unbinds the last bound slot. */
glBindBufferBase(GL_UNIFORM_BUFFER, slot_, 0);
/* Hope that the context did not change. */

View File

@ -92,7 +92,7 @@ static void warning_callback(const char *msg, void *client_data)
fprintf(stream, "[WARNING] %s", msg);
}
#ifdef DEBUG
#ifndef NDEBUG
/**
* sample debug callback expecting no client object
*/
@ -378,7 +378,7 @@ static ImBuf *imb_load_jp2_stream(opj_stream_t *stream,
/* configure the event callbacks (not required) */
opj_set_error_handler(codec, error_callback, stderr);
opj_set_warning_handler(codec, warning_callback, stderr);
#ifdef DEBUG /* too noisy */
#ifndef NDEBUG /* too noisy */
opj_set_info_handler(codec, info_callback, stderr);
#endif
@ -1235,7 +1235,7 @@ bool imb_save_jp2_stream(ImBuf *ibuf, opj_stream_t *stream, int /*flags*/)
/* configure the event callbacks (not required) */
opj_set_error_handler(codec, error_callback, stderr);
opj_set_warning_handler(codec, warning_callback, stderr);
#ifdef DEBUG /* too noisy */
#ifndef NDEBUG /* too noisy */
opj_set_info_handler(codec, info_callback, stderr);
#endif

View File

@ -94,7 +94,7 @@ void importer_main(Main *bmain,
if (import_params.use_mesh_validate) {
bool verbose_validate = false;
#ifdef DEBUG
#ifndef NDEBUG
verbose_validate = true;
#endif
BKE_mesh_validate(mesh, verbose_validate, false);

View File

@ -67,7 +67,7 @@ Object *MeshFromGeometry::create_mesh(Main *bmain,
if (import_params.validate_meshes || mesh_geometry_.has_invalid_faces_) {
bool verbose_validate = false;
#ifdef DEBUG
#ifndef NDEBUG
verbose_validate = true;
#endif
BKE_mesh_validate(mesh, verbose_validate, false);

View File

@ -36,7 +36,7 @@
static CLG_LogRef LOG = {"rna.define"};
#ifdef DEBUG
#ifndef NDEBUG
# define ASSERT_SOFT_HARD_LIMITS \
if (softmin < hardmin || softmax > hardmax) { \
CLOG_ERROR(&LOG, "error with soft/hard limits: %s.%s", CONTAINER_RNA_ID(cont), identifier); \
@ -1772,7 +1772,7 @@ void RNA_def_property_range(PropertyRNA *prop, double min, double max)
{
StructRNA *srna = DefRNA.laststruct;
#ifdef DEBUG
#ifndef NDEBUG
if (min > max) {
CLOG_ERROR(&LOG, "\"%s.%s\", min > max.", srna->identifier, prop->identifier);
DefRNA.error = true;
@ -4240,7 +4240,7 @@ PropertyRNA *RNA_def_float_percentage(StructOrFunctionRNA *cont_,
ASSERT_SOFT_HARD_LIMITS;
#ifdef DEBUG
#ifndef NDEBUG
/* Properties with PROP_PERCENTAGE should use a range like 0 to 100, unlike PROP_FACTOR. */
if (hardmax < 2.0f) {
CLOG_WARN(&LOG,
@ -4559,7 +4559,7 @@ void RNA_enum_item_add(EnumPropertyItem **items, int *totitem, const EnumPropert
if (tot == 0) {
*items = static_cast<EnumPropertyItem *>(MEM_callocN(sizeof(EnumPropertyItem[8]), __func__));
/* Ensure we get crashes on missing calls to 'RNA_enum_item_end', see #74227. */
#ifdef DEBUG
#ifndef NDEBUG
memset(*items, 0xff, sizeof(EnumPropertyItem[8]));
#endif
}
@ -4567,7 +4567,7 @@ void RNA_enum_item_add(EnumPropertyItem **items, int *totitem, const EnumPropert
/* Power of two > 8. */
*items = static_cast<EnumPropertyItem *>(
MEM_recallocN_id(*items, sizeof(EnumPropertyItem) * tot * 2, __func__));
#ifdef DEBUG
#ifndef NDEBUG
memset((*items) + tot, 0xff, sizeof(EnumPropertyItem) * tot);
#endif
}

View File

@ -114,7 +114,7 @@ class GeoNodeExecParams {
return value_or_field_type.as_field(params_.try_get_input_data_ptr(index));
}
else {
#ifdef DEBUG
#ifndef NDEBUG
this->check_input_access(identifier, &CPPType::get<T>());
#endif
const int index = this->get_input_index(identifier);
@ -144,7 +144,7 @@ class GeoNodeExecParams {
return value_or_field.as_field();
}
else {
#ifdef DEBUG
#ifndef NDEBUG
this->check_input_access(identifier, &CPPType::get<T>());
#endif
const int index = this->get_input_index(identifier);
@ -177,7 +177,7 @@ class GeoNodeExecParams {
});
}
else {
#ifdef DEBUG
#ifndef NDEBUG
const CPPType &type = CPPType::get<StoredT>();
this->check_output_access(identifier, type);
#endif

View File

@ -110,7 +110,7 @@ void GeoNodeExecParams::check_input_geometry_set(StringRef identifier,
void GeoNodeExecParams::check_output_geometry_set(const GeometrySet &geometry_set) const
{
UNUSED_VARS_NDEBUG(geometry_set);
#ifdef DEBUG
#ifndef NDEBUG
if (const bke::CurvesEditHints *curve_edit_hints = geometry_set.get_curve_edit_hints()) {
/* If this is not valid, it's likely that the number of stored deformed points does not match
* the number of points in the original data. */

View File

@ -8829,7 +8829,7 @@ void pyrna_alloc_types()
* But keep running in debug mode so we get immediate notification of bad class hierarchy
* or any errors in "bpy_types.py" at load time, so errors don't go unnoticed. */
#ifdef DEBUG
#ifndef NDEBUG
PyGILState_STATE gilstate;
PropertyRNA *prop;
@ -8855,7 +8855,7 @@ void pyrna_alloc_types()
RNA_PROP_END;
PyGILState_Release(gilstate);
#endif /* DEBUG */
#endif /* !NDEBUG */
}
void pyrna_free_types()

View File

@ -20,7 +20,7 @@ typedef unsigned short ushort;
/* matrix[row][col] == MATRIX_ITEM_INDEX(matrix, row, col) */
#ifdef DEBUG
#ifndef NDEBUG
# define MATRIX_ITEM_ASSERT(_mat, _row, _col) \
(BLI_assert(_row < (_mat)->row_num && _col < (_mat)->col_num))
#else

View File

@ -53,7 +53,7 @@ struct wmMsgTypeInfo {
struct wmMsg {
unsigned int type;
// #ifdef DEBUG
// #ifndef NDEBUG
/* For debugging: '__func__:__LINE__'. */
const char *id;
// #endif