Cleanup: BLI_task - API changes.
Based on usages so far: - Split callback worker func in two, 'basic' and 'extended' versions. The former goes back to the simplest version, while the latter keeps the 'userdata_chunk', and gets the thread_id too. - Add use_threading to simple BLI_task_parallel_range(), turns out we need this pretty much systematically, and allows to get rid of most usages of BLI_task_parallel_range_ex(). - Now BLI_task_parallel_range() expects 'basic' version of callback, while BLI_task_parallel_range_ex() expects 'extended' version of the callback. All in all, this should make common usage of BLI_task_parallel_range simpler (less verbose), and give the advanced callback access to the thread id, which is mandatory in some (future) cases.
This commit is contained in:
@@ -502,7 +502,7 @@ typedef struct OceanSimulateData {
|
||||
float chop_amount;
|
||||
} OceanSimulateData;
|
||||
|
||||
static void ocean_compute_htilda(void *userdata, void *UNUSED(userdata_chunk), int i)
|
||||
static void ocean_compute_htilda(void *userdata, const int i)
|
||||
{
|
||||
OceanSimulateData *osd = userdata;
|
||||
const Ocean *o = osd->o;
|
||||
@@ -748,7 +748,7 @@ void BKE_ocean_simulate(struct Ocean *o, float t, float scale, float chop_amount
|
||||
* This is not optimal in all cases, but remains reasonably simple and should be OK most of the time. */
|
||||
|
||||
/* compute a new htilda */
|
||||
BLI_task_parallel_range(0, o->_M, &osd, ocean_compute_htilda);
|
||||
BLI_task_parallel_range(0, o->_M, &osd, ocean_compute_htilda, o->_M > 16);
|
||||
|
||||
if (o->_do_disp_y) {
|
||||
BLI_task_pool_push(pool, ocean_compute_displacement_y, NULL, false, TASK_PRIORITY_HIGH);
|
||||
|
||||
@@ -112,19 +112,20 @@ ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool);
|
||||
size_t BLI_task_pool_tasks_done(TaskPool *pool);
|
||||
|
||||
/* Parallel for routines */
|
||||
typedef void (*TaskParallelRangeFunc)(void *userdata, void *userdata_chunk, int iter);
|
||||
typedef void (*TaskParallelRangeFunc)(void *userdata, const int iter);
|
||||
typedef void (*TaskParallelRangeFuncEx)(void *userdata, void *userdata_chunk, const int iter, const int thread_id);
|
||||
void BLI_task_parallel_range_ex(
|
||||
int start, int stop,
|
||||
void *userdata,
|
||||
void *userdata_chunk,
|
||||
const size_t userdata_chunk_size,
|
||||
TaskParallelRangeFunc func,
|
||||
const size_t userdata_chunk_size, TaskParallelRangeFuncEx func_ex,
|
||||
const bool use_threading,
|
||||
const bool use_dynamic_scheduling);
|
||||
void BLI_task_parallel_range(
|
||||
int start, int stop,
|
||||
void *userdata,
|
||||
TaskParallelRangeFunc func);
|
||||
TaskParallelRangeFunc func,
|
||||
const bool use_threading);
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
|
||||
@@ -750,7 +750,7 @@ typedef struct BVHDivNodesData {
|
||||
int first_of_next_level;
|
||||
} BVHDivNodesData;
|
||||
|
||||
static void non_recursive_bvh_div_nodes_task_cb(void *userdata, void *UNUSED(userdata_chunk), int j)
|
||||
static void non_recursive_bvh_div_nodes_task_cb(void *userdata, const int j)
|
||||
{
|
||||
BVHDivNodesData *data = userdata;
|
||||
|
||||
@@ -873,9 +873,9 @@ static void non_recursive_bvh_div_nodes(BVHTree *tree, BVHNode *branches_array,
|
||||
cb_data.i = i;
|
||||
cb_data.depth = depth;
|
||||
|
||||
BLI_task_parallel_range_ex(
|
||||
i, end_j, &cb_data, NULL, 0, non_recursive_bvh_div_nodes_task_cb,
|
||||
num_leafs > KDOPBVH_THREAD_LEAF_THRESHOLD, false);
|
||||
BLI_task_parallel_range(
|
||||
i, end_j, &cb_data, non_recursive_bvh_div_nodes_task_cb,
|
||||
num_leafs > KDOPBVH_THREAD_LEAF_THRESHOLD);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1195,7 +1195,7 @@ int BLI_bvhtree_overlap_thread_num(const BVHTree *tree)
|
||||
return (int)MIN2(tree->tree_type, tree->nodes[tree->totleaf]->totnode);
|
||||
}
|
||||
|
||||
static void bvhtree_overlap_task_cb(void *userdata, void *UNUSED(userdata_chunk), int j)
|
||||
static void bvhtree_overlap_task_cb(void *userdata, const int j)
|
||||
{
|
||||
BVHOverlapData_Thread *data = &((BVHOverlapData_Thread *)userdata)[j];
|
||||
BVHOverlapData_Shared *data_shared = data->shared;
|
||||
@@ -1260,9 +1260,9 @@ BVHTreeOverlap *BLI_bvhtree_overlap(
|
||||
data[j].thread = j;
|
||||
}
|
||||
|
||||
BLI_task_parallel_range_ex(
|
||||
0, thread_num, data, NULL, 0, bvhtree_overlap_task_cb,
|
||||
tree1->totleaf > KDOPBVH_THREAD_LEAF_THRESHOLD, false);
|
||||
BLI_task_parallel_range(
|
||||
0, thread_num, data, bvhtree_overlap_task_cb,
|
||||
tree1->totleaf > KDOPBVH_THREAD_LEAF_THRESHOLD);
|
||||
|
||||
for (j = 0; j < thread_num; j++)
|
||||
total += BLI_stack_count(data[j].overlap);
|
||||
|
||||
@@ -46,7 +46,7 @@ typedef struct CovarianceData {
|
||||
int nbr_cos_vn;
|
||||
} CovarianceData;
|
||||
|
||||
static void covariance_m_vn_ex_task_cb(void *userdata, void *UNUSED(userdata_chunk), int a)
|
||||
static void covariance_m_vn_ex_task_cb(void *userdata, const int a)
|
||||
{
|
||||
CovarianceData *data = userdata;
|
||||
const float *cos_vn = data->cos_vn;
|
||||
@@ -117,8 +117,8 @@ void BLI_covariance_m_vn_ex(
|
||||
.covfac = covfac, .n = n, .nbr_cos_vn = nbr_cos_vn,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(
|
||||
0, n * n, &data, NULL, 0, covariance_m_vn_ex_task_cb, (nbr_cos_vn * n * n) >= 10000, false);
|
||||
BLI_task_parallel_range(
|
||||
0, n * n, &data, covariance_m_vn_ex_task_cb, (nbr_cos_vn * n * n) >= 10000);
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@@ -584,7 +584,9 @@ typedef struct ParallelRangeState {
|
||||
void *userdata;
|
||||
void *userdata_chunk;
|
||||
size_t userdata_chunk_size;
|
||||
|
||||
TaskParallelRangeFunc func;
|
||||
TaskParallelRangeFuncEx func_ex;
|
||||
|
||||
int iter;
|
||||
int chunk_size;
|
||||
@@ -610,23 +612,31 @@ BLI_INLINE bool parallel_range_next_iter_get(
|
||||
static void parallel_range_func(
|
||||
TaskPool * __restrict pool,
|
||||
void *UNUSED(taskdata),
|
||||
int UNUSED(threadid))
|
||||
int threadid)
|
||||
{
|
||||
ParallelRangeState * __restrict state = BLI_task_pool_userdata(pool);
|
||||
int iter, count;
|
||||
|
||||
const bool use_userdata_chunk = (state->userdata_chunk_size != 0) && (state->userdata_chunk != NULL);
|
||||
const bool use_userdata_chunk = (state->func_ex != NULL) &&
|
||||
(state->userdata_chunk_size != 0) && (state->userdata_chunk != NULL);
|
||||
void *userdata_chunk = use_userdata_chunk ? MALLOCA(state->userdata_chunk_size) : NULL;
|
||||
|
||||
while (parallel_range_next_iter_get(state, &iter, &count)) {
|
||||
int i;
|
||||
|
||||
if (use_userdata_chunk) {
|
||||
memcpy(userdata_chunk, state->userdata_chunk, state->userdata_chunk_size);
|
||||
}
|
||||
if (state->func_ex) {
|
||||
if (use_userdata_chunk) {
|
||||
memcpy(userdata_chunk, state->userdata_chunk, state->userdata_chunk_size);
|
||||
}
|
||||
|
||||
for (i = 0; i < count; ++i) {
|
||||
state->func(state->userdata, userdata_chunk, iter + i);
|
||||
for (i = 0; i < count; ++i) {
|
||||
state->func_ex(state->userdata, userdata_chunk, iter + i, threadid);
|
||||
}
|
||||
}
|
||||
else {
|
||||
for (i = 0; i < count; ++i) {
|
||||
state->func(state->userdata, iter + i);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -642,18 +652,20 @@ static void parallel_range_func(
|
||||
* \param userdata_chunk Optional, each instance of looping chunks will get a copy of this data
|
||||
* (similar to OpenMP's firstprivate).
|
||||
* \param userdata_chunk_size Memory size of \a userdata_chunk.
|
||||
* \param func Callback function.
|
||||
* \param func Callback function (simple version).
|
||||
* \param func_ex Callback function (advanced version).
|
||||
* \param use_threading If \a true, actually split-execute loop in threads, else just do a sequential forloop
|
||||
* (allows caller to use any kind of test to switch on parallelization or not).
|
||||
* \param use_dynamic_scheduling If \a true, the whole range is divided in a lot of small chunks (of size 32 currently),
|
||||
* otherwise whole range is split in a few big chunks (num_threads * 2 chunks currently).
|
||||
*/
|
||||
void BLI_task_parallel_range_ex(
|
||||
static void task_parallel_range_ex(
|
||||
int start, int stop,
|
||||
void *userdata,
|
||||
void *userdata_chunk,
|
||||
const size_t userdata_chunk_size,
|
||||
TaskParallelRangeFunc func,
|
||||
TaskParallelRangeFuncEx func_ex,
|
||||
const bool use_threading,
|
||||
const bool use_dynamic_scheduling)
|
||||
{
|
||||
@@ -666,25 +678,37 @@ void BLI_task_parallel_range_ex(
|
||||
return;
|
||||
}
|
||||
|
||||
BLI_assert(start <= stop);
|
||||
BLI_assert(start < stop);
|
||||
if (userdata_chunk_size != 0) {
|
||||
BLI_assert(func_ex != NULL && func == NULL);
|
||||
BLI_assert(userdata_chunk != NULL);
|
||||
}
|
||||
|
||||
/* If it's not enough data to be crunched, don't bother with tasks at all,
|
||||
* do everything from the main thread.
|
||||
*/
|
||||
if (!use_threading) {
|
||||
const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
|
||||
void *userdata_chunk_local = NULL;
|
||||
if (func_ex) {
|
||||
const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
|
||||
void *userdata_chunk_local = NULL;
|
||||
|
||||
if (use_userdata_chunk) {
|
||||
userdata_chunk_local = MALLOCA(userdata_chunk_size);
|
||||
memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
|
||||
if (use_userdata_chunk) {
|
||||
userdata_chunk_local = MALLOCA(userdata_chunk_size);
|
||||
memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
|
||||
}
|
||||
|
||||
for (i = start; i < stop; ++i) {
|
||||
func_ex(userdata, userdata_chunk, i, 0);
|
||||
}
|
||||
|
||||
MALLOCA_FREE(userdata_chunk_local, userdata_chunk_size);
|
||||
}
|
||||
else {
|
||||
for (i = start; i < stop; ++i) {
|
||||
func(userdata, i);
|
||||
}
|
||||
}
|
||||
|
||||
for (i = start; i < stop; ++i) {
|
||||
func(userdata, userdata_chunk_local, i);
|
||||
}
|
||||
|
||||
MALLOCA_FREE(userdata_chunk_local, userdata_chunk_size);
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -705,6 +729,7 @@ void BLI_task_parallel_range_ex(
|
||||
state.userdata_chunk = userdata_chunk;
|
||||
state.userdata_chunk_size = userdata_chunk_size;
|
||||
state.func = func;
|
||||
state.func_ex = func_ex;
|
||||
state.iter = start;
|
||||
if (use_dynamic_scheduling) {
|
||||
state.chunk_size = 32;
|
||||
@@ -728,16 +753,47 @@ void BLI_task_parallel_range_ex(
|
||||
BLI_spin_end(&state.lock);
|
||||
}
|
||||
|
||||
/**
|
||||
 * This function allows to parallelize for loops in a similar way to OpenMP's 'parallel for' statement.
|
||||
*
|
||||
* \param start First index to process.
|
||||
* \param stop Index to stop looping (excluded).
|
||||
* \param userdata Common userdata passed to all instances of \a func.
|
||||
* \param userdata_chunk Optional, each instance of looping chunks will get a copy of this data
|
||||
* (similar to OpenMP's firstprivate).
|
||||
* \param userdata_chunk_size Memory size of \a userdata_chunk.
|
||||
* \param func Callback function (simple version).
|
||||
* \param func_ex Callback function (advanced version).
|
||||
* \param use_threading If \a true, actually split-execute loop in threads, else just do a sequential forloop
|
||||
* (allows caller to use any kind of test to switch on parallelization or not).
|
||||
* \param use_dynamic_scheduling If \a true, the whole range is divided in a lot of small chunks (of size 32 currently),
|
||||
* otherwise whole range is split in a few big chunks (num_threads * 2 chunks currently).
|
||||
*/
|
||||
void BLI_task_parallel_range_ex(
|
||||
int start, int stop,
|
||||
void *userdata,
|
||||
void *userdata_chunk,
|
||||
const size_t userdata_chunk_size,
|
||||
TaskParallelRangeFuncEx func_ex,
|
||||
const bool use_threading,
|
||||
const bool use_dynamic_scheduling)
|
||||
{
|
||||
task_parallel_range_ex(
|
||||
start, stop, userdata, userdata_chunk, userdata_chunk_size, NULL, func_ex,
|
||||
use_threading, use_dynamic_scheduling);
|
||||
}
|
||||
|
||||
/**
|
||||
* A simpler version of \a BLI_task_parallel_range_ex, which does not use \a use_dynamic_scheduling,
|
||||
* has a \a range_threshold of 64, and does not handle 'firstprivate'-like \a userdata_chunk.
|
||||
* and does not handle 'firstprivate'-like \a userdata_chunk.
|
||||
*/
|
||||
void BLI_task_parallel_range(
|
||||
int start, int stop,
|
||||
void *userdata,
|
||||
TaskParallelRangeFunc func)
|
||||
TaskParallelRangeFunc func,
|
||||
const bool use_threading)
|
||||
{
|
||||
BLI_task_parallel_range_ex(start, stop, userdata, NULL, 0, func, (stop - start) > 64, false);
|
||||
task_parallel_range_ex(start, stop, userdata, NULL, 0, func, NULL, use_threading, false);
|
||||
}
|
||||
|
||||
#undef MALLOCA
|
||||
|
||||
@@ -449,7 +449,7 @@ typedef struct SculptThreadedTaskData {
|
||||
ThreadMutex mutex;
|
||||
} SculptThreadedTaskData;
|
||||
|
||||
static void paint_mesh_restore_co_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void paint_mesh_restore_co_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -511,8 +511,9 @@ static void paint_mesh_restore_co(Sculpt *sd, Object *ob)
|
||||
.sd = sd, .ob = ob, .brush = brush, .nodes = nodes,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, paint_mesh_restore_co_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && !ss->bm && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, paint_mesh_restore_co_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && !ss->bm && totnode > SCULPT_OMP_LIMIT));
|
||||
|
||||
if (nodes)
|
||||
MEM_freeN(nodes);
|
||||
@@ -819,7 +820,7 @@ static float calc_symmetry_feather(Sculpt *sd, StrokeCache *cache)
|
||||
* \note These are all _very_ similar, when changing one, check others.
|
||||
* \{ */
|
||||
|
||||
static void calc_area_normal_and_center_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void calc_area_normal_and_center_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -966,8 +967,9 @@ static void calc_area_center(
|
||||
};
|
||||
BLI_mutex_init(&data.mutex);
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, calc_area_normal_and_center_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, calc_area_normal_and_center_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
|
||||
BLI_mutex_end(&data.mutex);
|
||||
|
||||
@@ -1005,8 +1007,9 @@ static void calc_area_normal(
|
||||
};
|
||||
BLI_mutex_init(&data.mutex);
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, calc_area_normal_and_center_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, calc_area_normal_and_center_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
|
||||
BLI_mutex_end(&data.mutex);
|
||||
|
||||
@@ -1042,8 +1045,9 @@ static void calc_area_normal_and_center(
|
||||
};
|
||||
BLI_mutex_init(&data.mutex);
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, calc_area_normal_and_center_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, calc_area_normal_and_center_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
|
||||
BLI_mutex_end(&data.mutex);
|
||||
|
||||
@@ -1651,7 +1655,8 @@ static void do_smooth_brush_bmesh_task_cb(void *userdata, void *UNUSED(userdata_
|
||||
BKE_pbvh_vertex_iter_end;
|
||||
}
|
||||
|
||||
static void do_smooth_brush_multires_task_cb(void *userdata, void *userdata_chunk, int n)
|
||||
static void do_smooth_brush_multires_task_cb_ex(
|
||||
void *userdata, void *userdata_chunk, const int n, const int UNUSED(thread_id))
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptDoBrushSmoothGridDataChunk *data_chunk = userdata_chunk;
|
||||
@@ -1836,21 +1841,21 @@ static void smooth(
|
||||
size += sizeof(*data_chunk);
|
||||
|
||||
BLI_task_parallel_range_ex(
|
||||
0, totnode, &data, data_chunk, size, do_smooth_brush_multires_task_cb,
|
||||
0, totnode, &data, data_chunk, size, do_smooth_brush_multires_task_cb_ex,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
|
||||
MEM_freeN(data_chunk);
|
||||
break;
|
||||
}
|
||||
case PBVH_FACES:
|
||||
BLI_task_parallel_range_ex(
|
||||
0, totnode, &data, NULL, 0, do_smooth_brush_mesh_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_smooth_brush_mesh_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
break;
|
||||
case PBVH_BMESH:
|
||||
BLI_task_parallel_range_ex(
|
||||
0, totnode, &data, NULL, 0, do_smooth_brush_bmesh_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_smooth_brush_bmesh_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -1865,7 +1870,7 @@ static void do_smooth_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnod
|
||||
smooth(sd, ob, nodes, totnode, ss->cache->bstrength, false);
|
||||
}
|
||||
|
||||
static void do_mask_brush_draw_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_mask_brush_draw_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -1901,8 +1906,9 @@ static void do_mask_brush_draw(Sculpt *sd, Object *ob, PBVHNode **nodes, int tot
|
||||
.sd = sd, .ob = ob, .brush = brush, .nodes = nodes,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_mask_brush_draw_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_mask_brush_draw_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_mask_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode)
|
||||
@@ -1920,7 +1926,7 @@ static void do_mask_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode)
|
||||
}
|
||||
}
|
||||
|
||||
static void do_draw_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_draw_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -1968,11 +1974,12 @@ static void do_draw_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode)
|
||||
.offset = offset,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_draw_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_draw_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_crease_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_crease_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -2054,11 +2061,12 @@ static void do_crease_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnod
|
||||
.spvc = &spvc, .offset = offset, .flippedbstrength = flippedbstrength,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_crease_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_crease_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_pinch_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_pinch_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -2098,11 +2106,12 @@ static void do_pinch_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode
|
||||
.sd = sd, .ob = ob, .brush = brush, .nodes = nodes,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_pinch_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_pinch_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_grab_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_grab_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -2160,11 +2169,12 @@ static void do_grab_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode)
|
||||
.grab_delta = grab_delta,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_grab_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_grab_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_nudge_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_nudge_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -2212,11 +2222,12 @@ static void do_nudge_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode
|
||||
.cono = cono,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_nudge_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_nudge_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_snake_hook_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_snake_hook_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -2273,11 +2284,12 @@ static void do_snake_hook_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int to
|
||||
.grab_delta = grab_delta,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_snake_hook_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_snake_hook_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_thumb_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_thumb_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -2330,11 +2342,12 @@ static void do_thumb_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode
|
||||
.cono = cono,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_thumb_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_thumb_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_rotate_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_rotate_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -2388,11 +2401,12 @@ static void do_rotate_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnod
|
||||
.angle = angle,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_rotate_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_rotate_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_layer_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_layer_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -2469,13 +2483,14 @@ static void do_layer_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode
|
||||
};
|
||||
BLI_mutex_init(&data.mutex);
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_layer_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_layer_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
|
||||
BLI_mutex_end(&data.mutex);
|
||||
}
|
||||
|
||||
static void do_inflate_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_inflate_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -2520,8 +2535,9 @@ static void do_inflate_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totno
|
||||
.sd = sd, .ob = ob, .brush = brush, .nodes = nodes,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_inflate_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_inflate_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void calc_sculpt_plane(
|
||||
@@ -2649,7 +2665,7 @@ static float get_offset(Sculpt *sd, SculptSession *ss)
|
||||
return rv;
|
||||
}
|
||||
|
||||
static void do_flatten_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_flatten_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -2717,11 +2733,12 @@ static void do_flatten_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totno
|
||||
.area_no = area_no, .area_co = area_co,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_flatten_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_flatten_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_clay_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_clay_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -2797,11 +2814,12 @@ static void do_clay_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode)
|
||||
.area_no = area_no, .area_co = area_co,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_clay_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_clay_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_clay_strips_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_clay_strips_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -2904,11 +2922,12 @@ static void do_clay_strips_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int t
|
||||
.area_no_sp = area_no_sp, .area_co = area_co, .mat = mat,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_clay_strips_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_clay_strips_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_fill_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_fill_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -2980,11 +2999,12 @@ static void do_fill_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode)
|
||||
.area_no = area_no, .area_co = area_co,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_fill_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_fill_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_scrape_brush_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_scrape_brush_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -3055,11 +3075,12 @@ static void do_scrape_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnod
|
||||
.area_no = area_no, .area_co = area_co,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_scrape_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_scrape_brush_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
static void do_gravity_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_gravity_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -3108,8 +3129,9 @@ static void do_gravity(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode, fl
|
||||
.offset = offset,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, do_gravity_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, do_gravity_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
|
||||
@@ -3219,7 +3241,7 @@ static void sculpt_topology_update(Sculpt *sd, Object *ob, Brush *brush, Unified
|
||||
}
|
||||
}
|
||||
|
||||
static void do_brush_action_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void do_brush_action_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
|
||||
@@ -3250,8 +3272,9 @@ static void do_brush_action(Sculpt *sd, Object *ob, Brush *brush, UnifiedPaintSe
|
||||
.sd = sd, .ob = ob, .brush = brush, .nodes = nodes,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &task_data, NULL, 0, do_brush_action_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &task_data, do_brush_action_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
|
||||
if (sculpt_brush_needs_normal(brush))
|
||||
update_sculpt_normal(sd, ob, nodes, totnode);
|
||||
@@ -3363,7 +3386,7 @@ static void sculpt_flush_pbvhvert_deform(Object *ob, PBVHVertexIter *vd)
|
||||
copy_v3_v3(me->mvert[index].co, newco);
|
||||
}
|
||||
|
||||
static void sculpt_combine_proxies_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void sculpt_combine_proxies_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -3430,8 +3453,9 @@ static void sculpt_combine_proxies(Sculpt *sd, Object *ob)
|
||||
.sd = sd, .ob = ob, .brush = brush, .nodes = nodes,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, sculpt_combine_proxies_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, sculpt_combine_proxies_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
}
|
||||
|
||||
if (nodes)
|
||||
@@ -3457,7 +3481,7 @@ static void sculpt_update_keyblock(Object *ob)
|
||||
}
|
||||
}
|
||||
|
||||
static void sculpt_flush_stroke_deform_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void sculpt_flush_stroke_deform_task_cb(void *userdata, const int n)
|
||||
{
|
||||
SculptThreadedTaskData *data = userdata;
|
||||
SculptSession *ss = data->ob->sculpt;
|
||||
@@ -3510,8 +3534,9 @@ static void sculpt_flush_stroke_deform(Sculpt *sd, Object *ob)
|
||||
.vertCos = vertCos,
|
||||
};
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, &data, NULL, 0, sculpt_flush_stroke_deform_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, &data, sculpt_flush_stroke_deform_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
|
||||
if (vertCos) {
|
||||
sculpt_vertcos_to_key(ob, ss->kb, vertCos);
|
||||
|
||||
@@ -266,7 +266,7 @@ static int sculpt_undo_restore_mask(bContext *C, DerivedMesh *dm, SculptUndoNode
|
||||
return 1;
|
||||
}
|
||||
|
||||
static void sculpt_undo_bmesh_restore_generic_task_cb(void *userdata, void *UNUSED(userdata_chunk), int n)
|
||||
static void sculpt_undo_bmesh_restore_generic_task_cb(void *userdata, const int n)
|
||||
{
|
||||
PBVHNode **nodes = userdata;
|
||||
|
||||
@@ -294,8 +294,9 @@ static void sculpt_undo_bmesh_restore_generic(bContext *C,
|
||||
|
||||
BKE_pbvh_search_gather(ss->pbvh, NULL, NULL, &nodes, &totnode);
|
||||
|
||||
BLI_task_parallel_range_ex(0, totnode, nodes, NULL, 0, sculpt_undo_bmesh_restore_generic_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT), false);
|
||||
BLI_task_parallel_range(
|
||||
0, totnode, nodes, sculpt_undo_bmesh_restore_generic_task_cb,
|
||||
((sd->flags & SCULPT_USE_OPENMP) && totnode > SCULPT_OMP_LIMIT));
|
||||
|
||||
if (nodes)
|
||||
MEM_freeN(nodes);
|
||||
|
||||
@@ -234,7 +234,7 @@ typedef struct MeshdeformUserdata {
|
||||
float (*icagemat)[3];
|
||||
} MeshdeformUserdata;
|
||||
|
||||
static void meshdeform_vert_task(void *userdata, void *UNUSED(userdata_chunck), int iter)
|
||||
static void meshdeform_vert_task(void *userdata, const int iter)
|
||||
{
|
||||
MeshdeformUserdata *data = userdata;
|
||||
/*const*/ MeshDeformModifierData *mmd = data->mmd;
|
||||
@@ -413,7 +413,7 @@ static void meshdeformModifier_do(
|
||||
data.icagemat = icagemat;
|
||||
|
||||
/* Do deformation. */
|
||||
BLI_task_parallel_range(0, totvert, &data, meshdeform_vert_task);
|
||||
BLI_task_parallel_range(0, totvert, &data, meshdeform_vert_task, totvert > 1000);
|
||||
|
||||
/* release cage derivedmesh */
|
||||
MEM_freeN(dco);
|
||||
|
||||
@@ -280,7 +280,7 @@ typedef struct GenerateOceanGeometryData {
|
||||
float ix, iy;
|
||||
} GenerateOceanGeometryData;
|
||||
|
||||
static void generate_ocean_geometry_vertices(void *userdata, void *UNUSED(userdata_chunk), int y)
|
||||
static void generate_ocean_geometry_vertices(void *userdata, const int y)
|
||||
{
|
||||
GenerateOceanGeometryData *gogd = userdata;
|
||||
int x;
|
||||
@@ -294,7 +294,7 @@ static void generate_ocean_geometry_vertices(void *userdata, void *UNUSED(userda
|
||||
}
|
||||
}
|
||||
|
||||
static void generate_ocean_geometry_polygons(void *userdata, void *UNUSED(userdata_chunk), int y)
|
||||
static void generate_ocean_geometry_polygons(void *userdata, const int y)
|
||||
{
|
||||
GenerateOceanGeometryData *gogd = userdata;
|
||||
int x;
|
||||
@@ -324,7 +324,7 @@ static void generate_ocean_geometry_polygons(void *userdata, void *UNUSED(userda
|
||||
}
|
||||
}
|
||||
|
||||
static void generate_ocean_geometry_uvs(void *userdata, void *UNUSED(userdata_chunk), int y)
|
||||
static void generate_ocean_geometry_uvs(void *userdata, const int y)
|
||||
{
|
||||
GenerateOceanGeometryData *gogd = userdata;
|
||||
int x;
|
||||
@@ -360,6 +360,8 @@ static DerivedMesh *generate_ocean_geometry(OceanModifierData *omd)
|
||||
int num_verts;
|
||||
int num_polys;
|
||||
|
||||
const bool use_threading = omd->resolution > 4;
|
||||
|
||||
gogd.rx = omd->resolution * omd->resolution;
|
||||
gogd.ry = omd->resolution * omd->resolution;
|
||||
gogd.res_x = gogd.rx * omd->repeat_x;
|
||||
@@ -385,10 +387,10 @@ static DerivedMesh *generate_ocean_geometry(OceanModifierData *omd)
|
||||
gogd.origindex = CustomData_get_layer(&result->polyData, CD_ORIGINDEX);
|
||||
|
||||
/* create vertices */
|
||||
BLI_task_parallel_range(0, gogd.res_y + 1, &gogd, generate_ocean_geometry_vertices);
|
||||
BLI_task_parallel_range(0, gogd.res_y + 1, &gogd, generate_ocean_geometry_vertices, use_threading);
|
||||
|
||||
/* create faces */
|
||||
BLI_task_parallel_range(0, gogd.res_y, &gogd, generate_ocean_geometry_polygons);
|
||||
BLI_task_parallel_range(0, gogd.res_y, &gogd, generate_ocean_geometry_polygons, use_threading);
|
||||
|
||||
CDDM_calc_edges(result);
|
||||
|
||||
@@ -401,7 +403,7 @@ static DerivedMesh *generate_ocean_geometry(OceanModifierData *omd)
|
||||
gogd.ix = 1.0 / gogd.rx;
|
||||
gogd.iy = 1.0 / gogd.ry;
|
||||
|
||||
BLI_task_parallel_range(0, gogd.res_y, &gogd, generate_ocean_geometry_uvs);
|
||||
BLI_task_parallel_range(0, gogd.res_y, &gogd, generate_ocean_geometry_uvs, use_threading);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -112,7 +112,7 @@ typedef struct UVWarpData {
|
||||
int axis_v;
|
||||
} UVWarpData;
|
||||
|
||||
static void uv_warp_compute(void *userdata, void *UNUSED(userdata_chunk), int i)
|
||||
static void uv_warp_compute(void *userdata, const int i)
|
||||
{
|
||||
const UVWarpData *data = userdata;
|
||||
|
||||
@@ -210,7 +210,7 @@ static DerivedMesh *applyModifier(ModifierData *md, Object *ob,
|
||||
UVWarpData data = {.mpoly = mpoly, .mloop = mloop, .mloopuv = mloopuv,
|
||||
.dvert = dvert, .defgrp_index = defgrp_index,
|
||||
.warp_mat = warp_mat, .axis_u = axis_u, .axis_v = axis_v};
|
||||
BLI_task_parallel_range_ex(0, numPolys, &data, NULL, 0, uv_warp_compute, numPolys > 1000, false);
|
||||
BLI_task_parallel_range(0, numPolys, &data, uv_warp_compute, numPolys > 1000);
|
||||
|
||||
dm->dirty |= DM_DIRTY_TESS_CDLAYERS;
|
||||
|
||||
|
||||
@@ -89,7 +89,7 @@ typedef struct Vert2GeomDataChunk {
|
||||
/**
|
||||
* Callback used by BLI_task 'for loop' helper.
|
||||
*/
|
||||
static void vert2geom_task_cb(void *userdata, void *userdata_chunk, int iter)
|
||||
static void vert2geom_task_cb_ex(void *userdata, void *userdata_chunk, const int iter, const int UNUSED(thread_id))
|
||||
{
|
||||
Vert2GeomData *data = userdata;
|
||||
Vert2GeomDataChunk *data_chunk = userdata_chunk;
|
||||
@@ -177,7 +177,8 @@ static void get_vert2geom_distance(int numVerts, float (*v_cos)[3],
|
||||
data.dist[2] = dist_f;
|
||||
|
||||
BLI_task_parallel_range_ex(
|
||||
0, numVerts, &data, &data_chunk, sizeof(data_chunk), vert2geom_task_cb, numVerts > 10000, false);
|
||||
0, numVerts, &data, &data_chunk, sizeof(data_chunk), vert2geom_task_cb_ex,
|
||||
numVerts > 10000, false);
|
||||
|
||||
if (dist_v)
|
||||
free_bvhtree_from_mesh(&treeData_v);
|
||||
|
||||
Reference in New Issue
Block a user