Cycles: make TBB a required library dependency, and use in a few places

Now that the rest of Blender also relies on TBB, there is no point in maintaining custom
code for parallel_for and thread-local storage.
This commit is contained in:
2020-06-05 12:53:38 +02:00
parent ace3268482
commit d8c2092b15
10 changed files with 58 additions and 138 deletions

View File

@@ -654,6 +654,7 @@ if(WITH_BOOST AND NOT (WITH_CYCLES OR WITH_OPENIMAGEIO OR WITH_INTERNATIONAL OR
set(WITH_BOOST OFF) set(WITH_BOOST OFF)
endif() endif()
set_and_warn_dependency(WITH_TBB WITH_CYCLES OFF)
set_and_warn_dependency(WITH_TBB WITH_USD OFF) set_and_warn_dependency(WITH_TBB WITH_USD OFF)
set_and_warn_dependency(WITH_TBB WITH_OPENIMAGEDENOISE OFF) set_and_warn_dependency(WITH_TBB WITH_OPENIMAGEDENOISE OFF)
set_and_warn_dependency(WITH_TBB WITH_OPENVDB OFF) set_and_warn_dependency(WITH_TBB WITH_OPENVDB OFF)

View File

@@ -286,6 +286,7 @@ include_directories(
${OPENEXR_INCLUDE_DIR} ${OPENEXR_INCLUDE_DIR}
${OPENEXR_INCLUDE_DIRS} ${OPENEXR_INCLUDE_DIRS}
${PUGIXML_INCLUDE_DIR} ${PUGIXML_INCLUDE_DIR}
${TBB_INCLUDE_DIRS}
) )
if(CYCLES_STANDALONE_REPOSITORY) if(CYCLES_STANDALONE_REPOSITORY)

View File

@@ -423,22 +423,6 @@ BVHNode *BVHBuild::run()
} }
spatial_min_overlap = root.bounds().safe_area() * params.spatial_split_alpha; spatial_min_overlap = root.bounds().safe_area() * params.spatial_split_alpha;
if (params.use_spatial_split) {
/* NOTE: The API here tries to be as much ready for multi-threaded build
* as possible, but at the same time it tries not to introduce any
* changes in behavior for until all refactoring needed for threading is
* finished.
*
* So we currently allocate single storage for now, which is only used by
* the only thread working on the spatial BVH build.
*/
spatial_storage.resize(TaskScheduler::num_threads() + 1);
size_t num_bins = max(root.size(), (int)BVHParams::NUM_SPATIAL_BINS) - 1;
foreach (BVHSpatialStorage &storage, spatial_storage) {
storage.right_bounds.clear();
}
spatial_storage[0].right_bounds.resize(num_bins);
}
spatial_free_index = 0; spatial_free_index = 0;
need_prim_time = params.num_motion_curve_steps > 0 || params.num_motion_triangle_steps > 0; need_prim_time = params.num_motion_curve_steps > 0 || params.num_motion_triangle_steps > 0;
@@ -475,6 +459,9 @@ BVHNode *BVHBuild::run()
task_pool.wait_work(); task_pool.wait_work();
} }
/* clean up temporary memory usage by threads */
spatial_storage.clear();
/* delete if we canceled */ /* delete if we canceled */
if (rootnode) { if (rootnode) {
if (progress.get_cancel()) { if (progress.get_cancel()) {
@@ -551,19 +538,18 @@ void BVHBuild::thread_build_node(InnerNode *inner, int child, BVHObjectBinning *
} }
} }
void BVHBuild::thread_build_spatial_split_node(InnerNode *inner, void BVHBuild::thread_build_spatial_split_node(
int child, InnerNode *inner, int child, BVHRange *range, vector<BVHReference> *references, int level)
BVHRange *range,
vector<BVHReference> *references,
int level,
int thread_id)
{ {
if (progress.get_cancel()) { if (progress.get_cancel()) {
return; return;
} }
/* Get per-thread memory for spatial split. */
BVHSpatialStorage *local_storage = &spatial_storage.local();
/* build nodes */ /* build nodes */
BVHNode *node = build_node(*range, references, level, thread_id); BVHNode *node = build_node(*range, references, level, local_storage);
/* set child in inner node */ /* set child in inner node */
inner->children[child] = node; inner->children[child] = node;
@@ -690,7 +676,7 @@ BVHNode *BVHBuild::build_node(const BVHObjectBinning &range, int level)
BVHNode *BVHBuild::build_node(const BVHRange &range, BVHNode *BVHBuild::build_node(const BVHRange &range,
vector<BVHReference> *references, vector<BVHReference> *references,
int level, int level,
int thread_id) BVHSpatialStorage *storage)
{ {
/* Update progress. /* Update progress.
* *
@@ -712,7 +698,6 @@ BVHNode *BVHBuild::build_node(const BVHRange &range,
} }
/* Perform splitting test. */ /* Perform splitting test. */
BVHSpatialStorage *storage = &spatial_storage[thread_id];
BVHMixedSplit split(this, storage, range, references, level); BVHMixedSplit split(this, storage, range, references, level);
if (!(range.size() > 0 && params.top_level && level == 0)) { if (!(range.size() > 0 && params.top_level && level == 0)) {

View File

@@ -76,7 +76,7 @@ class BVHBuild {
BVHNode *build_node(const BVHRange &range, BVHNode *build_node(const BVHRange &range,
vector<BVHReference> *references, vector<BVHReference> *references,
int level, int level,
int thread_id); BVHSpatialStorage *storage);
BVHNode *build_node(const BVHObjectBinning &range, int level); BVHNode *build_node(const BVHObjectBinning &range, int level);
BVHNode *create_leaf_node(const BVHRange &range, const vector<BVHReference> &references); BVHNode *create_leaf_node(const BVHRange &range, const vector<BVHReference> &references);
BVHNode *create_object_leaf_nodes(const BVHReference *ref, int start, int num); BVHNode *create_object_leaf_nodes(const BVHReference *ref, int start, int num);
@@ -87,12 +87,8 @@ class BVHBuild {
/* Threads. */ /* Threads. */
enum { THREAD_TASK_SIZE = 4096 }; enum { THREAD_TASK_SIZE = 4096 };
void thread_build_node(InnerNode *node, int child, BVHObjectBinning *range, int level); void thread_build_node(InnerNode *node, int child, BVHObjectBinning *range, int level);
void thread_build_spatial_split_node(InnerNode *node, void thread_build_spatial_split_node(
int child, InnerNode *node, int child, BVHRange *range, vector<BVHReference> *references, int level);
BVHRange *range,
vector<BVHReference> *references,
int level,
int thread_id);
thread_mutex build_mutex; thread_mutex build_mutex;
/* Progress. */ /* Progress. */
@@ -127,7 +123,7 @@ class BVHBuild {
/* Spatial splitting. */ /* Spatial splitting. */
float spatial_min_overlap; float spatial_min_overlap;
vector<BVHSpatialStorage> spatial_storage; enumerable_thread_specific<BVHSpatialStorage> spatial_storage;
size_t spatial_free_index; size_t spatial_free_index;
thread_spin_lock spatial_spin_lock; thread_spin_lock spatial_spin_lock;

View File

@@ -20,6 +20,7 @@
#include "util/util_logging.h" #include "util/util_logging.h"
#include "util/util_path.h" #include "util/util_path.h"
#include "util/util_sky_model.h" #include "util/util_sky_model.h"
#include "util/util_task.h"
CCL_NAMESPACE_BEGIN CCL_NAMESPACE_BEGIN
@@ -58,26 +59,21 @@ bool SkyLoader::load_pixels(const ImageMetaData &metadata,
float altitude_f = (float)altitude; float altitude_f = (float)altitude;
/* precompute sky texture */ /* precompute sky texture */
const int num_chunks = TaskScheduler::num_threads(); const int rows_per_task = divide_up(1024, width);
const int chunk_size = height / num_chunks; parallel_for(blocked_range<size_t>(0, height, rows_per_task),
TaskPool pool; [&](const blocked_range<size_t> &r) {
for (int chunk = 0; chunk < num_chunks; chunk++) { nishita_skymodel_precompute_texture(pixel_data,
const int chunk_start = chunk * chunk_size; metadata.channels,
const int chunk_end = (chunk + 1 < num_chunks) ? (chunk + 1) * chunk_size : height; r.begin(),
pool.push(function_bind(&nishita_skymodel_precompute_texture, r.end(),
pixel_data, width,
metadata.channels, height,
chunk_start, sun_elevation,
chunk_end, altitude_f,
width, air_density,
height, dust_density,
sun_elevation, ozone_density);
altitude_f, });
air_density,
dust_density,
ozone_density));
}
pool.wait_work();
return true; return true;
} }

View File

@@ -680,29 +680,13 @@ void LightManager::device_update_background(Device *device,
float2 *cond_cdf = dscene->light_background_conditional_cdf.alloc(cdf_width * res.y); float2 *cond_cdf = dscene->light_background_conditional_cdf.alloc(cdf_width * res.y);
double time_start = time_dt(); double time_start = time_dt();
if (max(res.x, res.y) < 512) {
/* Small enough resolution, faster to do single-threaded. */ /* Create CDF in parallel. */
background_cdf(0, res.y, res.x, res.y, &pixels, cond_cdf); const int rows_per_task = divide_up(10240, res.x);
} parallel_for(blocked_range<size_t>(0, res.y, rows_per_task),
else { [&](const blocked_range<size_t> &r) {
/* Threaded evaluation for large resolution. */ background_cdf(r.begin(), r.end(), res.x, res.y, &pixels, cond_cdf);
const int num_blocks = TaskScheduler::num_threads(); });
const int chunk_size = res.y / num_blocks;
int start_row = 0;
TaskPool pool;
for (int i = 0; i < num_blocks; ++i) {
const int current_chunk_size = (i != num_blocks - 1) ? chunk_size : (res.y - i * chunk_size);
pool.push(function_bind(&background_cdf,
start_row,
start_row + current_chunk_size,
res.x,
res.y,
&pixels,
cond_cdf));
start_row += current_chunk_size;
}
pool.wait_work();
}
/* marginal CDFs (column, V direction, sum of rows) */ /* marginal CDFs (column, V direction, sum of rows) */
marg_cdf[0].x = cond_cdf[res.x].x; marg_cdf[0].x = cond_cdf[res.x].x;

View File

@@ -78,7 +78,6 @@ struct UpdateObjectTransformState {
Scene *scene; Scene *scene;
/* Some locks to keep everything thread-safe. */ /* Some locks to keep everything thread-safe. */
thread_spin_lock queue_lock;
thread_spin_lock surface_area_lock; thread_spin_lock surface_area_lock;
/* First unused object index in the queue. */ /* First unused object index in the queue. */
@@ -551,41 +550,6 @@ void ObjectManager::device_update_object_transform(UpdateObjectTransformState *s
} }
} }
bool ObjectManager::device_update_object_transform_pop_work(UpdateObjectTransformState *state,
int *start_index,
int *num_objects)
{
/* Tweakable parameter, number of objects per chunk.
* Too small value will cause some extra overhead due to spin lock,
* too big value might not use all threads nicely.
*/
static const int OBJECTS_PER_TASK = 32;
bool have_work = false;
state->queue_lock.lock();
int num_scene_objects = state->scene->objects.size();
if (state->queue_start_object < num_scene_objects) {
int count = min(OBJECTS_PER_TASK, num_scene_objects - state->queue_start_object);
*start_index = state->queue_start_object;
*num_objects = count;
state->queue_start_object += count;
have_work = true;
}
state->queue_lock.unlock();
return have_work;
}
void ObjectManager::device_update_object_transform_task(UpdateObjectTransformState *state)
{
int start_index, num_objects;
while (device_update_object_transform_pop_work(state, &start_index, &num_objects)) {
for (int i = 0; i < num_objects; ++i) {
const int object_index = start_index + i;
Object *ob = state->scene->objects[object_index];
device_update_object_transform(state, ob);
}
}
}
void ObjectManager::device_update_transforms(DeviceScene *dscene, Scene *scene, Progress &progress) void ObjectManager::device_update_transforms(DeviceScene *dscene, Scene *scene, Progress &progress)
{ {
UpdateObjectTransformState state; UpdateObjectTransformState state;
@@ -631,29 +595,16 @@ void ObjectManager::device_update_transforms(DeviceScene *dscene, Scene *scene,
numparticles += psys->particles.size(); numparticles += psys->particles.size();
} }
/* NOTE: If it's just a handful of objects we deal with them in a single /* Parallel object update, with grain size to avoid too much threadng overhead
* thread to avoid threading overhead. However, this threshold is might * for individual objects. */
* need some tweaks to make mid-complex scenes optimal. static const int OBJECTS_PER_TASK = 32;
*/ parallel_for(blocked_range<size_t>(0, scene->objects.size(), OBJECTS_PER_TASK),
if (scene->objects.size() < 64) { [&](const blocked_range<size_t> &r) {
foreach (Object *ob, scene->objects) { for (size_t i = r.begin(); i != r.end(); i++) {
device_update_object_transform(&state, ob); Object *ob = state.scene->objects[i];
if (progress.get_cancel()) { device_update_object_transform(&state, ob);
return; }
} });
}
}
else {
const int num_threads = TaskScheduler::num_threads();
TaskPool pool;
for (int i = 0; i < num_threads; ++i) {
pool.push(function_bind(&ObjectManager::device_update_object_transform_task, this, &state));
}
pool.wait_work();
if (progress.get_cancel()) {
return;
}
}
dscene->objects.copy_to_device(); dscene->objects.copy_to_device();
if (state.need_motion == Scene::MOTION_PASS) { if (state.need_motion == Scene::MOTION_PASS) {

View File

@@ -94,8 +94,7 @@ void SVMShaderManager::device_update(Device *device,
scene, scene,
scene->shaders[i], scene->shaders[i],
&progress, &progress,
&shader_svm_nodes[i]), &shader_svm_nodes[i]));
false);
} }
task_pool.wait_work(); task_pool.wait_work();

View File

@@ -29,7 +29,7 @@ set(SRC
) )
set(LIB set(LIB
${TBB_LIBRARIES}
) )
if(WITH_CYCLES_STANDALONE) if(WITH_CYCLES_STANDALONE)

View File

@@ -22,8 +22,15 @@
#include "util/util_thread.h" #include "util/util_thread.h"
#include "util/util_vector.h" #include "util/util_vector.h"
#define TBB_SUPPRESS_DEPRECATED_MESSAGES 1
#include <tbb/tbb.h>
CCL_NAMESPACE_BEGIN CCL_NAMESPACE_BEGIN
using tbb::blocked_range;
using tbb::enumerable_thread_specific;
using tbb::parallel_for;
class Task; class Task;
class TaskPool; class TaskPool;
class TaskScheduler; class TaskScheduler;