2020-04-09 15:51:44 +02:00
|
|
|
/*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version 2
|
|
|
|
* of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software Foundation,
|
|
|
|
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/** \file
|
|
|
|
* \ingroup bli
|
|
|
|
*
|
2020-04-30 07:59:23 +02:00
|
|
|
* Task pool to run tasks in parallel.
|
2020-04-09 15:51:44 +02:00
|
|
|
*/
|
|
|
|
|
2020-12-04 11:28:09 +01:00
|
|
|
#include <cstdlib>
|
2020-04-30 07:59:23 +02:00
|
|
|
#include <memory>
|
2021-06-08 09:37:45 +02:00
|
|
|
#include <thread>
|
2020-04-30 07:59:23 +02:00
|
|
|
#include <utility>
|
2020-04-09 15:51:44 +02:00
|
|
|
|
|
|
|
#include "MEM_guardedalloc.h"
|
|
|
|
|
|
|
|
#include "DNA_listBase.h"
|
|
|
|
|
|
|
|
#include "BLI_math.h"
|
|
|
|
#include "BLI_mempool.h"
|
|
|
|
#include "BLI_task.h"
|
|
|
|
#include "BLI_threads.h"
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
#ifdef WITH_TBB
|
2021-02-10 18:17:23 +01:00
|
|
|
# include <tbb/blocked_range.h>
|
|
|
|
# include <tbb/task_arena.h>
|
|
|
|
# include <tbb/task_group.h>
|
2020-04-09 15:51:44 +02:00
|
|
|
#endif
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Task
 *
 * Unit of work to execute. This is a C++ class to work with TBB. */

class Task {
 public:
  /* Pool this task belongs to; passed back to the run/free callbacks. */
  TaskPool *pool;
  /* Callback performing the actual work. */
  TaskRunFunction run;
  /* Opaque user data handed to `run`. */
  void *taskdata;
  /* When true this task owns `taskdata` and releases it in the destructor. */
  bool free_taskdata;
  /* Optional custom free callback for `taskdata`; when null, MEM_freeN is used. */
  TaskFreeFunction freedata;

  Task(TaskPool *pool,
       TaskRunFunction run,
       void *taskdata,
       bool free_taskdata,
       TaskFreeFunction freedata)
      : pool(pool), run(run), taskdata(taskdata), free_taskdata(free_taskdata), freedata(freedata)
  {
  }

  ~Task()
  {
    /* Release task data only if this task still owns it. Ownership is cleared
     * when a task is moved from, so the data is freed exactly once even
     * though moved-from Task objects are still destructed. */
    if (free_taskdata) {
      if (freedata) {
        freedata(pool, taskdata);
      }
      else {
        MEM_freeN(taskdata);
      }
    }
  }

  /* Move constructor.
   * For performance, ensure we never copy the task and only move it.
   * For TBB version 2017 and earlier we apply a workaround to make up for
   * the lack of move constructor support. */
  Task(Task &&other)
      : pool(other.pool),
        run(other.run),
        taskdata(other.taskdata),
        free_taskdata(other.free_taskdata),
        freedata(other.freedata)
  {
    /* Clear the source so its destructor does not free `taskdata`. */
    other.pool = nullptr;
    other.run = nullptr;
    other.taskdata = nullptr;
    other.free_taskdata = false;
    other.freedata = nullptr;
  }

#if defined(WITH_TBB) && TBB_INTERFACE_VERSION_MAJOR < 10
  /* "Copy" constructor for old TBB without move support: behaves as a move by
   * casting away const and clearing the source (see workaround note above). */
  Task(const Task &other)
      : pool(other.pool),
        run(other.run),
        taskdata(other.taskdata),
        free_taskdata(other.free_taskdata),
        freedata(other.freedata)
  {
    ((Task &)other).pool = NULL;
    ((Task &)other).run = NULL;
    ((Task &)other).taskdata = NULL;
    ((Task &)other).free_taskdata = false;
    ((Task &)other).freedata = NULL;
  }
#else
  Task(const Task &other) = delete;
#endif

  Task &operator=(const Task &other) = delete;
  Task &operator=(Task &&other) = delete;

  /* Execute the task; defined below, after TaskPool is fully declared. */
  void operator()() const;
};
|
2020-04-09 15:51:44 +02:00
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* TBB Task Group.
 *
 * Subclass since there seems to be no other way to set priority. */

#ifdef WITH_TBB
class TBBTaskGroup : public tbb::task_group {
 public:
  TBBTaskGroup(TaskPriority priority)
  {
# if TBB_INTERFACE_VERSION_MAJOR >= 12
    /* TODO: support priorities in TBB 2021, where they are only available as
     * part of task arenas, no longer for task groups. Or remove support for
     * task priorities if they are no longer useful. */
    UNUSED_VARS(priority);
# else
    /* Map pool priority onto the group's context priority. Note that
     * TASK_PRIORITY_HIGH deliberately maps to tbb::priority_normal,
     * not tbb::priority_high. */
    switch (priority) {
      case TASK_PRIORITY_LOW:
        my_context.set_priority(tbb::priority_low);
        break;
      case TASK_PRIORITY_HIGH:
        my_context.set_priority(tbb::priority_normal);
        break;
    }
# endif
  }
};
#endif
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Task Pool */
|
2020-04-09 15:51:44 +02:00
|
|
|
|
2020-12-09 16:29:11 +01:00
|
|
|
/* Scheduling backend of a pool, fixed at creation time. */
enum TaskPoolType {
  /* Run tasks via the TBB scheduler (falls back to immediate execution when
   * built without TBB or running single-threaded). */
  TASK_POOL_TBB,
  /* Like TASK_POOL_TBB, but pushed tasks are buffered and only start running
   * once work_and_wait() is called. */
  TASK_POOL_TBB_SUSPENDED,
  /* Always execute tasks immediately on the calling thread. */
  TASK_POOL_NO_THREADS,
  /* Background tasks; uses TBB when threads are available, otherwise a
   * dedicated background thread so the main thread is not blocked. */
  TASK_POOL_BACKGROUND,
  /* Background pool executing tasks one after the other, never in parallel. */
  TASK_POOL_BACKGROUND_SERIAL,
};
|
2020-04-09 15:51:44 +02:00
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
struct TaskPool {
  /* Backend/scheduling mode of this pool (fixed at creation). */
  TaskPoolType type;
  /* False when running single-threaded (scheduler has one thread, or the
   * pool is TASK_POOL_NO_THREADS). */
  bool use_threads;
  /* Whether tasks run inside an isolated TBB region (see Task::operator()). */
  TaskIsolation task_isolation;

  /* Mutex handed out to users via BLI_task_pool_user_mutex(). */
  ThreadMutex user_mutex;
  /* Opaque user pointer returned by BLI_task_pool_user_data(). */
  void *userdata;

#ifdef WITH_TBB
  /* TBB task pool. Constructed with placement new in tbb_task_pool_create()
   * and destroyed explicitly in tbb_task_pool_free(), because the enclosing
   * TaskPool is allocated with MEM_callocN. */
  TBBTaskGroup tbb_group;
  /* This is used to detect a common way to accidentally create a deadlock with task isolation. */
  std::thread::id task_pool_create_thread_id;
#endif
  /* True while a suspended pool buffers tasks instead of running them. */
  volatile bool is_suspended;
  /* Storage for tasks buffered while suspended (null for other pool types). */
  BLI_mempool *suspended_mempool;

  /* Background task pool. */
  ListBase background_threads;
  ThreadQueue *background_queue;
  volatile bool background_is_canceling;
};
|
|
|
|
|
2021-06-08 09:37:45 +02:00
|
|
|
/* Execute task. */
void Task::operator()() const
{
#ifdef WITH_TBB
  /* Run inside an isolated TBB region so nested waits inside `run` do not
   * pick up unrelated tasks from the outer arena. */
  if (pool->task_isolation == TASK_ISOLATION_ON) {
    tbb::this_task_arena::isolate([this] { run(pool, taskdata); });
    return;
  }
#endif
  run(pool, taskdata);
}
|
|
|
|
|
|
|
|
/* Debug check that an isolated pool is only used from its creating thread.
 * NOTE(review): the early `return` below disables the check entirely; all
 * code after it is currently unreachable. */
static void assert_on_valid_thread(TaskPool *pool)
{
  /* TODO: Remove this `return` to enable the check. */
  return;
#ifdef DEBUG
# ifdef WITH_TBB
  if (pool->task_isolation == TASK_ISOLATION_ON) {
    const std::thread::id current_id = std::this_thread::get_id();
    /* This task pool is modified from different threads. To avoid deadlocks, `TASK_ISOLATION_OFF`
     * has to be used. Task isolation can still be used in a more fine-grained way within the
     * tasks, but should not be enabled for the entire task pool. */
    BLI_assert(pool->task_pool_create_thread_id == current_id);
  }
# endif
#endif
  UNUSED_VARS_NDEBUG(pool);
}
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* TBB Task Pool.
|
|
|
|
*
|
|
|
|
* Task pool using the TBB scheduler for tasks. When building without TBB
|
|
|
|
* support or running Blender with -t 1, this reverts to single threaded.
|
|
|
|
*
|
|
|
|
* Tasks may be suspended until in all are created, to make it possible to
|
|
|
|
* initialize data structures and create tasks in a single pass. */
|
2020-04-09 15:51:44 +02:00
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Set up the TBB-backed pool: a buffer for suspended tasks if requested, and
 * the TBB task group when running multi-threaded. */
static void tbb_task_pool_create(TaskPool *pool, TaskPriority priority)
{
  if (pool->type == TASK_POOL_TBB_SUSPENDED) {
    pool->is_suspended = true;
    pool->suspended_mempool = BLI_mempool_create(sizeof(Task), 512, 512, BLI_MEMPOOL_ALLOW_ITER);
  }

#ifdef WITH_TBB
  if (pool->use_threads) {
    /* The TaskPool is allocated with MEM_callocN, so tbb_group was never
     * constructed; construct it in place here. The matching explicit
     * destructor call is in tbb_task_pool_free(). */
    new (&pool->tbb_group) TBBTaskGroup(priority);
  }
#else
  UNUSED_VARS(priority);
#endif
}
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Dispatch a task: buffer it (suspended pool), hand it to the TBB group
 * (threaded pool), or execute it immediately (single-threaded). */
static void tbb_task_pool_run(TaskPool *pool, Task &&task)
{
  if (pool->is_suspended) {
    /* Suspended task that will be executed in work_and_wait(). */
    Task *task_mem = (Task *)BLI_mempool_alloc(pool->suspended_mempool);
    new (task_mem) Task(std::move(task));
#ifdef __GNUC__
    /* Work around apparent compiler bug where task is not properly copied
     * to task_mem. This appears unrelated to the use of placement new or
     * move semantics, happens even writing to a plain C struct. Rather the
     * call into TBB seems to have some indirect effect. */
    std::atomic_thread_fence(std::memory_order_release);
#endif
  }
#ifdef WITH_TBB
  else if (pool->use_threads) {
    /* Execute in TBB task group. */
    pool->tbb_group.run(std::move(task));
  }
#endif
  else {
    /* Execute immediately. */
    task();
  }
}
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Release any buffered (suspended) tasks for execution, then wait until all
 * tasks of this pool have finished. */
static void tbb_task_pool_work_and_wait(TaskPool *pool)
{
  /* Start any suspended task now. */
  if (pool->suspended_mempool) {
    pool->is_suspended = false;

    BLI_mempool_iter iter;
    BLI_mempool_iternew(pool->suspended_mempool, &iter);
    while (Task *task = (Task *)BLI_mempool_iterstep(&iter)) {
      /* Moving out of the mempool slot clears the task's ownership flags, so
       * clearing the mempool below does not double-free task data. */
      tbb_task_pool_run(pool, std::move(*task));
    }

    BLI_mempool_clear(pool->suspended_mempool);
  }

#ifdef WITH_TBB
  if (pool->use_threads) {
    /* This is called wait(), but internally it can actually do work. This
     * matters because we don't want recursive usage of task pools to run
     * out of threads and get stuck. */
    pool->tbb_group.wait();
  }
#endif
}
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Cancel pending TBB tasks and wait until no task of this pool is running. */
static void tbb_task_pool_cancel(TaskPool *pool)
{
#ifdef WITH_TBB
  if (pool->use_threads) {
    pool->tbb_group.cancel();
    /* Wait so that already-running tasks have finished before returning. */
    pool->tbb_group.wait();
  }
#else
  UNUSED_VARS(pool);
#endif
}
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Whether the current task's group is being canceled. Always false when
 * running single-threaded, since tasks then run to completion immediately.
 * NOTE(review): this queries the group of the *calling* task, so it is only
 * meaningful when called from within one of this pool's tasks. */
static bool tbb_task_pool_canceled(TaskPool *pool)
{
#ifdef WITH_TBB
  if (pool->use_threads) {
    return tbb::is_current_task_group_canceling();
  }
#else
  UNUSED_VARS(pool);
#endif

  return false;
}
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Tear down resources created in tbb_task_pool_create(). */
static void tbb_task_pool_free(TaskPool *pool)
{
#ifdef WITH_TBB
  if (pool->use_threads) {
    /* Matches the placement new in tbb_task_pool_create(). */
    pool->tbb_group.~TBBTaskGroup();
  }
#endif

  if (pool->suspended_mempool) {
    BLI_mempool_destroy(pool->suspended_mempool);
  }
}
|
2020-04-09 15:51:44 +02:00
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Background Task Pool.
|
|
|
|
*
|
|
|
|
* Fallback for running background tasks when building without TBB. */
|
2020-04-09 15:51:44 +02:00
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Background thread entry point: pop and execute tasks until the queue is
 * drained and signaled to stop waiting (BLI_thread_queue_nowait). */
static void *background_task_run(void *userdata)
{
  TaskPool *pool = (TaskPool *)userdata;
  while (Task *task = (Task *)BLI_thread_queue_pop(pool->background_queue)) {
    (*task)();
    /* Tasks were placement-new'ed into MEM_mallocN memory by
     * background_task_pool_run(), so destroy and free them manually. */
    task->~Task();
    MEM_freeN(task);
  }
  return nullptr;
}
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Create the task queue and a thread pool with a single worker slot; the
 * worker itself is started on demand in background_task_pool_run(). */
static void background_task_pool_create(TaskPool *pool)
{
  pool->background_queue = BLI_thread_queue_init();
  BLI_threadpool_init(&pool->background_threads, background_task_run, 1);
}
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Queue a task for the background thread, starting the thread if idle. */
static void background_task_pool_run(TaskPool *pool, Task &&task)
{
  /* Move the task into heap memory owned by the queue; it is destroyed and
   * freed in background_task_run() (or in cancel, for tasks never started). */
  Task *task_mem = (Task *)MEM_mallocN(sizeof(Task), __func__);
  new (task_mem) Task(std::move(task));
  BLI_thread_queue_push(pool->background_queue, task_mem);

  /* Lazily start the (single) background worker when it is not running. */
  if (BLI_available_threads(&pool->background_threads)) {
    BLI_threadpool_insert(&pool->background_threads, pool);
  }
}
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Block until all queued background tasks have finished. */
static void background_task_pool_work_and_wait(TaskPool *pool)
{
  /* Signal background thread to stop waiting for new tasks if none are
   * left, and wait for tasks and thread to finish. */
  BLI_thread_queue_nowait(pool->background_queue);
  BLI_thread_queue_wait_finish(pool->background_queue);
  BLI_threadpool_clear(&pool->background_threads);
}
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Cancel queued background tasks; the task currently running (if any) can
 * observe the canceling flag via background_task_pool_canceled(). */
static void background_task_pool_cancel(TaskPool *pool)
{
  pool->background_is_canceling = true;

  /* Remove tasks not yet started by background thread. */
  BLI_thread_queue_nowait(pool->background_queue);
  while (Task *task = (Task *)BLI_thread_queue_pop(pool->background_queue)) {
    /* Never executed, so destroy and free manually (see background_task_run). */
    task->~Task();
    MEM_freeN(task);
  }

  /* Let background thread finish or cancel task it is working on. */
  BLI_threadpool_remove(&pool->background_threads, pool);
  pool->background_is_canceling = false;
}
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Poll whether this background pool is currently in the middle of a cancel
 * (set and cleared by background_task_pool_cancel). */
static bool background_task_pool_canceled(TaskPool *pool)
{
  const bool is_canceling = pool->background_is_canceling;
  return is_canceling;
}
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Finish all remaining work, then tear down the worker thread and queue. */
static void background_task_pool_free(TaskPool *pool)
{
  background_task_pool_work_and_wait(pool);

  BLI_threadpool_end(&pool->background_threads);
  BLI_thread_queue_free(pool->background_queue);
}
|
|
|
|
|
|
|
|
/* Task Pool */
|
|
|
|
|
2021-06-08 09:37:45 +02:00
|
|
|
/* Shared constructor for all task pool variants. */
static TaskPool *task_pool_create_ex(void *userdata,
                                     TaskPoolType type,
                                     TaskPriority priority,
                                     TaskIsolation task_isolation)
{
  /* Single-threaded when the scheduler only has one thread, or when threading
   * is explicitly disabled for this pool type. */
  const bool use_threads = BLI_task_scheduler_num_threads() > 1 && type != TASK_POOL_NO_THREADS;

  /* Background task pool uses regular TBB scheduling if available. Only when
   * building without TBB or running with -t 1 do we need to ensure these tasks
   * do not block the main thread. */
  if (type == TASK_POOL_BACKGROUND && use_threads) {
    type = TASK_POOL_TBB;
  }

  /* Allocate task pool. */
  TaskPool *pool = (TaskPool *)MEM_callocN(sizeof(TaskPool), "TaskPool");

  pool->type = type;
  pool->use_threads = use_threads;
  pool->task_isolation = task_isolation;

#ifdef WITH_TBB
  /* Remember the creating thread so assert_on_valid_thread() can detect use
   * from other threads (a common source of isolation deadlocks). */
  pool->task_pool_create_thread_id = std::this_thread::get_id();
#endif

  pool->userdata = userdata;
  BLI_mutex_init(&pool->user_mutex);

  /* Backend-specific setup. */
  switch (type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_create(pool, priority);
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_create(pool);
      break;
  }

  return pool;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Create a normal task pool. Tasks will be executed as soon as they are added.
|
|
|
|
*/
|
2021-06-08 09:37:45 +02:00
|
|
|
TaskPool *BLI_task_pool_create(void *userdata, TaskPriority priority, TaskIsolation task_isolation)
|
2020-04-09 15:51:44 +02:00
|
|
|
{
|
2021-06-08 09:37:45 +02:00
|
|
|
return task_pool_create_ex(userdata, TASK_POOL_TBB, priority, task_isolation);
|
2020-04-09 15:51:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Create a background task pool.
|
|
|
|
* In multi-threaded context, there is no differences with #BLI_task_pool_create(),
|
|
|
|
* but in single-threaded case it is ensured to have at least one worker thread to run on
|
|
|
|
* (i.e. you don't have to call #BLI_task_pool_work_and_wait
|
|
|
|
* on it to be sure it will be processed).
|
|
|
|
*
|
|
|
|
* \note Background pools are non-recursive
|
|
|
|
* (that is, you should not create other background pools in tasks assigned to a background pool,
|
|
|
|
* they could end never being executed, since the 'fallback' background thread is already
|
|
|
|
* busy with parent task in single-threaded context).
|
|
|
|
*/
|
2021-06-08 09:37:45 +02:00
|
|
|
TaskPool *BLI_task_pool_create_background(void *userdata,
|
|
|
|
TaskPriority priority,
|
|
|
|
TaskIsolation task_isolation)
|
2020-04-09 15:51:44 +02:00
|
|
|
{
|
2021-06-08 09:37:45 +02:00
|
|
|
return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND, priority, task_isolation);
|
2020-04-09 15:51:44 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Similar to BLI_task_pool_create() but does not schedule any tasks for execution
|
|
|
|
* for until BLI_task_pool_work_and_wait() is called. This helps reducing threading
|
|
|
|
* overhead when pushing huge amount of small initial tasks from the main thread.
|
|
|
|
*/
|
2021-06-08 09:37:45 +02:00
|
|
|
TaskPool *BLI_task_pool_create_suspended(void *userdata,
|
|
|
|
TaskPriority priority,
|
|
|
|
TaskIsolation task_isolation)
|
2020-04-09 15:51:44 +02:00
|
|
|
{
|
2021-06-08 09:37:45 +02:00
|
|
|
return task_pool_create_ex(userdata, TASK_POOL_TBB_SUSPENDED, priority, task_isolation);
|
2020-04-09 15:51:44 +02:00
|
|
|
}
|
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/**
|
|
|
|
* Single threaded task pool that executes pushed task immediately, for
|
|
|
|
* debugging purposes.
|
|
|
|
*/
|
|
|
|
TaskPool *BLI_task_pool_create_no_threads(void *userdata)
|
2020-04-09 15:51:44 +02:00
|
|
|
{
|
2021-06-08 09:37:45 +02:00
|
|
|
return task_pool_create_ex(
|
|
|
|
userdata, TASK_POOL_NO_THREADS, TASK_PRIORITY_HIGH, TASK_ISOLATION_ON);
|
2020-04-30 07:59:23 +02:00
|
|
|
}
|
2020-04-09 15:51:44 +02:00
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/**
|
2020-05-09 17:15:25 +10:00
|
|
|
* Task pool that executes one task after the other, possibly on different threads
|
2020-04-30 07:59:23 +02:00
|
|
|
* but never in parallel.
|
|
|
|
*/
|
|
|
|
TaskPool *BLI_task_pool_create_background_serial(void *userdata, TaskPriority priority)
|
|
|
|
{
|
2021-06-08 09:37:45 +02:00
|
|
|
return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND_SERIAL, priority, TASK_ISOLATION_ON);
|
2020-04-30 07:59:23 +02:00
|
|
|
}
|
2020-04-09 15:51:44 +02:00
|
|
|
|
2020-04-30 07:59:23 +02:00
|
|
|
/* Free the pool: finish/tear down the backend, then release the pool itself.
 * For background pools this waits for all remaining tasks first (see
 * background_task_pool_free). */
void BLI_task_pool_free(TaskPool *pool)
{
  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_free(pool);
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_free(pool);
      break;
  }

  BLI_mutex_end(&pool->user_mutex);

  MEM_freeN(pool);
}
|
|
|
|
|
|
|
|
/**
 * Push a task into the pool for execution.
 *
 * \param run: Callback that performs the work.
 * \param taskdata: Opaque data passed to \a run.
 * \param free_taskdata: When true, ownership of \a taskdata transfers to the
 * pool; it is released once the task is destroyed (via \a freedata if given,
 * otherwise with MEM_freeN).
 * \param freedata: Optional custom free function for \a taskdata.
 */
void BLI_task_pool_push(TaskPool *pool,
                        TaskRunFunction run,
                        void *taskdata,
                        bool free_taskdata,
                        TaskFreeFunction freedata)
{
  assert_on_valid_thread(pool);

  Task task(pool, run, taskdata, free_taskdata, freedata);

  /* Dispatch to the backend; the task is moved, so the local `task` no longer
   * owns `taskdata` afterwards. */
  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_run(pool, std::move(task));
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_run(pool, std::move(task));
      break;
  }
}
|
|
|
|
|
|
|
|
/* Work on tasks from the calling thread where possible and block until all
 * tasks in the pool have completed. Also starts buffered tasks of suspended
 * pools. */
void BLI_task_pool_work_and_wait(TaskPool *pool)
{
  assert_on_valid_thread(pool);

  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_work_and_wait(pool);
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_work_and_wait(pool);
      break;
  }
}
|
|
|
|
|
|
|
|
/* Cancel all pending tasks and wait until currently running ones finish. */
void BLI_task_pool_cancel(TaskPool *pool)
{
  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_cancel(pool);
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_cancel(pool);
      break;
  }
}
|
|
|
|
|
2021-01-15 17:21:14 +01:00
|
|
|
/* Whether the currently executing task of this pool is being canceled;
 * intended to be polled from inside a running task. */
bool BLI_task_pool_current_canceled(TaskPool *pool)
{
  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      return tbb_task_pool_canceled(pool);
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      return background_task_pool_canceled(pool);
  }
  /* The `!` is required: a bare string literal is a non-null pointer, so
   * `BLI_assert("...")` always evaluates to true and the assertion could
   * never fire. Also name the actual function in the message. */
  BLI_assert(!"BLI_task_pool_current_canceled: Control flow should not come here!");
  return false;
}
|
|
|
|
|
2020-04-21 15:36:35 +02:00
|
|
|
/* Return the opaque user data pointer that was supplied at pool creation. */
void *BLI_task_pool_user_data(TaskPool *pool)
{
  void *data = pool->userdata;
  return data;
}
|
|
|
|
|
|
|
|
/* Return the pool's user mutex, for callers that need to synchronize access
 * to shared user data from within tasks. */
ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool)
{
  ThreadMutex *mutex = &pool->user_mutex;
  return mutex;
}
|