/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/** \file
 * \ingroup bli
 *
 * Task pool to run tasks in parallel.
 */

#include <atomic>
#include <memory>
#include <stdlib.h>
#include <utility>

#include "MEM_guardedalloc.h"

#include "DNA_listBase.h"

#include "BLI_math.h"
#include "BLI_mempool.h"
#include "BLI_task.h"
#include "BLI_threads.h"

#ifdef WITH_TBB
/* Quiet top level deprecation message, unrelated to API usage here. */
#  define TBB_SUPPRESS_DEPRECATED_MESSAGES 1
#  include <tbb/tbb.h>
#endif

/* Task
 *
 * Unit of work to execute. This is a C++ class to work with TBB. */

class Task {
 public:
  TaskPool *pool;
  TaskRunFunction run;
  void *taskdata;
  bool free_taskdata;
  TaskFreeFunction freedata;

  Task(TaskPool *pool,
       TaskRunFunction run,
       void *taskdata,
       bool free_taskdata,
       TaskFreeFunction freedata)
      : pool(pool), run(run), taskdata(taskdata), free_taskdata(free_taskdata), freedata(freedata)
  {
  }

  ~Task()
  {
    if (free_taskdata) {
      if (freedata) {
        freedata(pool, taskdata);
      }
      else {
        MEM_freeN(taskdata);
      }
    }
  }

  /* Move constructor. */
  Task(Task &&other)
      : pool(other.pool),
        run(other.run),
        taskdata(other.taskdata),
        free_taskdata(other.free_taskdata),
        freedata(other.freedata)
  {
    other.pool = NULL;
    other.run = NULL;
    other.taskdata = NULL;
    other.free_taskdata = false;
    other.freedata = NULL;
  }

  /* Execute task. */
  void operator()() const
  {
    run(pool, taskdata);
  }

  /* For performance, ensure we never copy the task and only move it. */
  Task(const Task &other) = delete;
  Task &operator=(const Task &other) = delete;
  Task &operator=(Task &&other) = delete;
};

/* TBB Task Group.
 *
 * Subclass since there seems to be no other way to set priority. */

#ifdef WITH_TBB
class TBBTaskGroup : public tbb::task_group {
 public:
  TBBTaskGroup(TaskPriority priority)
  {
    switch (priority) {
      case TASK_PRIORITY_LOW:
        my_context.set_priority(tbb::priority_low);
        break;
      case TASK_PRIORITY_HIGH:
        my_context.set_priority(tbb::priority_normal);
        break;
    }
  }

  ~TBBTaskGroup()
  {
  }
};
#endif

/* Task Pool */

typedef enum TaskPoolType {
  TASK_POOL_TBB,
  TASK_POOL_TBB_SUSPENDED,
  TASK_POOL_NO_THREADS,
  TASK_POOL_BACKGROUND,
  TASK_POOL_BACKGROUND_SERIAL,
} TaskPoolType;

struct TaskPool {
  TaskPoolType type;
  bool use_threads;

  ThreadMutex user_mutex;
  void *userdata;

  /* TBB task pool. */
#ifdef WITH_TBB
  TBBTaskGroup tbb_group;
#endif
  volatile bool is_suspended;
  BLI_mempool *suspended_mempool;

  /* Background task pool. */
  ListBase background_threads;
  ThreadQueue *background_queue;
  volatile bool background_is_canceling;
};

/* TBB Task Pool.
 *
 * Task pool using the TBB scheduler for tasks. When building without TBB
 * support or running Blender with -t 1, this reverts to single threaded.
 *
 * Tasks may be suspended until all are created, to make it possible to
 * initialize data structures and create tasks in a single pass. */

static void tbb_task_pool_create(TaskPool *pool, TaskPriority priority)
{
  if (pool->type == TASK_POOL_TBB_SUSPENDED) {
    pool->is_suspended = true;
    pool->suspended_mempool = BLI_mempool_create(sizeof(Task), 512, 512, BLI_MEMPOOL_ALLOW_ITER);
  }

#ifdef WITH_TBB
  if (pool->use_threads) {
    new (&pool->tbb_group) TBBTaskGroup(priority);
  }
#endif
}

static void tbb_task_pool_run(TaskPool *pool, Task &&task)
{
  if (pool->is_suspended) {
    /* Suspended task that will be executed in work_and_wait(). */
    Task *task_mem = (Task *)BLI_mempool_alloc(pool->suspended_mempool);
    new (task_mem) Task(std::move(task));
#ifdef __GNUC__
    /* Work around apparent compiler bug where task is not properly copied
     * to task_mem. This appears unrelated to the use of placement new or
     * move semantics, happens even writing to a plain C struct. Rather the
     * call into TBB seems to have some indirect effect. */
    std::atomic_thread_fence(std::memory_order_release);
#endif
  }
#ifdef WITH_TBB
  else if (pool->use_threads) {
    /* Execute in TBB task group. */
    pool->tbb_group.run(std::move(task));
  }
#endif
  else {
    /* Execute immediately. */
    task();
  }
}

static void tbb_task_pool_work_and_wait(TaskPool *pool)
{
  /* Start any suspended tasks now. */
  if (pool->suspended_mempool) {
    pool->is_suspended = false;

    BLI_mempool_iter iter;
    BLI_mempool_iternew(pool->suspended_mempool, &iter);
    while (Task *task = (Task *)BLI_mempool_iterstep(&iter)) {
      tbb_task_pool_run(pool, std::move(*task));
    }

    BLI_mempool_clear(pool->suspended_mempool);
  }

#ifdef WITH_TBB
  if (pool->use_threads) {
    /* This is called wait(), but internally it can actually do work. This
     * matters because we don't want recursive usage of task pools to run
     * out of threads and get stuck. */
    pool->tbb_group.wait();
  }
#endif
}

static void tbb_task_pool_cancel(TaskPool *pool)
{
#ifdef WITH_TBB
  if (pool->use_threads) {
    pool->tbb_group.cancel();
    pool->tbb_group.wait();
  }
#endif
}

static bool tbb_task_pool_canceled(TaskPool *pool)
{
#ifdef WITH_TBB
  if (pool->use_threads) {
    return pool->tbb_group.is_canceling();
  }
#endif

  return false;
}

static void tbb_task_pool_free(TaskPool *pool)
{
#ifdef WITH_TBB
  if (pool->use_threads) {
    pool->tbb_group.~TBBTaskGroup();
  }
#endif

  if (pool->suspended_mempool) {
    BLI_mempool_destroy(pool->suspended_mempool);
  }
}

/* Background Task Pool.
 *
 * Fallback for running background tasks when building without TBB. */

static void *background_task_run(void *userdata)
{
  TaskPool *pool = (TaskPool *)userdata;
  while (Task *task = (Task *)BLI_thread_queue_pop(pool->background_queue)) {
    (*task)();
    task->~Task();
    MEM_freeN(task);
  }
  return NULL;
}

static void background_task_pool_create(TaskPool *pool)
{
  pool->background_queue = BLI_thread_queue_init();
  BLI_threadpool_init(&pool->background_threads, background_task_run, 1);
  BLI_threadpool_insert(&pool->background_threads, pool);
}

static void background_task_pool_run(TaskPool *pool, Task &&task)
{
  Task *task_mem = (Task *)MEM_mallocN(sizeof(Task), __func__);
  new (task_mem) Task(std::move(task));
  BLI_thread_queue_push(pool->background_queue, task_mem);
}

static void background_task_pool_work_and_wait(TaskPool *pool)
{
  /* Signal background thread to stop waiting for new tasks if none are
   * left, and wait for tasks and thread to finish. */
  BLI_thread_queue_nowait(pool->background_queue);
  BLI_thread_queue_wait_finish(pool->background_queue);
  BLI_threadpool_remove(&pool->background_threads, pool);
}

static void background_task_pool_cancel(TaskPool *pool)
{
  pool->background_is_canceling = true;

  /* Remove tasks not yet started by background thread. */
  BLI_thread_queue_nowait(pool->background_queue);
  while (Task *task = (Task *)BLI_thread_queue_pop(pool->background_queue)) {
    task->~Task();
    MEM_freeN(task);
  }

  /* Let background thread finish or cancel task it is working on. */
  BLI_threadpool_remove(&pool->background_threads, pool);
  pool->background_is_canceling = false;
}

static bool background_task_pool_canceled(TaskPool *pool)
{
  return pool->background_is_canceling;
}

static void background_task_pool_free(TaskPool *pool)
{
  background_task_pool_work_and_wait(pool);

  BLI_threadpool_end(&pool->background_threads);
  BLI_thread_queue_free(pool->background_queue);
}

/* Task Pool */

static TaskPool *task_pool_create_ex(void *userdata, TaskPoolType type, TaskPriority priority)
{
  /* Ensure malloc will go fine from threads.
   *
   * This is needed because we could be in the main thread here
   * and malloc could be non-thread safe at this point because
   * no other jobs are running. */
  BLI_threaded_malloc_begin();

  const bool use_threads = BLI_task_scheduler_num_threads() > 1 && type != TASK_POOL_NO_THREADS;

  /* Background task pool uses regular TBB scheduling if available. Only when
   * building without TBB or running with -t 1 do we need to ensure these tasks
   * do not block the main thread. */
  if (type == TASK_POOL_BACKGROUND && use_threads) {
    type = TASK_POOL_TBB;
  }

  /* Allocate task pool. */
  TaskPool *pool = (TaskPool *)MEM_callocN(sizeof(TaskPool), "TaskPool");

  pool->type = type;
  pool->use_threads = use_threads;

  pool->userdata = userdata;
  BLI_mutex_init(&pool->user_mutex);

  switch (type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_create(pool, priority);
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_create(pool);
      break;
  }

  return pool;
}

/**
 * Create a normal task pool. Tasks will be executed as soon as they are added.
 */
TaskPool *BLI_task_pool_create(void *userdata, TaskPriority priority)
{
  return task_pool_create_ex(userdata, TASK_POOL_TBB, priority);
}
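
/* A minimal usage sketch of the pool API (illustrative only; `square_task`,
 * `square_all` and `values` are hypothetical caller-side names, not part of
 * this file):
 *
 *   static void square_task(TaskPool *pool, void *taskdata)
 *   {
 *     int *value = (int *)taskdata;
 *     *value = *value * *value;
 *   }
 *
 *   void square_all(int *values, int count)
 *   {
 *     TaskPool *pool = BLI_task_pool_create(NULL, TASK_PRIORITY_HIGH);
 *     for (int i = 0; i < count; i++) {
 *       BLI_task_pool_push(pool, square_task, &values[i], false, NULL);
 *     }
 *     BLI_task_pool_work_and_wait(pool);
 *     BLI_task_pool_free(pool);
 *   }
 */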

/**
 * Create a background task pool.
 * In a multi-threaded context there is no difference from #BLI_task_pool_create(),
 * but in the single-threaded case it is guaranteed to have at least one worker thread to run on
 * (i.e. you don't have to call #BLI_task_pool_work_and_wait
 * on it to be sure it will be processed).
 *
 * \note Background pools are non-recursive
 * (that is, you should not create other background pools in tasks assigned to a background pool,
 * they could end up never being executed, since the 'fallback' background thread is already
 * busy with the parent task in a single-threaded context).
 */
TaskPool *BLI_task_pool_create_background(void *userdata, TaskPriority priority)
{
  return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND, priority);
}
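
/* Usage sketch (illustrative; `bake_job` and `job_data` are hypothetical):
 * the task starts running in the background as soon as it is pushed, so the
 * caller only blocks when it finally needs the result:
 *
 *   TaskPool *pool = BLI_task_pool_create_background(job_data, TASK_PRIORITY_LOW);
 *   BLI_task_pool_push(pool, bake_job, job_data, false, NULL);
 *   // ... keep doing other work on this thread ...
 *   BLI_task_pool_work_and_wait(pool);
 *   BLI_task_pool_free(pool);
 */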

/**
 * Similar to BLI_task_pool_create() but does not schedule any tasks for execution
 * until BLI_task_pool_work_and_wait() is called. This helps reduce threading
 * overhead when pushing a huge number of small initial tasks from the main thread.
 */
TaskPool *BLI_task_pool_create_suspended(void *userdata, TaskPriority priority)
{
  return task_pool_create_ex(userdata, TASK_POOL_TBB_SUSPENDED, priority);
}
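
/* Usage sketch (illustrative; `process_chunk`, `chunks` and `num_chunks` are
 * hypothetical): all tasks are queued without starting, then the whole batch
 * runs on the single work_and_wait() call:
 *
 *   TaskPool *pool = BLI_task_pool_create_suspended(NULL, TASK_PRIORITY_HIGH);
 *   for (int i = 0; i < num_chunks; i++) {
 *     BLI_task_pool_push(pool, process_chunk, &chunks[i], false, NULL);
 *   }
 *   BLI_task_pool_work_and_wait(pool);  // Tasks only start executing here.
 *   BLI_task_pool_free(pool);
 */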

/**
 * Single-threaded task pool that executes pushed tasks immediately, for
 * debugging purposes.
 */
TaskPool *BLI_task_pool_create_no_threads(void *userdata)
{
  return task_pool_create_ex(userdata, TASK_POOL_NO_THREADS, TASK_PRIORITY_HIGH);
}
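
/* Usage sketch (illustrative; `my_task` and `data` are hypothetical): every
 * pushed task runs inline on the calling thread, which makes scheduling
 * deterministic when hunting threading bugs:
 *
 *   TaskPool *pool = BLI_task_pool_create_no_threads(NULL);
 *   BLI_task_pool_push(pool, my_task, data, false, NULL);  // Runs here, immediately.
 *   BLI_task_pool_work_and_wait(pool);                     // Nothing left to wait for.
 *   BLI_task_pool_free(pool);
 */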

/**
 * Task pool that executes one task after the other, possibly on different threads
 * but never in parallel.
 */
TaskPool *BLI_task_pool_create_background_serial(void *userdata, TaskPriority priority)
{
  return task_pool_create_ex(userdata, TASK_POOL_BACKGROUND_SERIAL, priority);
}
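
/* Usage sketch (illustrative; `write_frame` and `frames` are hypothetical):
 * tasks are handed to a single background worker in push order, so each one
 * finishes before the next starts:
 *
 *   TaskPool *pool = BLI_task_pool_create_background_serial(NULL, TASK_PRIORITY_LOW);
 *   for (int i = 0; i < num_frames; i++) {
 *     BLI_task_pool_push(pool, write_frame, &frames[i], false, NULL);
 *   }
 *   BLI_task_pool_work_and_wait(pool);
 *   BLI_task_pool_free(pool);
 */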

void BLI_task_pool_free(TaskPool *pool)
{
  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_free(pool);
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_free(pool);
      break;
  }

  BLI_mutex_end(&pool->user_mutex);

  MEM_freeN(pool);

  BLI_threaded_malloc_end();
}

void BLI_task_pool_push(TaskPool *pool,
                        TaskRunFunction run,
                        void *taskdata,
                        bool free_taskdata,
                        TaskFreeFunction freedata)
{
  Task task(pool, run, taskdata, free_taskdata, freedata);

  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_run(pool, std::move(task));
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_run(pool, std::move(task));
      break;
  }
}

void BLI_task_pool_work_and_wait(TaskPool *pool)
{
  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_work_and_wait(pool);
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_work_and_wait(pool);
      break;
  }
}

void BLI_task_pool_cancel(TaskPool *pool)
{
  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      tbb_task_pool_cancel(pool);
      break;
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      background_task_pool_cancel(pool);
      break;
  }
}

bool BLI_task_pool_canceled(TaskPool *pool)
{
  switch (pool->type) {
    case TASK_POOL_TBB:
    case TASK_POOL_TBB_SUSPENDED:
    case TASK_POOL_NO_THREADS:
      return tbb_task_pool_canceled(pool);
    case TASK_POOL_BACKGROUND:
    case TASK_POOL_BACKGROUND_SERIAL:
      return background_task_pool_canceled(pool);
  }
  BLI_assert(!"BLI_task_pool_canceled: Control flow should not come here!");
  return false;
}

void *BLI_task_pool_user_data(TaskPool *pool)
{
  return pool->userdata;
}

ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool)
{
  return &pool->user_mutex;
}