2022-02-11 09:07:11 +11:00
|
|
|
/* SPDX-License-Identifier: Apache-2.0 */
|
2017-11-23 21:14:43 +01:00
|
|
|
|
|
|
|
#include "testing/testing.h"
|
2022-02-09 13:08:04 +01:00
|
|
|
#include <atomic>
|
2020-12-04 11:28:09 +01:00
|
|
|
#include <cstring>
|
2017-11-23 21:14:43 +01:00
|
|
|
|
|
|
|
#include "atomic_ops.h"
|
|
|
|
|
2020-01-26 16:38:18 +01:00
|
|
|
#include "MEM_guardedalloc.h"
|
|
|
|
|
2019-06-04 23:23:55 +02:00
|
|
|
#include "BLI_utildefines.h"
|
|
|
|
|
|
|
|
#include "BLI_listbase.h"
|
2017-11-23 21:14:43 +01:00
|
|
|
#include "BLI_mempool.h"
|
|
|
|
#include "BLI_task.h"
|
2022-02-09 13:08:04 +01:00
|
|
|
#include "BLI_task.hh"
|
2017-11-23 21:14:43 +01:00
|
|
|
|
2022-03-30 17:26:42 +11:00
|
|
|
#define ITEMS_NUM 10000
|
2017-11-23 21:14:43 +01:00
|
|
|
|
BLI_task: Add pooled threaded index range iterator, Take II.
This code allows to push a set of different operations all based on
iterations over a range of indices, and then process them all at once
over multiple threads.
This commit also adds unit tests for both old un-pooled, and new pooled
task_parallel_range family of functions, as well as some basic
performances tests.
This is mainly interesting for relatively low amount of individual
tasks, as expected.
E.g. performance tests on a 32 threads machine, for a set of 10
different tasks, shows following improvements when using pooled version
instead of ten sequential calls to BLI_task_parallel_range():
| Num Items | Sequential | Pooled | Speed-up |
| --------- | ---------- | ------- | -------- |
| 10K | 365 us | 138 us | 2.5 x |
| 100K | 877 us | 530 us | 1.66 x |
| 1000K | 5521 us | 4625 us | 1.25 x |
Differential Revision: https://developer.blender.org/D6189
Note: Compared to previous commit yesterday, this reworks atomic handling in
parallel iter code, and fixes a dummy double-free bug.
Now we should only use the two critical values for synchronization from
atomic calls results, which is the proper way to do things.
Reading a value after an atomic operation does not guarantee you will
get the latest value in all cases (especially on Windows release builds
it seems).
2019-11-26 14:26:47 +01:00
|
|
|
/* *** Parallel iterations over range of integer values. *** */
|
|
|
|
|
|
|
|
static void task_range_iter_func(void *userdata, int index, const TaskParallelTLS *__restrict tls)
|
|
|
|
{
|
|
|
|
int *data = (int *)userdata;
|
|
|
|
data[index] = index;
|
|
|
|
*((int *)tls->userdata_chunk) += index;
|
|
|
|
// printf("%d, %d, %d\n", index, data[index], *((int *)tls->userdata_chunk));
|
|
|
|
}
|
|
|
|
|
2022-10-03 17:37:25 -05:00
|
|
|
/* Reduction callback for the `RangeIter` test: folds one finished TLS chunk
 * (a partial sum of indices) into the joined result. */
static void task_range_iter_reduce_func(const void *__restrict /*userdata*/,
                                        void *__restrict join_v,
                                        void *__restrict userdata_chunk)
{
  int *joined_sum = static_cast<int *>(join_v);
  const int *partial_sum = static_cast<const int *>(userdata_chunk);

  *joined_sum += *partial_sum;
}
|
|
|
|
|
|
|
|
/* Checks that `BLI_task_parallel_range()` visits every index exactly once and
 * that per-thread TLS chunks are reduced correctly into the final sum. */
TEST(task, RangeIter)
{
  int data[ITEMS_NUM] = {0};
  int sum = 0;

  BLI_threadapi_init();

  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);
  /* Force work to be split across threads even for this small range. */
  settings.min_iter_per_thread = 1;

  /* NOTE: Fixed mojibake here — the chunk pointer must be the address of
   * `sum` (`&sum`), which had been mangled into a literal `∑` character. */
  settings.userdata_chunk = &sum;
  settings.userdata_chunk_size = sizeof(sum);
  settings.func_reduce = task_range_iter_reduce_func;

  BLI_task_parallel_range(0, ITEMS_NUM, data, task_range_iter_func, &settings);

  /* Those checks should ensure us all items of the range were processed once,
   * and only once, as expected. */
  int expected_sum = 0;
  for (int i = 0; i < ITEMS_NUM; i++) {
    EXPECT_EQ(data[i], i);
    expected_sum += i;
  }
  EXPECT_EQ(sum, expected_sum);

  BLI_threadapi_exit();
}
|
|
|
|
|
2019-06-04 23:23:55 +02:00
|
|
|
/* *** Parallel iterations over mempool items. *** */
|
|
|
|
|
2021-06-09 22:49:45 +10:00
|
|
|
static void task_mempool_iter_func(void *userdata,
|
|
|
|
MempoolIterData *item,
|
2022-10-03 17:37:25 -05:00
|
|
|
const TaskParallelTLS *__restrict /*tls*/)
|
2017-11-23 21:14:43 +01:00
|
|
|
{
|
|
|
|
int *data = (int *)item;
|
|
|
|
int *count = (int *)userdata;
|
|
|
|
|
2020-11-06 17:49:09 +01:00
|
|
|
EXPECT_TRUE(data != nullptr);
|
2017-11-23 21:14:43 +01:00
|
|
|
|
|
|
|
*data += 1;
|
|
|
|
atomic_sub_and_fetch_uint32((uint32_t *)count, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Checks that `BLI_task_parallel_mempool()` visits each item of a
 * non-homogeneous (fragmented) mempool exactly once. */
TEST(task, MempoolIter)
{
  int *data[ITEMS_NUM];
  BLI_threadapi_init();
  BLI_mempool *mempool = BLI_mempool_create(
      sizeof(*data[0]), ITEMS_NUM, 32, BLI_MEMPOOL_ALLOW_ITER);

  /* 'Randomly' add and remove some items from mempool, to create a non-homogeneous one. */
  int items_num = 0;

  /* Fill the pool completely; each item stores `index - 1` so it equals its
   * index after the iterator increments it. */
  for (int i = 0; i < ITEMS_NUM; i++) {
    data[i] = (int *)BLI_mempool_alloc(mempool);
    *data[i] = i - 1;
    items_num++;
  }

  /* Punch holes at every third slot. */
  for (int i = 0; i < ITEMS_NUM; i += 3) {
    BLI_mempool_free(mempool, data[i]);
    data[i] = nullptr;
    items_num--;
  }

  /* Re-fill some of those holes at every seventh slot. */
  for (int i = 0; i < ITEMS_NUM; i += 7) {
    if (data[i] == nullptr) {
      data[i] = (int *)BLI_mempool_alloc(mempool);
      *data[i] = i - 1;
      items_num++;
    }
  }

  /* Carve out short runs of five items every 23 slots. */
  for (int i = 0; i < ITEMS_NUM - 5; i += 23) {
    for (int j = 0; j < 5; j++) {
      if (data[i + j] != nullptr) {
        BLI_mempool_free(mempool, data[i + j]);
        data[i + j] = nullptr;
        items_num--;
      }
    }
  }

  TaskParallelSettings settings;
  BLI_parallel_mempool_settings_defaults(&settings);

  BLI_task_parallel_mempool(mempool, &items_num, task_mempool_iter_func, &settings);

  /* Those checks should ensure us all items of the mempool were processed once, and only once - as
   * expected. */
  EXPECT_EQ(items_num, 0);
  for (int i = 0; i < ITEMS_NUM; i++) {
    if (data[i] != nullptr) {
      EXPECT_EQ(*data[i], i);
    }
  }

  BLI_mempool_destroy(mempool);
  BLI_threadapi_exit();
}
|
2019-06-04 23:23:55 +02:00
|
|
|
|
2021-06-09 22:49:45 +10:00
|
|
|
/* *** Parallel iterations over mempool items with TLS. *** */
|
|
|
|
|
2021-06-18 14:27:43 +10:00
|
|
|
using TaskMemPool_Chunk = struct TaskMemPool_Chunk {
|
2021-06-09 22:49:45 +10:00
|
|
|
ListBase *accumulate_items;
|
2021-06-18 14:27:43 +10:00
|
|
|
};
|
2021-06-09 22:49:45 +10:00
|
|
|
|
2022-10-03 17:37:25 -05:00
|
|
|
static void task_mempool_iter_tls_func(void * /*userdata*/,
|
2021-06-09 22:49:45 +10:00
|
|
|
MempoolIterData *item,
|
|
|
|
const TaskParallelTLS *__restrict tls)
|
|
|
|
{
|
|
|
|
TaskMemPool_Chunk *task_data = (TaskMemPool_Chunk *)tls->userdata_chunk;
|
|
|
|
int *data = (int *)item;
|
|
|
|
|
|
|
|
EXPECT_TRUE(data != nullptr);
|
|
|
|
if (task_data->accumulate_items == nullptr) {
|
2021-12-24 22:17:49 -05:00
|
|
|
task_data->accumulate_items = MEM_cnew<ListBase>(__func__);
|
2021-06-09 22:49:45 +10:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Flip to prove this has been touched. */
|
|
|
|
*data = -*data;
|
|
|
|
|
|
|
|
BLI_addtail(task_data->accumulate_items, BLI_genericNodeN(data));
|
|
|
|
}
|
|
|
|
|
2022-10-03 17:37:25 -05:00
|
|
|
static void task_mempool_iter_tls_reduce(const void *__restrict /*userdata*/,
|
2021-06-09 22:49:45 +10:00
|
|
|
void *__restrict chunk_join,
|
|
|
|
void *__restrict chunk)
|
|
|
|
{
|
|
|
|
TaskMemPool_Chunk *join_chunk = (TaskMemPool_Chunk *)chunk_join;
|
|
|
|
TaskMemPool_Chunk *data_chunk = (TaskMemPool_Chunk *)chunk;
|
|
|
|
|
|
|
|
if (data_chunk->accumulate_items != nullptr) {
|
|
|
|
if (join_chunk->accumulate_items == nullptr) {
|
2021-12-24 22:17:49 -05:00
|
|
|
join_chunk->accumulate_items = MEM_cnew<ListBase>(__func__);
|
2021-06-09 22:49:45 +10:00
|
|
|
}
|
|
|
|
BLI_movelisttolist(join_chunk->accumulate_items, data_chunk->accumulate_items);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-10-03 17:37:25 -05:00
|
|
|
static void task_mempool_iter_tls_free(const void * /*userdata*/, void *__restrict userdata_chunk)
|
2021-06-09 22:49:45 +10:00
|
|
|
{
|
|
|
|
TaskMemPool_Chunk *task_data = (TaskMemPool_Chunk *)userdata_chunk;
|
|
|
|
MEM_freeN(task_data->accumulate_items);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Checks TLS handling of `BLI_task_parallel_mempool()`: every item must end
 * up exactly once in the reduced per-thread accumulation lists. */
TEST(task, MempoolIterTLS)
{
  int *data[ITEMS_NUM];
  BLI_threadapi_init();
  BLI_mempool *mempool = BLI_mempool_create(
      sizeof(*data[0]), ITEMS_NUM, 32, BLI_MEMPOOL_ALLOW_ITER);

  /* Add numbers negative `1..ITEMS_NUM` inclusive. */
  for (int i = 0; i < ITEMS_NUM; i++) {
    data[i] = (int *)BLI_mempool_alloc(mempool);
    *data[i] = -(i + 1);
  }

  TaskParallelSettings settings;
  BLI_parallel_mempool_settings_defaults(&settings);

  TaskMemPool_Chunk tls_data;
  tls_data.accumulate_items = nullptr;

  settings.userdata_chunk = &tls_data;
  settings.userdata_chunk_size = sizeof(tls_data);

  settings.func_free = task_mempool_iter_tls_free;
  settings.func_reduce = task_mempool_iter_tls_reduce;

  BLI_task_parallel_mempool(mempool, nullptr, task_mempool_iter_tls_func, &settings);

  EXPECT_EQ(BLI_listbase_count(tls_data.accumulate_items), ITEMS_NUM);

  /* Check that all elements are added into the list once: the callback flips
   * each value positive, so the total must be the sum 1..ITEMS_NUM. */
  int accumulated = 0;
  for (LinkData *link = (LinkData *)tls_data.accumulate_items->first; link; link = link->next) {
    accumulated += *(int *)link->data;
  }
  EXPECT_EQ(accumulated, (ITEMS_NUM * (ITEMS_NUM + 1)) / 2);

  /* Free list entries first, then the list container itself. */
  BLI_freelistN(tls_data.accumulate_items);
  MEM_freeN(tls_data.accumulate_items);

  BLI_mempool_destroy(mempool);
  BLI_threadapi_exit();
}
|
|
|
|
|
2019-06-04 23:23:55 +02:00
|
|
|
/* *** Parallel iterations over double-linked list items. *** */
|
|
|
|
|
BLI_task: Add new generic `BLI_task_parallel_iterator()`.
This new function is part of the 'parallel for loops' functions. It
takes an iterator callback to generate items to be processed, in
addition to the usual 'process' func callback.
This allows to use common code from BLI_task for a wide range of custom
iteratiors, whithout having to re-invent the wheel of the whole tasks &
data chuncks handling.
This supports all settings features from `BLI_task_parallel_range()`,
including dynamic and static (if total number of items is knwon)
scheduling, TLS data and its finalize callback, etc.
One question here is whether we should provide usercode with a spinlock
by default, or enforce it to always handle its own sync mechanism.
I kept it, since imho it will be needed very often, and generating one
is pretty cheap even if unused...
----------
Additionaly, this commit converts (currently unused)
`BLI_task_parallel_listbase()` to use that generic code. This was done
mostly as proof of concept, but performance-wise it shows some
interesting data, roughly:
- Very light processing (that should not be threaded anyway) is several
times slower, which is expected due to more overhead in loop management
code.
- Heavier processing can be up to 10% quicker (probably thanks to the
switch from dynamic to static scheduling, which reduces a lot locking
to fill-in the per-tasks chunks of data). Similar speed-up in
non-threaded case comes as a surprise though, not sure what can
explain that.
While this conversion is not really needed, imho we should keep it
(instead of existing code for that function), it's easier to have
complex handling logic in as few places as possible, for maintaining and
for improving it.
Note: That work was initially done to allow for D5372 to be possible... Unfortunately that one proved to be not better than orig code on performances point of view.
Reviewed By: sergey
Differential Revision: https://developer.blender.org/D5371
2019-10-30 12:23:45 +01:00
|
|
|
static void task_listbase_iter_func(void *userdata,
|
|
|
|
void *item,
|
|
|
|
int index,
|
2022-10-03 17:37:25 -05:00
|
|
|
const TaskParallelTLS *__restrict /*tls*/)
|
2019-06-04 23:23:55 +02:00
|
|
|
{
|
|
|
|
LinkData *data = (LinkData *)item;
|
|
|
|
int *count = (int *)userdata;
|
|
|
|
|
|
|
|
data->data = POINTER_FROM_INT(POINTER_AS_INT(data->data) + index);
|
|
|
|
atomic_sub_and_fetch_uint32((uint32_t *)count, 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Checks that `BLI_task_parallel_listbase()` visits each list item exactly
 * once and passes the correct per-item index. */
TEST(task, ListBaseIter)
{
  ListBase list = {nullptr, nullptr};
  LinkData *items_buffer = (LinkData *)MEM_calloc_arrayN(
      ITEMS_NUM, sizeof(*items_buffer), __func__);
  BLI_threadapi_init();

  int items_num = 0;
  for (int i = 0; i < ITEMS_NUM; i++) {
    BLI_addtail(&list, &items_buffer[i]);
    items_num++;
  }

  TaskParallelSettings settings;
  BLI_parallel_range_settings_defaults(&settings);

  BLI_task_parallel_listbase(&list, &items_num, task_listbase_iter_func, &settings);

  /* Those checks should ensure us all items of the listbase were processed once, and only once -
   * as expected. */
  EXPECT_EQ(items_num, 0);
  int i;
  LinkData *item;
  for (i = 0, item = (LinkData *)list.first; i < ITEMS_NUM && item != nullptr;
       i++, item = item->next) {
    EXPECT_EQ(POINTER_AS_INT(item->data), i);
  }
  /* `i` must have reached the end of the list exactly at ITEMS_NUM. */
  EXPECT_EQ(ITEMS_NUM, i);

  MEM_freeN(items_buffer);
  BLI_threadapi_exit();
}
|
2022-02-09 13:08:04 +01:00
|
|
|
|
|
|
|
/* Checks that `parallel_invoke()` runs all of its callables exactly once. */
TEST(task, ParallelInvoke)
{
  std::atomic<int> counter = 0;
  const auto increment = [&counter]() { counter.fetch_add(1); };

  blender::threading::parallel_invoke(
      increment, increment, increment, increment, increment, increment);

  EXPECT_EQ(counter, 6);
}
|