/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

/** \file
 * \ingroup bli
 *
 * A generic task system which can be used for any task based subsystem.
 */

#include <stdio.h>  /* for fprintf() */
#include <stdlib.h>
#include <string.h> /* for memset() */

#include "MEM_guardedalloc.h"

#include "DNA_listBase.h"

#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_mempool.h"
#include "BLI_task.h"
#include "BLI_threads.h"

#include "atomic_ops.h"

/* Define this to enable some detailed statistics printing. */
#undef DEBUG_STATS

/* Types */

/* Number of per-thread pre-allocated tasks.
 *
 * For more details, see the description of TaskMemPool.
 */
#define MEMPOOL_SIZE 256

/* Number of tasks which are pushed directly to the local thread queue.
 *
 * This allows a thread to fetch its next task without locking the whole queue.
 */
#define LOCAL_QUEUE_SIZE 1

/* Number of tasks which are allowed to be scheduled in a delayed manner.
 *
 * This allows using fewer locks per graph node children schedule. More details
 * can be found at TaskThreadLocalStorage::do_delayed_push.
 */
#define DELAYED_QUEUE_SIZE 4096

#ifndef NDEBUG
#  define ASSERT_THREAD_ID(scheduler, thread_id) \
    do { \
      if (!BLI_thread_is_main()) { \
        TaskThread *thread = pthread_getspecific(scheduler->tls_id_key); \
        if (thread == NULL) { \
          BLI_assert(thread_id == 0); \
        } \
        else { \
          BLI_assert(thread_id == thread->id); \
        } \
      } \
      else { \
        BLI_assert(thread_id == 0); \
      } \
    } while (false)
#else
#  define ASSERT_THREAD_ID(scheduler, thread_id)
#endif
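
/* A usage sketch for the macro above (illustrative; the caller and call site
 * are assumptions, not part of this file): internal functions which receive a
 * thread_id can verify early that the ID really belongs to the calling thread,
 * since the TLS lookups below silently rely on that.
 *
 *   static void some_internal_function(TaskPool *pool, int thread_id)
 *   {
 *     ASSERT_THREAD_ID(pool->scheduler, thread_id);
 *     ...
 *   }
 */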

typedef struct Task {
  struct Task *next, *prev;

  TaskRunFunction run;
  void *taskdata;
  bool free_taskdata;
  TaskFreeFunction freedata;
  TaskPool *pool;
} Task;

/* This is a per-thread storage of pre-allocated tasks.
 *
 * The idea behind this is simple: reduce the number of malloc() calls when
 * pushing a new task to the pool. This is done by keeping memory from tasks
 * which have already finished, so instead of freeing that memory we put it
 * into the pool for later re-use.
 *
 * The tricky part here is to avoid any inter-thread synchronization, hence no
 * lock must exist around this pool. The pool becomes the owner of the pointer
 * from the freed task, and only the corresponding thread is able to use this
 * pool (no memory stealing and such).
 *
 * This leads to the following use of the pool:
 *
 * - task_push() should provide the proper thread ID from which the task is
 *   being pushed.
 *
 * - The task allocation function checks the corresponding memory pool, and if
 *   there is any memory in there it marks that memory as re-used, removes it
 *   from the pool and uses it for the new task.
 *
 *   At this moment the task queue owns the memory.
 *
 * - When the task is done and task_free() is called, the memory is put back
 *   into the pool which corresponds to the thread which handled the task.
 */
typedef struct TaskMemPool {
  /* Number of pre-allocated tasks in the pool. */
  int num_tasks;
  /* Pre-allocated task memory pointers. */
  Task *tasks[MEMPOOL_SIZE];
} TaskMemPool;
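
/* A minimal sketch of the lock-free fast path this pool enables (illustrative
 * only; the real logic lives in task_alloc() and task_free() below). Because
 * each thread only ever touches its own pool, no mutex is needed:
 *
 *   if (task_mempool->num_tasks > 0) {
 *     task = task_mempool->tasks[--task_mempool->num_tasks];  // re-use, no malloc()
 *   }
 *   else {
 *     task = MEM_mallocN(sizeof(Task), "New task");  // slow path
 *   }
 */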

#ifdef DEBUG_STATS
typedef struct TaskMemPoolStats {
  /* Number of allocations. */
  int num_alloc;
  /* Number of avoided allocations (pointer was re-used from the pool). */
  int num_reuse;
  /* Number of allocations discarded due to pool saturation. */
  int num_discard;
} TaskMemPoolStats;
#endif

typedef struct TaskThreadLocalStorage {
  /* Memory pool for faster task allocation.
   * The idea is to re-use memory of tasks finished/discarded by this thread.
   */
  TaskMemPool task_mempool;

  /* Local queue keeps the thread alive by keeping a small amount of tasks
   * ready to be picked up without causing global thread locks for
   * synchronization.
   */
  int num_local_queue;
  Task *local_queue[LOCAL_QUEUE_SIZE];

  /* Thread can be marked for delayed tasks push. This is helpful when it is
   * known that lots of subsequent task pushes will happen from the same thread
   * without "interrupting" for task execution.
   *
   * We try to accumulate as many tasks as possible in a local queue without
   * any locks first, and then we push all of them into a scheduler's queue
   * from within a single mutex lock.
   */
  bool do_delayed_push;
  int num_delayed_queue;
  Task *delayed_queue[DELAYED_QUEUE_SIZE];
} TaskThreadLocalStorage;
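
/* A minimal sketch of the delayed-push pattern described above (illustrative;
 * it assumes the public wrappers BLI_task_pool_delayed_push_begin()/_end() and
 * BLI_task_pool_push_from_thread() declared in BLI_task.h):
 *
 *   BLI_task_pool_delayed_push_begin(pool, thread_id);
 *   for (i = 0; i < num_children; i++) {
 *     BLI_task_pool_push_from_thread(pool, run, taskdata[i], false, priority, thread_id);
 *   }
 *   BLI_task_pool_delayed_push_end(pool, thread_id);  // one lock flushes the whole batch
 */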

struct TaskPool {
  TaskScheduler *scheduler;

  volatile size_t num;
  ThreadMutex num_mutex;
  ThreadCondition num_cond;

  void *userdata;
  ThreadMutex user_mutex;

  volatile bool do_cancel;
  volatile bool do_work;

  volatile bool is_suspended;
  bool start_suspended;
  ListBase suspended_queue;
  size_t num_suspended;

  /* If set, this pool may never be work_and_wait'ed, which means TaskScheduler
   * has to use its special background fallback thread in case we are in a
   * single-threaded situation.
   */
  bool run_in_background;

  /* This is the task scheduler's ID of the thread on which the pool was
   * constructed. It is used to access the task TLS.
   */
  int thread_id;

  /* For pools which are created from a non-main thread which is not a
   * scheduler worker thread, we can't re-use any of the scheduler's threads'
   * TLS and have to use our own.
   */
  bool use_local_tls;
  TaskThreadLocalStorage local_tls;
#ifndef NDEBUG
  pthread_t creator_thread_id;
#endif

#ifdef DEBUG_STATS
  TaskMemPoolStats *mempool_stats;
#endif
};

struct TaskScheduler {
  pthread_t *threads;
  struct TaskThread *task_threads;
  int num_threads;
  bool background_thread_only;

  ListBase queue;
  ThreadMutex queue_mutex;
  ThreadCondition queue_cond;

  ThreadMutex startup_mutex;
  ThreadCondition startup_cond;
  volatile int num_thread_started;

  volatile bool do_exit;

  /* NOTE: In pthread's TLS we store the whole TaskThread structure. */
  pthread_key_t tls_id_key;
};

typedef struct TaskThread {
  TaskScheduler *scheduler;
  int id;
  TaskThreadLocalStorage tls;
} TaskThread;

/* Helper */
BLI_INLINE void task_data_free(Task *task, const int thread_id)
{
  if (task->free_taskdata) {
    if (task->freedata) {
      task->freedata(task->pool, task->taskdata, thread_id);
    }
    else {
      MEM_freeN(task->taskdata);
    }
  }
}

BLI_INLINE void initialize_task_tls(TaskThreadLocalStorage *tls)
{
  memset(tls, 0, sizeof(TaskThreadLocalStorage));
}

BLI_INLINE TaskThreadLocalStorage *get_task_tls(TaskPool *pool, const int thread_id)
{
  TaskScheduler *scheduler = pool->scheduler;
  BLI_assert(thread_id >= 0);
  BLI_assert(thread_id <= scheduler->num_threads);
  if (pool->use_local_tls && thread_id == 0) {
    BLI_assert(pool->thread_id == 0);
    BLI_assert(!BLI_thread_is_main());
    BLI_assert(pthread_equal(pthread_self(), pool->creator_thread_id));
    return &pool->local_tls;
  }
  if (thread_id == 0) {
    BLI_assert(BLI_thread_is_main());
    return &scheduler->task_threads[pool->thread_id].tls;
  }
  return &scheduler->task_threads[thread_id].tls;
}

BLI_INLINE void free_task_tls(TaskThreadLocalStorage *tls)
{
  TaskMemPool *task_mempool = &tls->task_mempool;
  for (int i = 0; i < task_mempool->num_tasks; ++i) {
    MEM_freeN(task_mempool->tasks[i]);
  }
}

static Task *task_alloc(TaskPool *pool, const int thread_id)
{
  BLI_assert(thread_id <= pool->scheduler->num_threads);
  if (thread_id != -1) {
    BLI_assert(thread_id >= 0);
    BLI_assert(thread_id <= pool->scheduler->num_threads);
    TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
    TaskMemPool *task_mempool = &tls->task_mempool;
    /* Try to re-use task memory from the thread-local storage. */
    if (task_mempool->num_tasks > 0) {
      --task_mempool->num_tasks;
      /* Success! We've just avoided a task allocation. */
#ifdef DEBUG_STATS
      pool->mempool_stats[thread_id].num_reuse++;
#endif
      return task_mempool->tasks[task_mempool->num_tasks];
    }
    /* We are doomed to allocate new task data. */
#ifdef DEBUG_STATS
    pool->mempool_stats[thread_id].num_alloc++;
#endif
  }
  return MEM_mallocN(sizeof(Task), "New task");
}

static void task_free(TaskPool *pool, Task *task, const int thread_id)
{
  task_data_free(task, thread_id);
  BLI_assert(thread_id >= 0);
  BLI_assert(thread_id <= pool->scheduler->num_threads);
  if (thread_id == 0) {
    BLI_assert(pool->use_local_tls || BLI_thread_is_main());
  }
  TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id);
  TaskMemPool *task_mempool = &tls->task_mempool;
  if (task_mempool->num_tasks < MEMPOOL_SIZE - 1) {
    /* Successfully allowed the task to be re-used later. */
    task_mempool->tasks[task_mempool->num_tasks] = task;
    ++task_mempool->num_tasks;
  }
  else {
    /* Local storage saturated, no other way than to just discard the memory.
     *
     * TODO(sergey): We can perhaps store such a pointer in a global scheduler
     * pool, maybe it will be faster than discarding and allocating again.
     */
    MEM_freeN(task);
#ifdef DEBUG_STATS
    pool->mempool_stats[thread_id].num_discard++;
#endif
  }
}

/* Task Scheduler */

static void task_pool_num_decrease(TaskPool *pool, size_t done)
{
  BLI_mutex_lock(&pool->num_mutex);

  BLI_assert(pool->num >= done);

  pool->num -= done;

  if (pool->num == 0) {
    BLI_condition_notify_all(&pool->num_cond);
  }

  BLI_mutex_unlock(&pool->num_mutex);
}

static void task_pool_num_increase(TaskPool *pool, size_t new)
{
  BLI_mutex_lock(&pool->num_mutex);

  pool->num += new;
  BLI_condition_notify_all(&pool->num_cond);

  BLI_mutex_unlock(&pool->num_mutex);
}

static bool task_scheduler_thread_wait_pop(TaskScheduler *scheduler, Task **task)
{
  bool found_task = false;
  BLI_mutex_lock(&scheduler->queue_mutex);

  while (!scheduler->queue.first && !scheduler->do_exit) {
    BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
  }

  do {
    Task *current_task;

    /* Assuming we can only have an empty queue in the 'exit' case here seems
     * logical (we should only be here after our worker thread has been woken up
     * from a condition_wait(), which only happens after a new task was added to
     * the queue), but it is wrong.
     * Waiting on a condition may wake up the thread even if the condition is
     * not signaled (spurious wake-ups), and some race condition may also empty
     * the queue **after** the condition has been signaled, but **before** the
     * awoken thread reaches this point...
     * See http://stackoverflow.com/questions/8594591
     *
     * So we only abort here if do_exit is set.
     */
    if (scheduler->do_exit) {
      BLI_mutex_unlock(&scheduler->queue_mutex);
      return false;
    }

    for (current_task = scheduler->queue.first; current_task != NULL;
         current_task = current_task->next) {
      TaskPool *pool = current_task->pool;

      if (scheduler->background_thread_only && !pool->run_in_background) {
        continue;
      }

      *task = current_task;
      found_task = true;
      BLI_remlink(&scheduler->queue, *task);
      break;
    }
    if (!found_task) {
      BLI_condition_wait(&scheduler->queue_cond, &scheduler->queue_mutex);
    }
  } while (!found_task);

  BLI_mutex_unlock(&scheduler->queue_mutex);

  return true;
}

BLI_INLINE void handle_local_queue(TaskThreadLocalStorage *tls, const int thread_id)
{
  BLI_assert(!tls->do_delayed_push);
  while (tls->num_local_queue > 0) {
    /* We pop the task from the queue before handling it, so the handler of the
     * task can push the next job to the local queue.
     */
    tls->num_local_queue--;
    Task *local_task = tls->local_queue[tls->num_local_queue];
    /* TODO(sergey): Double-check work_and_wait() doesn't handle other
     * pools' tasks.
     */
    TaskPool *local_pool = local_task->pool;
    local_task->run(local_pool, local_task->taskdata, thread_id);
    task_free(local_pool, local_task, thread_id);
  }
  BLI_assert(!tls->do_delayed_push);
}

static void *task_scheduler_thread_run(void *thread_p)
{
  TaskThread *thread = (TaskThread *)thread_p;
  TaskThreadLocalStorage *tls = &thread->tls;
  TaskScheduler *scheduler = thread->scheduler;
  int thread_id = thread->id;
  Task *task;

  pthread_setspecific(scheduler->tls_id_key, thread);

  /* Signal the main thread when all threads have started. */
  BLI_mutex_lock(&scheduler->startup_mutex);
  scheduler->num_thread_started++;
  if (scheduler->num_thread_started == scheduler->num_threads) {
    BLI_condition_notify_one(&scheduler->startup_cond);
  }
  BLI_mutex_unlock(&scheduler->startup_mutex);

  /* Keep popping off tasks. */
  while (task_scheduler_thread_wait_pop(scheduler, &task)) {
    TaskPool *pool = task->pool;

    /* Run task. */
    BLI_assert(!tls->do_delayed_push);
    task->run(pool, task->taskdata, thread_id);
    BLI_assert(!tls->do_delayed_push);

    /* Delete task. */
    task_free(pool, task, thread_id);

    /* Handle all tasks from the local queue. */
    handle_local_queue(tls, thread_id);

    /* Notify pool that a task was done. */
    task_pool_num_decrease(pool, 1);
  }

  return NULL;
}

TaskScheduler *BLI_task_scheduler_create(int num_threads)
{
  TaskScheduler *scheduler = MEM_callocN(sizeof(TaskScheduler), "TaskScheduler");

  /* Multiple places can use this task scheduler, sharing the same
   * threads, so we keep track of the number of users. */
  scheduler->do_exit = false;

  BLI_listbase_clear(&scheduler->queue);
  BLI_mutex_init(&scheduler->queue_mutex);
  BLI_condition_init(&scheduler->queue_cond);

  BLI_mutex_init(&scheduler->startup_mutex);
  BLI_condition_init(&scheduler->startup_cond);
  scheduler->num_thread_started = 0;

  if (num_threads == 0) {
    /* Automatic number of threads will be main thread + num cores. */
    num_threads = BLI_system_thread_count();
  }

  /* Main thread will also work, so we count it too. */
  num_threads -= 1;

  /* Add background-only thread if needed. */
  if (num_threads == 0) {
    scheduler->background_thread_only = true;
    num_threads = 1;
  }

  scheduler->task_threads = MEM_mallocN(sizeof(TaskThread) * (num_threads + 1),
                                        "TaskScheduler task threads");

  /* Initialize TLS for the main thread. */
  initialize_task_tls(&scheduler->task_threads[0].tls);

  pthread_key_create(&scheduler->tls_id_key, NULL);

  /* Launch threads that will be waiting for work. */
  if (num_threads > 0) {
    int i;

    scheduler->num_threads = num_threads;
    scheduler->threads = MEM_callocN(sizeof(pthread_t) * num_threads, "TaskScheduler threads");

    for (i = 0; i < num_threads; i++) {
      TaskThread *thread = &scheduler->task_threads[i + 1];
      thread->scheduler = scheduler;
      thread->id = i + 1;
      initialize_task_tls(&thread->tls);

      if (pthread_create(&scheduler->threads[i], NULL, task_scheduler_thread_run, thread) != 0) {
        fprintf(stderr, "TaskScheduler failed to launch thread %d/%d\n", i, num_threads);
      }
    }
  }

  /* Wait for all worker threads to start before returning to the caller, to prevent the case
   * where threads are still starting and pthread_join is called, which causes a deadlock on
   * pthreads4w.
   */
  BLI_mutex_lock(&scheduler->startup_mutex);
  /* NOTE: Use a loop here to avoid a false-positive everything-is-ready caused by a spurious
   * thread wake-up. */
  while (scheduler->num_thread_started != num_threads) {
    BLI_condition_wait(&scheduler->startup_cond, &scheduler->startup_mutex);
  }
  BLI_mutex_unlock(&scheduler->startup_mutex);

  return scheduler;
}
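
/* A minimal lifecycle sketch (illustrative; the calling code is an assumption,
 * not part of this file). A single scheduler is normally created once at
 * startup and shared by every task pool:
 *
 *   TaskScheduler *scheduler = BLI_task_scheduler_create(0);  // 0 = auto-detect
 *   ...create task pools on top of the scheduler and push work...
 *   BLI_task_scheduler_free(scheduler);
 */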

void BLI_task_scheduler_free(TaskScheduler *scheduler)
{
  Task *task;

  /* Stop all waiting threads. */
  BLI_mutex_lock(&scheduler->queue_mutex);
  scheduler->do_exit = true;
  BLI_condition_notify_all(&scheduler->queue_cond);
  BLI_mutex_unlock(&scheduler->queue_mutex);

  pthread_key_delete(scheduler->tls_id_key);

  /* Delete threads. */
  if (scheduler->threads) {
    int i;

    for (i = 0; i < scheduler->num_threads; i++) {
      if (pthread_join(scheduler->threads[i], NULL) != 0) {
        fprintf(stderr, "TaskScheduler failed to join thread %d/%d\n", i, scheduler->num_threads);
      }
    }

    MEM_freeN(scheduler->threads);
  }

  /* Delete task thread data. */
  if (scheduler->task_threads) {
    for (int i = 0; i < scheduler->num_threads + 1; ++i) {
      TaskThreadLocalStorage *tls = &scheduler->task_threads[i].tls;
      free_task_tls(tls);
    }

    MEM_freeN(scheduler->task_threads);
  }

  /* Delete leftover tasks. */
  for (task = scheduler->queue.first; task; task = task->next) {
    task_data_free(task, 0);
  }
  BLI_freelistN(&scheduler->queue);

  /* Delete mutex/condition. */
  BLI_mutex_end(&scheduler->queue_mutex);
  BLI_condition_end(&scheduler->queue_cond);
  BLI_mutex_end(&scheduler->startup_mutex);
  BLI_condition_end(&scheduler->startup_cond);

  MEM_freeN(scheduler);
}

int BLI_task_scheduler_num_threads(TaskScheduler *scheduler)
{
  return scheduler->num_threads + 1;
}
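
/* Worked example for the thread accounting above (assuming
 * BLI_system_thread_count() reports 8): BLI_task_scheduler_create(0) spawns
 * 7 worker threads, and BLI_task_scheduler_num_threads() reports 8, since
 * the main thread participates in the work as well. */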

static void task_scheduler_push(TaskScheduler *scheduler, Task *task, TaskPriority priority)
{
  task_pool_num_increase(task->pool, 1);

  /* Add task to queue. */
  BLI_mutex_lock(&scheduler->queue_mutex);

  if (priority == TASK_PRIORITY_HIGH) {
    BLI_addhead(&scheduler->queue, task);
  }
  else {
    BLI_addtail(&scheduler->queue, task);
  }

  BLI_condition_notify_one(&scheduler->queue_cond);
  BLI_mutex_unlock(&scheduler->queue_mutex);
}

static void task_scheduler_push_all(TaskScheduler *scheduler,
                                    TaskPool *pool,
                                    Task **tasks,
                                    int num_tasks)
{
  if (num_tasks == 0) {
    return;
  }

  task_pool_num_increase(pool, num_tasks);

  BLI_mutex_lock(&scheduler->queue_mutex);

  for (int i = 0; i < num_tasks; i++) {
    BLI_addhead(&scheduler->queue, tasks[i]);
  }

  BLI_condition_notify_all(&scheduler->queue_cond);
  BLI_mutex_unlock(&scheduler->queue_mutex);
}
 | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | static void task_scheduler_clear(TaskScheduler *scheduler, TaskPool *pool) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   Task *task, *nexttask; | 
					
						
							|  |  |  |   size_t done = 0; | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   BLI_mutex_lock(&scheduler->queue_mutex); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   /* free all tasks from this pool from the queue */ | 
					
						
							|  |  |  |   for (task = scheduler->queue.first; task; task = nexttask) { | 
					
						
							|  |  |  |     nexttask = task->next; | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |     if (task->pool == pool) { | 
					
						
							|  |  |  |       task_data_free(task, pool->thread_id); | 
					
						
							|  |  |  |       BLI_freelinkN(&scheduler->queue, task); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |       done++; | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |   } | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   BLI_mutex_unlock(&scheduler->queue_mutex); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   /* notify done */ | 
					
						
							|  |  |  |   task_pool_num_decrease(pool, done); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | /* Task Pool */ | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-03-07 17:29:39 +01:00
										 |  |  | static TaskPool *task_pool_create_ex(TaskScheduler *scheduler, | 
					
						
							|  |  |  |                                      void *userdata, | 
					
						
							|  |  |  |                                      const bool is_background, | 
					
						
							|  |  |  |                                      const bool is_suspended) | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   TaskPool *pool = MEM_mallocN(sizeof(TaskPool), "TaskPool"); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2015-11-02 16:57:48 +01:00
										 |  |  | #ifndef NDEBUG
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   /* Assert we do not try to create a background pool from some parent task -
 | 
					
						
							|  |  |  |    * those only work OK from main thread. */ | 
					
						
							|  |  |  |   if (is_background) { | 
					
						
							|  |  |  |     const pthread_t thread_id = pthread_self(); | 
					
						
							|  |  |  |     int i = scheduler->num_threads; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |     while (i--) { | 
					
						
							|  |  |  |       BLI_assert(!pthread_equal(scheduler->threads[i], thread_id)); | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |   } | 
					
						
							| 
									
										
										
										
											2015-11-02 16:57:48 +01:00
										 |  |  | #endif
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   pool->scheduler = scheduler; | 
					
						
							|  |  |  |   pool->num = 0; | 
					
						
							|  |  |  |   pool->do_cancel = false; | 
					
						
							|  |  |  |   pool->do_work = false; | 
					
						
							|  |  |  |   pool->is_suspended = is_suspended; | 
					
						
							|  |  |  |   pool->start_suspended = is_suspended; | 
					
						
							|  |  |  |   pool->num_suspended = 0; | 
					
						
							|  |  |  |   pool->suspended_queue.first = pool->suspended_queue.last = NULL; | 
					
						
							|  |  |  |   pool->run_in_background = is_background; | 
					
						
							|  |  |  |   pool->use_local_tls = false; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   BLI_mutex_init(&pool->num_mutex); | 
					
						
							|  |  |  |   BLI_condition_init(&pool->num_cond); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   pool->userdata = userdata; | 
					
						
							|  |  |  |   BLI_mutex_init(&pool->user_mutex); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   if (BLI_thread_is_main()) { | 
					
						
							|  |  |  |     pool->thread_id = 0; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  |   else { | 
					
						
							|  |  |  |     TaskThread *thread = pthread_getspecific(scheduler->tls_id_key); | 
					
						
							|  |  |  |     if (thread == NULL) { | 
					
						
							|  |  |  |       /* NOTE: Task pool is created from non-main thread which is not
 | 
					
						
							|  |  |  |        * managed by the task scheduler. We identify ourselves as thread ID | 
					
						
							|  |  |  |        * 0 but we do not use scheduler's TLS storage and use our own | 
					
						
							|  |  |  |        * instead to avoid any possible threading conflicts. | 
					
						
							|  |  |  |        */ | 
					
						
							|  |  |  |       pool->thread_id = 0; | 
					
						
							|  |  |  |       pool->use_local_tls = true; | 
					
						
							| 
									
										
										
										
											2017-04-13 13:32:39 +02:00
										 |  |  | #ifndef NDEBUG
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |       pool->creator_thread_id = pthread_self(); | 
					
						
							| 
									
										
										
										
											2017-04-13 13:32:39 +02:00
										 |  |  | #endif
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |       initialize_task_tls(&pool->local_tls); | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |     else { | 
					
						
							|  |  |  |       pool->thread_id = thread->id; | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |   } | 
					
						
							| 
									
										
										
										
											2016-05-10 09:55:58 +02:00
										 |  |  | 
 | 
					
						
							|  |  |  | #ifdef DEBUG_STATS
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   pool->mempool_stats = MEM_callocN(sizeof(*pool->mempool_stats) * (scheduler->num_threads + 1), | 
					
						
							|  |  |  |                                     "per-taskpool mempool stats"); | 
					
						
							| 
									
										
										
										
											2016-05-10 09:55:58 +02:00
										 |  |  | #endif
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   /* Ensure malloc will go fine from threads,
 | 
					
						
							|  |  |  |    * | 
					
						
							|  |  |  |    * This is needed because we could be in main thread here | 
					
						
							|  |  |  |    * and malloc could be non-thread safe at this point because | 
					
						
							|  |  |  |    * no other jobs are running. | 
					
						
							|  |  |  |    */ | 
					
						
							|  |  |  |   BLI_threaded_malloc_begin(); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   return pool; | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2015-11-02 16:57:48 +01:00
										 |  |  | /**
 | 
					
						
							|  |  |  |  * Create a normal task pool. | 
					
						
							|  |  |  |  * This means that in single-threaded context, it will not be executed at all until you call | 
					
						
							|  |  |  |  * \a BLI_task_pool_work_and_wait() on it. | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   return task_pool_create_ex(scheduler, userdata, false, false); | 
					
						
							| 
									
										
										
										
											2015-11-02 16:57:48 +01:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | /**
 | 
					
						
							|  |  |  |  * Create a background task pool. | 
					
						
							| 
									
										
										
										
											2019-04-22 00:54:27 +10:00
										 |  |  |  * In multi-threaded context, there is no differences with #BLI_task_pool_create(), | 
					
						
							|  |  |  |  * but in single-threaded case it is ensured to have at least one worker thread to run on | 
					
						
							|  |  |  |  * (i.e. you don't have to call #BLI_task_pool_work_and_wait | 
					
						
							|  |  |  |  * on it to be sure it will be processed). | 
					
						
							| 
									
										
										
										
											2015-11-02 16:57:48 +01:00
										 |  |  |  * | 
					
						
							| 
									
										
										
										
											2019-04-22 00:54:27 +10:00
										 |  |  |  * \note Background pools are non-recursive | 
					
						
							|  |  |  |  * (that is, you should not create other background pools in tasks assigned to a background pool, | 
					
						
							|  |  |  |  * they could end never being executed, since the 'fallback' background thread is already | 
					
						
							|  |  |  |  * busy with parent task in single-threaded context). | 
					
						
							| 
									
										
										
										
											2015-11-02 16:57:48 +01:00
										 |  |  |  */ | 
					
						
							|  |  |  | TaskPool *BLI_task_pool_create_background(TaskScheduler *scheduler, void *userdata) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   return task_pool_create_ex(scheduler, userdata, true, false); | 
					
						
							| 
									
										
										
										
											2017-03-07 17:29:39 +01:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | /**
 | 
					
						
							|  |  |  |  * Similar to BLI_task_pool_create() but does not schedule any tasks for execution | 
					
						
							| 
									
										
										
										
											2019-03-08 17:48:49 +11:00
										 |  |  |  * for until BLI_task_pool_work_and_wait() is called. This helps reducing threading | 
					
						
							| 
									
										
										
										
											2017-03-07 17:29:39 +01:00
										 |  |  |  * overhead when pushing huge amount of small initial tasks from the main thread. | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | TaskPool *BLI_task_pool_create_suspended(TaskScheduler *scheduler, void *userdata) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   return task_pool_create_ex(scheduler, userdata, false, true); | 
					
						
							| 
									
										
										
										
											2015-11-02 16:57:48 +01:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | void BLI_task_pool_free(TaskPool *pool) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   BLI_task_pool_cancel(pool); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   BLI_mutex_end(&pool->num_mutex); | 
					
						
							|  |  |  |   BLI_condition_end(&pool->num_cond); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   BLI_mutex_end(&pool->user_mutex); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-05-10 09:55:58 +02:00
										 |  |  | #ifdef DEBUG_STATS
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   printf("Thread ID    Allocated   Reused   Discarded\n"); | 
					
						
							|  |  |  |   for (int i = 0; i < pool->scheduler->num_threads + 1; ++i) { | 
					
						
							|  |  |  |     printf("%02d           %05d       %05d    %05d\n", | 
					
						
							|  |  |  |            i, | 
					
						
							|  |  |  |            pool->mempool_stats[i].num_alloc, | 
					
						
							|  |  |  |            pool->mempool_stats[i].num_reuse, | 
					
						
							|  |  |  |            pool->mempool_stats[i].num_discard); | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  |   MEM_freeN(pool->mempool_stats); | 
					
						
							| 
									
										
										
										
											2016-05-10 09:55:58 +02:00
										 |  |  | #endif
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   if (pool->use_local_tls) { | 
					
						
							|  |  |  |     free_task_tls(&pool->local_tls); | 
					
						
							|  |  |  |   } | 
					
						
							| 
									
										
										
										
											2017-04-12 18:18:33 +02:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   MEM_freeN(pool); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   BLI_threaded_malloc_end(); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-05-31 15:24:09 +02:00
										 |  |  | BLI_INLINE bool task_can_use_local_queues(TaskPool *pool, int thread_id) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   return (thread_id != -1 && (thread_id != pool->thread_id || pool->do_work)); | 
					
						
							| 
									
										
										
										
											2017-05-31 15:24:09 +02:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | static void task_pool_push(TaskPool *pool, | 
					
						
							|  |  |  |                            TaskRunFunction run, | 
					
						
							|  |  |  |                            void *taskdata, | 
					
						
							|  |  |  |                            bool free_taskdata, | 
					
						
							|  |  |  |                            TaskFreeFunction freedata, | 
					
						
							|  |  |  |                            TaskPriority priority, | 
					
						
							|  |  |  |                            int thread_id) | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   /* Allocate task and fill it's properties. */ | 
					
						
							|  |  |  |   Task *task = task_alloc(pool, thread_id); | 
					
						
							|  |  |  |   task->run = run; | 
					
						
							|  |  |  |   task->taskdata = taskdata; | 
					
						
							|  |  |  |   task->free_taskdata = free_taskdata; | 
					
						
							|  |  |  |   task->freedata = freedata; | 
					
						
							|  |  |  |   task->pool = pool; | 
					
						
							|  |  |  |   /* For suspended pools we put everything yo a global queue first
 | 
					
						
							|  |  |  |    * and exit as soon as possible. | 
					
						
							|  |  |  |    * | 
					
						
							|  |  |  |    * This tasks will be moved to actual execution when pool is | 
					
						
							|  |  |  |    * activated by work_and_wait(). | 
					
						
							|  |  |  |    */ | 
					
						
							|  |  |  |   if (pool->is_suspended) { | 
					
						
							|  |  |  |     BLI_addhead(&pool->suspended_queue, task); | 
					
						
							|  |  |  |     atomic_fetch_and_add_z(&pool->num_suspended, 1); | 
					
						
							|  |  |  |     return; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  |   /* Populate to any local queue first, this is cheapest push ever. */ | 
					
						
							|  |  |  |   if (task_can_use_local_queues(pool, thread_id)) { | 
					
						
							|  |  |  |     ASSERT_THREAD_ID(pool->scheduler, thread_id); | 
					
						
							|  |  |  |     TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id); | 
					
						
							|  |  |  |     /* Try to push to a local execution queue.
 | 
					
						
							|  |  |  |      * These tasks will be picked up next. | 
					
						
							|  |  |  |      */ | 
					
						
							|  |  |  |     if (tls->num_local_queue < LOCAL_QUEUE_SIZE) { | 
					
						
							|  |  |  |       tls->local_queue[tls->num_local_queue] = task; | 
					
						
							|  |  |  |       tls->num_local_queue++; | 
					
						
							|  |  |  |       return; | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |     /* If we are in the delayed tasks push mode, we push tasks to a
 | 
					
						
							|  |  |  |      * temporary local queue first without any locks, and then move them | 
					
						
							|  |  |  |      * to global execution queue with a single lock. | 
					
						
							|  |  |  |      */ | 
					
						
							|  |  |  |     if (tls->do_delayed_push && tls->num_delayed_queue < DELAYED_QUEUE_SIZE) { | 
					
						
							|  |  |  |       tls->delayed_queue[tls->num_delayed_queue] = task; | 
					
						
							|  |  |  |       tls->num_delayed_queue++; | 
					
						
							|  |  |  |       return; | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  |   /* Do push to a global execution pool, slowest possible method,
 | 
					
						
							|  |  |  |    * causes quite reasonable amount of threading overhead. | 
					
						
							|  |  |  |    */ | 
					
						
							|  |  |  |   task_scheduler_push(pool->scheduler, task, priority); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | void BLI_task_pool_push_ex(TaskPool *pool, | 
					
						
							|  |  |  |                            TaskRunFunction run, | 
					
						
							|  |  |  |                            void *taskdata, | 
					
						
							|  |  |  |                            bool free_taskdata, | 
					
						
							|  |  |  |                            TaskFreeFunction freedata, | 
					
						
							|  |  |  |                            TaskPriority priority) | 
					
						
							| 
									
										
										
										
											2016-05-10 09:55:58 +02:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   task_pool_push(pool, run, taskdata, free_taskdata, freedata, priority, -1); | 
					
						
							| 
									
										
										
										
											2016-05-10 09:55:58 +02:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2015-11-02 16:52:19 +01:00
										 |  |  | void BLI_task_pool_push( | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |     TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskPriority priority) | 
					
						
							| 
									
										
										
										
											2015-11-02 16:52:19 +01:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   BLI_task_pool_push_ex(pool, run, taskdata, free_taskdata, NULL, priority); | 
					
						
							| 
									
										
										
										
											2015-11-02 16:52:19 +01:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | void BLI_task_pool_push_from_thread(TaskPool *pool, | 
					
						
							|  |  |  |                                     TaskRunFunction run, | 
					
						
							|  |  |  |                                     void *taskdata, | 
					
						
							|  |  |  |                                     bool free_taskdata, | 
					
						
							|  |  |  |                                     TaskPriority priority, | 
					
						
							|  |  |  |                                     int thread_id) | 
					
						
							| 
									
										
										
										
											2016-05-10 09:55:58 +02:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   task_pool_push(pool, run, taskdata, free_taskdata, NULL, priority, thread_id); | 
					
						
							| 
									
										
										
										
											2016-05-10 09:55:58 +02:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | void BLI_task_pool_work_and_wait(TaskPool *pool) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   TaskThreadLocalStorage *tls = get_task_tls(pool, pool->thread_id); | 
					
						
							|  |  |  |   TaskScheduler *scheduler = pool->scheduler; | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   if (atomic_fetch_and_and_uint8((uint8_t *)&pool->is_suspended, 0)) { | 
					
						
							|  |  |  |     if (pool->num_suspended) { | 
					
						
							|  |  |  |       task_pool_num_increase(pool, pool->num_suspended); | 
					
						
							|  |  |  |       BLI_mutex_lock(&scheduler->queue_mutex); | 
					
						
							| 
									
										
										
										
											2017-03-07 17:29:39 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |       BLI_movelisttolist(&scheduler->queue, &pool->suspended_queue); | 
					
						
							| 
									
										
										
										
											2017-03-07 17:29:39 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |       BLI_condition_notify_all(&scheduler->queue_cond); | 
					
						
							|  |  |  |       BLI_mutex_unlock(&scheduler->queue_mutex); | 
					
						
							| 
									
										
										
										
											2018-12-03 22:55:18 +03:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |       pool->num_suspended = 0; | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |   } | 
					
						
							| 
									
										
										
										
											2017-03-07 17:29:39 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   pool->do_work = true; | 
					
						
							| 
									
										
										
										
											2017-03-06 15:40:05 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   ASSERT_THREAD_ID(pool->scheduler, pool->thread_id); | 
					
						
							| 
									
										
										
										
											2017-03-06 11:21:50 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   handle_local_queue(tls, pool->thread_id); | 
					
						
							| 
									
										
										
										
											2018-12-03 22:55:18 +03:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   BLI_mutex_lock(&pool->num_mutex); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   while (pool->num != 0) { | 
					
						
							|  |  |  |     Task *task, *work_task = NULL; | 
					
						
							|  |  |  |     bool found_task = false; | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |     BLI_mutex_unlock(&pool->num_mutex); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |     BLI_mutex_lock(&scheduler->queue_mutex); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |     /* find task from this pool. if we get a task from another pool,
 | 
					
						
							|  |  |  |      * we can get into deadlock */ | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |     for (task = scheduler->queue.first; task; task = task->next) { | 
					
						
							|  |  |  |       if (task->pool == pool) { | 
					
						
							|  |  |  |         work_task = task; | 
					
						
							|  |  |  |         found_task = true; | 
					
						
							|  |  |  |         BLI_remlink(&scheduler->queue, task); | 
					
						
							|  |  |  |         break; | 
					
						
							|  |  |  |       } | 
					
						
							|  |  |  |     } | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |     BLI_mutex_unlock(&scheduler->queue_mutex); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |     /* if found task, do it, otherwise wait until other tasks are done */ | 
					
						
							|  |  |  |     if (found_task) { | 
					
						
							|  |  |  |       /* run task */ | 
					
						
							|  |  |  |       BLI_assert(!tls->do_delayed_push); | 
					
						
							|  |  |  |       work_task->run(pool, work_task->taskdata, pool->thread_id); | 
					
						
							|  |  |  |       BLI_assert(!tls->do_delayed_push); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |       /* delete task */ | 
					
						
							|  |  |  |       task_free(pool, task, pool->thread_id); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |       /* Handle all tasks from local queue. */ | 
					
						
							|  |  |  |       handle_local_queue(tls, pool->thread_id); | 
					
						
							| 
									
										
										
										
											2017-03-06 15:40:05 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |       /* notify pool task was done */ | 
					
						
							|  |  |  |       task_pool_num_decrease(pool, 1); | 
					
						
							|  |  |  |     } | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |     BLI_mutex_lock(&pool->num_mutex); | 
					
						
							|  |  |  |     if (pool->num == 0) { | 
					
						
							|  |  |  |       break; | 
					
						
							|  |  |  |     } | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |     if (!found_task) { | 
					
						
							|  |  |  |       BLI_condition_wait(&pool->num_cond, &pool->num_mutex); | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |   } | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   BLI_mutex_unlock(&pool->num_mutex); | 
					
						
							| 
									
										
										
										
											2017-03-07 17:29:39 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   BLI_assert(tls->num_local_queue == 0); | 
					
						
							| 
									
										
										
										
											2018-12-03 22:55:18 +03:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | void BLI_task_pool_work_wait_and_reset(TaskPool *pool) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   BLI_task_pool_work_and_wait(pool); | 
					
						
							| 
									
										
										
										
											2018-12-03 22:55:18 +03:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   pool->do_work = false; | 
					
						
							|  |  |  |   pool->is_suspended = pool->start_suspended; | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | void BLI_task_pool_cancel(TaskPool *pool) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   pool->do_cancel = true; | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   task_scheduler_clear(pool->scheduler, pool); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   /* wait until all entries are cleared */ | 
					
						
							|  |  |  |   BLI_mutex_lock(&pool->num_mutex); | 
					
						
							|  |  |  |   while (pool->num) { | 
					
						
							|  |  |  |     BLI_condition_wait(&pool->num_cond, &pool->num_mutex); | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  |   BLI_mutex_unlock(&pool->num_mutex); | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   pool->do_cancel = false; | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2013-10-26 01:06:19 +00:00
										 |  |  | bool BLI_task_pool_canceled(TaskPool *pool) | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   return pool->do_cancel; | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | void *BLI_task_pool_userdata(TaskPool *pool) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   return pool->userdata; | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | ThreadMutex *BLI_task_pool_user_mutex(TaskPool *pool) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   return &pool->user_mutex; | 
					
						
							| 
									
										
										
										
											2013-10-12 14:08:59 +00:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2017-05-31 15:24:09 +02:00
										 |  |  | void BLI_task_pool_delayed_push_begin(TaskPool *pool, int thread_id) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   if (task_can_use_local_queues(pool, thread_id)) { | 
					
						
							|  |  |  |     ASSERT_THREAD_ID(pool->scheduler, thread_id); | 
					
						
							|  |  |  |     TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id); | 
					
						
							|  |  |  |     tls->do_delayed_push = true; | 
					
						
							|  |  |  |   } | 
					
						
							| 
									
										
										
										
											2017-05-31 15:24:09 +02:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | void BLI_task_pool_delayed_push_end(TaskPool *pool, int thread_id) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   if (task_can_use_local_queues(pool, thread_id)) { | 
					
						
							|  |  |  |     ASSERT_THREAD_ID(pool->scheduler, thread_id); | 
					
						
							|  |  |  |     TaskThreadLocalStorage *tls = get_task_tls(pool, thread_id); | 
					
						
							|  |  |  |     BLI_assert(tls->do_delayed_push); | 
					
						
							|  |  |  |     task_scheduler_push_all(pool->scheduler, pool, tls->delayed_queue, tls->num_delayed_queue); | 
					
						
							|  |  |  |     tls->do_delayed_push = false; | 
					
						
							|  |  |  |     tls->num_delayed_queue = 0; | 
					
						
							|  |  |  |   } | 
					
						
							| 
									
										
										
										
											2017-05-31 15:24:09 +02:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2014-10-22 11:56:52 +02:00
										 |  |  | /* Parallel range routines */ | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | /**
 | 
					
						
							|  |  |  |  * | 
					
						
							|  |  |  |  * Main functions: | 
					
						
							|  |  |  |  * - #BLI_task_parallel_range | 
					
						
							| 
									
										
										
										
											2016-05-13 11:03:04 +02:00
										 |  |  |  * - #BLI_task_parallel_listbase (#ListBase - double linked list) | 
					
						
							| 
									
										
										
										
											2014-10-22 11:56:52 +02:00
										 |  |  |  * | 
					
						
							|  |  |  |  * TODO: | 
					
						
							|  |  |  |  * - #BLI_task_parallel_foreach_link (#Link - single linked list) | 
					
						
							|  |  |  |  * - #BLI_task_parallel_foreach_ghash/gset (#GHash/#GSet - hash & set) | 
					
						
							|  |  |  |  * - #BLI_task_parallel_foreach_mempool (#BLI_mempool - iterate over mempools) | 
					
						
							|  |  |  |  */ | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2015-11-25 11:01:59 +01:00
										 |  |  | /* Allows to avoid using malloc for userdata_chunk in tasks, when small enough. */ | 
					
						
							|  |  |  | #define MALLOCA(_size) ((_size) <= 8192) ? alloca((_size)) : MEM_mallocN((_size), __func__)
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | #define MALLOCA_FREE(_mem, _size) \
 | 
					
						
							|  |  |  |   if (((_mem) != NULL) && ((_size) > 8192)) \ | 
					
						
							|  |  |  |   MEM_freeN((_mem)) | 
					
						
							| 
									
										
										
										
											2015-11-25 11:01:59 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2014-10-22 11:56:52 +02:00
										 |  |  | typedef struct ParallelRangeState { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   int start, stop; | 
					
						
							|  |  |  |   void *userdata; | 
					
						
							| 
									
										
										
										
											2016-01-16 15:59:37 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   TaskParallelRangeFunc func; | 
					
						
							| 
									
										
										
										
											2014-10-22 11:56:52 +02:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   int iter; | 
					
						
							|  |  |  |   int chunk_size; | 
					
						
							| 
									
										
										
										
											2014-10-22 11:56:52 +02:00
										 |  |  | } ParallelRangeState; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | BLI_INLINE bool parallel_range_next_iter_get(ParallelRangeState *__restrict state, | 
					
						
							|  |  |  |                                              int *__restrict iter, | 
					
						
							|  |  |  |                                              int *__restrict count) | 
					
						
							| 
									
										
										
										
											2014-10-22 11:56:52 +02:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   int previter = atomic_fetch_and_add_int32(&state->iter, state->chunk_size); | 
					
						
							| 
									
										
										
										
											2016-05-16 15:57:19 +02:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   *iter = previter; | 
					
						
							|  |  |  |   *count = max_ii(0, min_ii(state->chunk_size, state->stop - previter)); | 
					
						
							| 
									
										
										
										
											2016-05-16 15:57:19 +02:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   return (previter < state->stop); | 
					
						
							| 
									
										
										
										
											2014-10-22 11:56:52 +02:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | static void parallel_range_func(TaskPool *__restrict pool, void *userdata_chunk, int thread_id) | 
					
						
							| 
									
										
										
										
											2014-10-22 11:56:52 +02:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   ParallelRangeState *__restrict state = BLI_task_pool_userdata(pool); | 
					
						
							|  |  |  |   ParallelRangeTLS tls = { | 
					
						
							|  |  |  |       .thread_id = thread_id, | 
					
						
							|  |  |  |       .userdata_chunk = userdata_chunk, | 
					
						
							|  |  |  |   }; | 
					
						
							|  |  |  |   int iter, count; | 
					
						
							|  |  |  |   while (parallel_range_next_iter_get(state, &iter, &count)) { | 
					
						
							|  |  |  |     for (int i = 0; i < count; ++i) { | 
					
						
							|  |  |  |       state->func(state->userdata, iter + i, &tls); | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |   } | 
					
						
							| 
									
										
										
										
											2014-10-22 11:56:52 +02:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | static void parallel_range_single_thread(const int start, | 
					
						
							|  |  |  |                                          int const stop, | 
					
						
							| 
									
										
										
										
											2018-01-08 12:07:09 +01:00
										 |  |  |                                          void *userdata, | 
					
						
							|  |  |  |                                          TaskParallelRangeFunc func, | 
					
						
							|  |  |  |                                          const ParallelRangeSettings *settings) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   void *userdata_chunk = settings->userdata_chunk; | 
					
						
							|  |  |  |   const size_t userdata_chunk_size = settings->userdata_chunk_size; | 
					
						
							|  |  |  |   void *userdata_chunk_local = NULL; | 
					
						
							|  |  |  |   const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL); | 
					
						
							|  |  |  |   if (use_userdata_chunk) { | 
					
						
							|  |  |  |     userdata_chunk_local = MALLOCA(userdata_chunk_size); | 
					
						
							|  |  |  |     memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size); | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  |   ParallelRangeTLS tls = { | 
					
						
							|  |  |  |       .thread_id = 0, | 
					
						
							|  |  |  |       .userdata_chunk = userdata_chunk_local, | 
					
						
							|  |  |  |   }; | 
					
						
							|  |  |  |   for (int i = start; i < stop; ++i) { | 
					
						
							|  |  |  |     func(userdata, i, &tls); | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  |   if (settings->func_finalize != NULL) { | 
					
						
							|  |  |  |     settings->func_finalize(userdata, userdata_chunk_local); | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  |   MALLOCA_FREE(userdata_chunk_local, userdata_chunk_size); | 
					
						
							| 
									
										
										
										
											2018-01-08 12:07:09 +01:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2015-11-25 11:01:59 +01:00
										 |  |  | /**
 | 
					
						
							| 
									
										
										
										
											2019-04-22 00:54:27 +10:00
										 |  |  |  * This function allows to parallelized for loops in a similar way to OpenMP's | 
					
						
							|  |  |  |  * 'parallel for' statement. | 
					
						
							| 
									
										
										
										
											2015-11-25 11:01:59 +01:00
										 |  |  |  * | 
					
						
							| 
									
										
										
										
											2018-01-08 12:07:09 +01:00
										 |  |  |  * See public API doc of ParallelRangeSettings for description of all settings. | 
					
						
							| 
									
										
										
										
											2015-11-25 11:01:59 +01:00
										 |  |  |  */ | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | void BLI_task_parallel_range(const int start, | 
					
						
							|  |  |  |                              const int stop, | 
					
						
							| 
									
										
										
										
											2018-01-08 12:07:09 +01:00
										 |  |  |                              void *userdata, | 
					
						
							|  |  |  |                              TaskParallelRangeFunc func, | 
					
						
							|  |  |  |                              const ParallelRangeSettings *settings) | 
					
						
							| 
									
										
										
										
											2014-10-22 11:56:52 +02:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   TaskScheduler *task_scheduler; | 
					
						
							|  |  |  |   TaskPool *task_pool; | 
					
						
							|  |  |  |   ParallelRangeState state; | 
					
						
							|  |  |  |   int i, num_threads, num_tasks; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   void *userdata_chunk = settings->userdata_chunk; | 
					
						
							|  |  |  |   const size_t userdata_chunk_size = settings->userdata_chunk_size; | 
					
						
							|  |  |  |   void *userdata_chunk_local = NULL; | 
					
						
							|  |  |  |   void *userdata_chunk_array = NULL; | 
					
						
							|  |  |  |   const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   if (start == stop) { | 
					
						
							|  |  |  |     return; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   BLI_assert(start < stop); | 
					
						
							|  |  |  |   if (userdata_chunk_size != 0) { | 
					
						
							|  |  |  |     BLI_assert(userdata_chunk != NULL); | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   /* If it's not enough data to be crunched, don't bother with tasks at all,
 | 
					
						
							|  |  |  |    * do everything from the main thread. | 
					
						
							|  |  |  |    */ | 
					
						
							|  |  |  |   if (!settings->use_threading) { | 
					
						
							|  |  |  |     parallel_range_single_thread(start, stop, userdata, func, settings); | 
					
						
							|  |  |  |     return; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   task_scheduler = BLI_task_scheduler_get(); | 
					
						
							|  |  |  |   num_threads = BLI_task_scheduler_num_threads(task_scheduler); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   /* The idea here is to prevent creating task for each of the loop iterations
 | 
					
						
							|  |  |  |    * and instead have tasks which are evenly distributed across CPU cores and | 
					
						
							|  |  |  |    * pull next iter to be crunched using the queue. | 
					
						
							|  |  |  |    */ | 
					
						
							|  |  |  |   num_tasks = num_threads + 2; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   state.start = start; | 
					
						
							|  |  |  |   state.stop = stop; | 
					
						
							|  |  |  |   state.userdata = userdata; | 
					
						
							|  |  |  |   state.func = func; | 
					
						
							|  |  |  |   state.iter = start; | 
					
						
							|  |  |  |   switch (settings->scheduling_mode) { | 
					
						
							|  |  |  |     case TASK_SCHEDULING_STATIC: | 
					
						
							|  |  |  |       state.chunk_size = max_ii(settings->min_iter_per_thread, (stop - start) / (num_tasks)); | 
					
						
							|  |  |  |       break; | 
					
						
							|  |  |  |     case TASK_SCHEDULING_DYNAMIC: | 
					
						
							|  |  |  |       /* TODO(sergey): Make it configurable from min_iter_per_thread. */ | 
					
						
							|  |  |  |       state.chunk_size = 32; | 
					
						
							|  |  |  |       break; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   num_tasks = min_ii(num_tasks, max_ii(1, (stop - start) / state.chunk_size)); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   if (num_tasks == 1) { | 
					
						
							|  |  |  |     parallel_range_single_thread(start, stop, userdata, func, settings); | 
					
						
							|  |  |  |     return; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   task_pool = BLI_task_pool_create_suspended(task_scheduler, &state); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   /* NOTE: This way we are adding a memory barrier and ensure all worker
 | 
					
						
							|  |  |  |    * threads can read and modify the value, without any locks. */ | 
					
						
							|  |  |  |   atomic_fetch_and_add_int32(&state.iter, 0); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   if (use_userdata_chunk) { | 
					
						
							|  |  |  |     userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks); | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   for (i = 0; i < num_tasks; i++) { | 
					
						
							|  |  |  |     if (use_userdata_chunk) { | 
					
						
							|  |  |  |       userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i); | 
					
						
							|  |  |  |       memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size); | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |     /* Use this pool's pre-allocated tasks. */ | 
					
						
							|  |  |  |     BLI_task_pool_push_from_thread(task_pool, | 
					
						
							|  |  |  |                                    parallel_range_func, | 
					
						
							|  |  |  |                                    userdata_chunk_local, | 
					
						
							|  |  |  |                                    false, | 
					
						
							|  |  |  |                                    TASK_PRIORITY_HIGH, | 
					
						
							|  |  |  |                                    task_pool->thread_id); | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   BLI_task_pool_work_and_wait(task_pool); | 
					
						
							|  |  |  |   BLI_task_pool_free(task_pool); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   if (use_userdata_chunk) { | 
					
						
							|  |  |  |     if (settings->func_finalize != NULL) { | 
					
						
							|  |  |  |       for (i = 0; i < num_tasks; i++) { | 
					
						
							|  |  |  |         userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i); | 
					
						
							|  |  |  |         settings->func_finalize(userdata, userdata_chunk_local); | 
					
						
							|  |  |  |       } | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |     MALLOCA_FREE(userdata_chunk_array, userdata_chunk_size * num_tasks); | 
					
						
							|  |  |  |   } | 
					
						
							| 
									
										
										
										
											2014-10-22 11:56:52 +02:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2015-11-25 11:01:59 +01:00
										 |  |  | #undef MALLOCA
 | 
					
						
							|  |  |  | #undef MALLOCA_FREE
 | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-05-13 11:03:04 +02:00
										 |  |  | typedef struct ParallelListbaseState { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   void *userdata; | 
					
						
							|  |  |  |   TaskParallelListbaseFunc func; | 
					
						
							| 
									
										
										
										
											2016-05-13 11:03:04 +02:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   int chunk_size; | 
					
						
							|  |  |  |   int index; | 
					
						
							|  |  |  |   Link *link; | 
					
						
							|  |  |  |   SpinLock lock; | 
					
						
							| 
									
										
										
										
											2016-05-13 11:03:04 +02:00
										 |  |  | } ParallelListState; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | BLI_INLINE Link *parallel_listbase_next_iter_get(ParallelListState *__restrict state, | 
					
						
							|  |  |  |                                                  int *__restrict index, | 
					
						
							|  |  |  |                                                  int *__restrict count) | 
					
						
							| 
									
										
										
										
											2016-05-13 11:03:04 +02:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   int task_count = 0; | 
					
						
							|  |  |  |   BLI_spin_lock(&state->lock); | 
					
						
							|  |  |  |   Link *result = state->link; | 
					
						
							|  |  |  |   if (LIKELY(result != NULL)) { | 
					
						
							|  |  |  |     *index = state->index; | 
					
						
							|  |  |  |     while (state->link != NULL && task_count < state->chunk_size) { | 
					
						
							|  |  |  |       ++task_count; | 
					
						
							|  |  |  |       state->link = state->link->next; | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |     state->index += task_count; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  |   BLI_spin_unlock(&state->lock); | 
					
						
							|  |  |  |   *count = task_count; | 
					
						
							|  |  |  |   return result; | 
					
						
							| 
									
										
										
										
											2016-05-13 11:03:04 +02:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | static void parallel_listbase_func(TaskPool *__restrict pool, | 
					
						
							|  |  |  |                                    void *UNUSED(taskdata), | 
					
						
							|  |  |  |                                    int UNUSED(threadid)) | 
					
						
							| 
									
										
										
										
											2016-05-13 11:03:04 +02:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   ParallelListState *__restrict state = BLI_task_pool_userdata(pool); | 
					
						
							|  |  |  |   Link *link; | 
					
						
							|  |  |  |   int index, count; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   while ((link = parallel_listbase_next_iter_get(state, &index, &count)) != NULL) { | 
					
						
							|  |  |  |     for (int i = 0; i < count; ++i) { | 
					
						
							|  |  |  |       state->func(state->userdata, link, index + i); | 
					
						
							|  |  |  |       link = link->next; | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |   } | 
					
						
							| 
									
										
										
										
											2016-05-13 11:03:04 +02:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | static void task_parallel_listbase_no_threads(struct ListBase *listbase, | 
					
						
							|  |  |  |                                               void *userdata, | 
					
						
							|  |  |  |                                               TaskParallelListbaseFunc func) | 
					
						
							| 
									
										
										
										
											2018-11-20 12:17:03 +01:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   int i = 0; | 
					
						
							|  |  |  |   for (Link *link = listbase->first; link != NULL; link = link->next, ++i) { | 
					
						
							|  |  |  |     func(userdata, link, i); | 
					
						
							|  |  |  |   } | 
					
						
							| 
									
										
										
										
											2018-11-20 12:17:03 +01:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | /* NOTE: The idea here is to compensate for rather measurable threading
 | 
					
						
							|  |  |  |  * overhead caused by fetching tasks. With too many CPU threads we are starting | 
					
						
							|  |  |  |  * to spend too much time in those overheads. */ | 
					
						
							|  |  |  | BLI_INLINE int task_parallel_listbasecalc_chunk_size(const int num_threads) | 
					
						
							|  |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   if (num_threads > 32) { | 
					
						
							|  |  |  |     return 128; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  |   else if (num_threads > 16) { | 
					
						
							|  |  |  |     return 64; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  |   return 32; | 
					
						
							| 
									
										
										
										
											2018-11-20 12:17:03 +01:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2016-05-13 11:03:04 +02:00
										 |  |  | /**
 | 
					
						
							|  |  |  |  * This function allows to parallelize for loops over ListBase items. | 
					
						
							|  |  |  |  * | 
					
						
							| 
									
										
										
										
											2018-12-12 12:50:58 +11:00
										 |  |  |  * \param listbase: The double linked list to loop over. | 
					
						
							|  |  |  |  * \param userdata: Common userdata passed to all instances of \a func. | 
					
						
							|  |  |  |  * \param func: Callback function. | 
					
						
							| 
									
										
										
										
											2019-04-22 00:54:27 +10:00
										 |  |  |  * \param use_threading: If \a true, actually split-execute loop in threads, | 
					
						
							|  |  |  |  * else just do a sequential forloop | 
					
						
							|  |  |  |  * (allows caller to use any kind of test to switch on parallelization or not). | 
					
						
							| 
									
										
										
										
											2016-05-13 11:03:04 +02:00
										 |  |  |  * | 
					
						
							| 
									
										
										
										
											2019-04-22 00:54:27 +10:00
										 |  |  |  * \note There is no static scheduling here, | 
					
						
							|  |  |  |  * since it would need another full loop over items to count them. | 
					
						
							| 
									
										
										
										
											2016-05-13 11:03:04 +02:00
										 |  |  |  */ | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | void BLI_task_parallel_listbase(struct ListBase *listbase, | 
					
						
							|  |  |  |                                 void *userdata, | 
					
						
							|  |  |  |                                 TaskParallelListbaseFunc func, | 
					
						
							|  |  |  |                                 const bool use_threading) | 
					
						
							| 
									
										
										
										
											2016-05-13 11:03:04 +02:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   if (BLI_listbase_is_empty(listbase)) { | 
					
						
							|  |  |  |     return; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  |   if (!use_threading) { | 
					
						
							|  |  |  |     task_parallel_listbase_no_threads(listbase, userdata, func); | 
					
						
							|  |  |  |     return; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  |   TaskScheduler *task_scheduler = BLI_task_scheduler_get(); | 
					
						
							|  |  |  |   const int num_threads = BLI_task_scheduler_num_threads(task_scheduler); | 
					
						
							|  |  |  |   /* TODO(sergey): Consider making chunk size configurable. */ | 
					
						
							|  |  |  |   const int chunk_size = task_parallel_listbasecalc_chunk_size(num_threads); | 
					
						
							|  |  |  |   const int num_tasks = min_ii(num_threads, BLI_listbase_count(listbase) / chunk_size); | 
					
						
							|  |  |  |   if (num_tasks <= 1) { | 
					
						
							|  |  |  |     task_parallel_listbase_no_threads(listbase, userdata, func); | 
					
						
							|  |  |  |     return; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   ParallelListState state; | 
					
						
							|  |  |  |   TaskPool *task_pool = BLI_task_pool_create_suspended(task_scheduler, &state); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   state.index = 0; | 
					
						
							|  |  |  |   state.link = listbase->first; | 
					
						
							|  |  |  |   state.userdata = userdata; | 
					
						
							|  |  |  |   state.func = func; | 
					
						
							|  |  |  |   state.chunk_size = chunk_size; | 
					
						
							|  |  |  |   BLI_spin_init(&state.lock); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   BLI_assert(num_tasks > 0); | 
					
						
							|  |  |  |   for (int i = 0; i < num_tasks; i++) { | 
					
						
							|  |  |  |     /* Use this pool's pre-allocated tasks. */ | 
					
						
							|  |  |  |     BLI_task_pool_push_from_thread( | 
					
						
							|  |  |  |         task_pool, parallel_listbase_func, NULL, false, TASK_PRIORITY_HIGH, task_pool->thread_id); | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   BLI_task_pool_work_and_wait(task_pool); | 
					
						
							|  |  |  |   BLI_task_pool_free(task_pool); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   BLI_spin_end(&state.lock); | 
					
						
							| 
									
										
										
										
											2016-05-13 11:03:04 +02:00
										 |  |  | } | 
					
						
							| 
									
										
										
										
											2017-11-23 21:14:43 +01:00
										 |  |  | 
 | 
					
						
							|  |  |  | typedef struct ParallelMempoolState { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   void *userdata; | 
					
						
							|  |  |  |   TaskParallelMempoolFunc func; | 
					
						
							| 
									
										
										
										
											2017-11-23 21:14:43 +01:00
										 |  |  | } ParallelMempoolState; | 
					
						
							|  |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | static void parallel_mempool_func(TaskPool *__restrict pool, void *taskdata, int UNUSED(threadid)) | 
					
						
							| 
									
										
										
										
											2017-11-23 21:14:43 +01:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   ParallelMempoolState *__restrict state = BLI_task_pool_userdata(pool); | 
					
						
							|  |  |  |   BLI_mempool_iter *iter = taskdata; | 
					
						
							|  |  |  |   MempoolIterData *item; | 
					
						
							| 
									
										
										
										
											2017-11-23 21:14:43 +01:00
										 |  |  | 
 | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   while ((item = BLI_mempool_iterstep(iter)) != NULL) { | 
					
						
							|  |  |  |     state->func(state->userdata, item); | 
					
						
							|  |  |  |   } | 
					
						
							| 
									
										
										
										
											2017-11-23 21:14:43 +01:00
										 |  |  | } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  | /**
 | 
					
						
							|  |  |  |  * This function allows to parallelize for loops over Mempool items. | 
					
						
							|  |  |  |  * | 
					
						
							| 
									
										
										
										
											2018-03-14 01:58:46 +11:00
										 |  |  |  * \param mempool: The iterable BLI_mempool to loop over. | 
					
						
							|  |  |  |  * \param userdata: Common userdata passed to all instances of \a func. | 
					
						
							|  |  |  |  * \param func: Callback function. | 
					
						
							| 
									
										
										
										
											2019-04-22 00:54:27 +10:00
										 |  |  |  * \param use_threading: If \a true, actually split-execute loop in threads, | 
					
						
							|  |  |  |  * else just do a sequential for loop | 
					
						
							| 
									
										
										
										
											2018-03-14 01:58:46 +11:00
										 |  |  |  * (allows caller to use any kind of test to switch on parallelization or not). | 
					
						
							| 
									
										
										
										
											2017-11-23 21:14:43 +01:00
										 |  |  |  * | 
					
						
							|  |  |  |  * \note There is no static scheduling here. | 
					
						
							|  |  |  |  */ | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  | void BLI_task_parallel_mempool(BLI_mempool *mempool, | 
					
						
							|  |  |  |                                void *userdata, | 
					
						
							|  |  |  |                                TaskParallelMempoolFunc func, | 
					
						
							|  |  |  |                                const bool use_threading) | 
					
						
							| 
									
										
										
										
											2017-11-23 21:14:43 +01:00
										 |  |  | { | 
					
						
							| 
									
										
										
										
											2019-04-17 06:17:24 +02:00
										 |  |  |   TaskScheduler *task_scheduler; | 
					
						
							|  |  |  |   TaskPool *task_pool; | 
					
						
							|  |  |  |   ParallelMempoolState state; | 
					
						
							|  |  |  |   int i, num_threads, num_tasks; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   if (BLI_mempool_len(mempool) == 0) { | 
					
						
							|  |  |  |     return; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   if (!use_threading) { | 
					
						
							|  |  |  |     BLI_mempool_iter iter; | 
					
						
							|  |  |  |     BLI_mempool_iternew(mempool, &iter); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |     for (void *item = BLI_mempool_iterstep(&iter); item != NULL; | 
					
						
							|  |  |  |          item = BLI_mempool_iterstep(&iter)) { | 
					
						
							|  |  |  |       func(userdata, item); | 
					
						
							|  |  |  |     } | 
					
						
							|  |  |  |     return; | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   task_scheduler = BLI_task_scheduler_get(); | 
					
						
							|  |  |  |   task_pool = BLI_task_pool_create_suspended(task_scheduler, &state); | 
					
						
							|  |  |  |   num_threads = BLI_task_scheduler_num_threads(task_scheduler); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   /* The idea here is to prevent creating task for each of the loop iterations
 | 
					
						
							|  |  |  |    * and instead have tasks which are evenly distributed across CPU cores and | 
					
						
							|  |  |  |    * pull next item to be crunched using the threaded-aware BLI_mempool_iter. | 
					
						
							|  |  |  |    */ | 
					
						
							|  |  |  |   num_tasks = num_threads + 2; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   state.userdata = userdata; | 
					
						
							|  |  |  |   state.func = func; | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   BLI_mempool_iter *mempool_iterators = BLI_mempool_iter_threadsafe_create(mempool, | 
					
						
							|  |  |  |                                                                            (size_t)num_tasks); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   for (i = 0; i < num_tasks; i++) { | 
					
						
							|  |  |  |     /* Use this pool's pre-allocated tasks. */ | 
					
						
							|  |  |  |     BLI_task_pool_push_from_thread(task_pool, | 
					
						
							|  |  |  |                                    parallel_mempool_func, | 
					
						
							|  |  |  |                                    &mempool_iterators[i], | 
					
						
							|  |  |  |                                    false, | 
					
						
							|  |  |  |                                    TASK_PRIORITY_HIGH, | 
					
						
							|  |  |  |                                    task_pool->thread_id); | 
					
						
							|  |  |  |   } | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   BLI_task_pool_work_and_wait(task_pool); | 
					
						
							|  |  |  |   BLI_task_pool_free(task_pool); | 
					
						
							|  |  |  | 
 | 
					
						
							|  |  |  |   BLI_mempool_iter_threadsafe_free(mempool_iterators); | 
					
						
							| 
									
										
										
										
											2017-11-23 21:14:43 +01:00
										 |  |  | } |