/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2009 Blender Foundation.
* All rights reserved.
*/
/** \file
* \ingroup wm
*
* Threaded job manager (high level job access).
*/
#include <string.h>
#include "DNA_windowmanager_types.h"
#include "MEM_guardedalloc.h"
#include "BLI_blenlib.h"
#include "BLI_threads.h"
#include "BLI_utildefines.h"
#include "BKE_context.h"
#include "BKE_global.h"
#include "BKE_sequencer.h"
#include "WM_api.h"
#include "WM_types.h"
#include "wm.h"
#include "wm_event_types.h"
#include "PIL_time.h"
/*
 * Add new job
 * - register in WM
 * - configure callbacks
 *
 * Start or re-run job
 * - if job running
 *   - signal job to end
 *   - add timer notifier to verify when it has ended, to start it
 * - else
 *   - start job
 *   - add timer notifier to handle progress
 *
 * Stop job
 * - signal job to end
 *   on end, job will tag itself as sleeping
 *
 * Remove job
 * - signal job to end
 *   on end, job will remove itself
 *
 * When job is done
 * - the timer is removed and the job frees itself, unless new data was set
 *   meanwhile, in which case the job restarts
 *
 * A usage sketch of this flow follows below.
 */
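/* A minimal usage sketch of the flow above, from hypothetical operator code.
 * None of the `example_*` names, `owner` or the job data exist in this file;
 * they stand in for whatever the caller defines. WM_JOB_TYPE_RENDER is just an
 * example of an existing job type.
 *
 *   wmJob *wm_job = WM_jobs_get(
 *       wm, CTX_wm_window(C), owner, "Example Job", WM_JOB_PROGRESS, WM_JOB_TYPE_RENDER);
 *
 *   ExampleJobData *data = MEM_callocN(sizeof(ExampleJobData), "example job data");
 *   WM_jobs_customdata_set(wm_job, data, example_free);
 *   WM_jobs_timer(wm_job, 0.1, NC_WM | ND_JOB, NC_WM | ND_JOB);
 *   WM_jobs_callbacks(wm_job, example_startjob, NULL, example_update, example_endjob);
 *
 *   WM_jobs_start(wm, wm_job);
 */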
struct wmJob {
struct wmJob *next, *prev;
/** Window the job originates from; keep track of this when deleting windows. */
wmWindow *win;
/** Should store the job's entire own context, used by the start, update and free callbacks. */
void *customdata;
/**
* To prevent CPU overhead, use this one, which only gets called when the job really starts.
* Executed in main thread.
*/
void (*initjob)(void *);
/**
* This performs the actual parallel work.
* Executed in worker thread(s).
*/
void (*startjob)(void *, short *stop, short *do_update, float *progress);
/**
* Called if the thread requests it (see `do_update` flag), at most once per timer step.
* Executed in main thread.
*/
void (*update)(void *);
/**
* Free callback (typically for customdata).
* Executed in main thread.
*/
void (*free)(void *);
/**
* Called when job is stopped.
* Executed in main thread.
*/
void (*endjob)(void *);
/** Running jobs each have their own timer. */
double timestep;
wmTimer *wt;
/** Only start job after specified time delay */
double start_delay_time;
/** The notifier event timers should send */
unsigned int note, endnote;
/* internal */
void *owner;
int flag;
short suspended, running, ready, do_update, stop, job_type;
float progress;
/** For display in header, identification */
char name[128];
/** Once running, we store this separately */
void *run_customdata;
void (*run_free)(void *);
/** We use the BLI_threads API, but only one thread runs per job. */
ListBase threads;
double start_time;
/** Ticket mutex for main thread locking while some job accesses
* data that the main thread might modify at the same time */
TicketMutex *main_thread_mutex;
};
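/* A sketch of a typical startjob callback (hypothetical, not part of this file),
 * matching the field documentation above: the worker polls `stop` so the main
 * thread can end it, reports progress in 0..1, and sets `do_update` so the main
 * thread runs the update callback on the next timer step. `ExampleJobData` and
 * `example_process_item` are placeholders.
 *
 *   static void example_startjob(void *customdata, short *stop, short *do_update, float *progress)
 *   {
 *     ExampleJobData *data = customdata;
 *     for (int i = 0; i < data->tot; i++) {
 *       if (*stop) {
 *         break;
 *       }
 *       example_process_item(data, i);
 *       *progress = (float)(i + 1) / (float)data->tot;
 *       *do_update = true;
 *     }
 *   }
 */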
/* Main thread locking */
void WM_job_main_thread_lock_acquire(wmJob *wm_job)
{
BLI_ticket_mutex_lock(wm_job->main_thread_mutex);
}
void WM_job_main_thread_lock_release(wmJob *wm_job)
{
BLI_ticket_mutex_unlock(wm_job->main_thread_mutex);
}
static void wm_job_main_thread_yield(wmJob *wm_job)
{
/* Unlock and immediately re-lock the ticket mutex. Because it's a fair mutex,
 * any job thread waiting to acquire the lock will get it first, before we can re-lock. */
BLI_ticket_mutex_unlock(wm_job->main_thread_mutex);
BLI_ticket_mutex_lock(wm_job->main_thread_mutex);
}
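/* A sketch of the intended locking pattern from the worker side (hypothetical;
 * it assumes the job's customdata keeps a pointer to its wmJob): a worker thread
 * that must touch data owned by the main thread takes the same ticket mutex, and
 * the yield above guarantees it gets a turn on each timer step.
 *
 *   WM_job_main_thread_lock_acquire(data->wm_job);
 *   example_touch_main_thread_data(data);
 *   WM_job_main_thread_lock_release(data->wm_job);
 */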
/**
 * Find a job by owner and/or job type: if both are given, both must match;
 * otherwise match on whichever one is given. Returns NULL when nothing matches.
*/
static wmJob *wm_job_find(wmWindowManager *wm, void *owner, const int job_type)
{
wmJob *wm_job;
if (owner && job_type) {
for (wm_job = wm->jobs.first; wm_job; wm_job = wm_job->next) {
if (wm_job->owner == owner && wm_job->job_type == job_type) {
return wm_job;
}
}
}
else if (owner) {
for (wm_job = wm->jobs.first; wm_job; wm_job = wm_job->next) {
if (wm_job->owner == owner) {
return wm_job;
}
}
}
else if (job_type) {
for (wm_job = wm->jobs.first; wm_job; wm_job = wm_job->next) {
if (wm_job->job_type == job_type) {
return wm_job;
}
}
}
return NULL;
}
/* ******************* public API ***************** */
/**
 * \return the existing job for this owner/type, or a newly added job; it is not run yet.
 *
 * \note every owner only gets a single job: adding a new one will stop the
 * running job, and once that has stopped, the new one is started.
 */
wmJob *WM_jobs_get(
wmWindowManager *wm, wmWindow *win, void *owner, const char *name, int flag, int job_type)
{
wmJob *wm_job = wm_job_find(wm, owner, job_type);
if (wm_job == NULL) {
wm_job = MEM_callocN(sizeof(wmJob), "new job");
BLI_addtail(&wm->jobs, wm_job);
wm_job->win = win;
wm_job->owner = owner;
wm_job->flag = flag;
wm_job->job_type = job_type;
BLI_strncpy(wm_job->name, name, sizeof(wm_job->name));
wm_job->main_thread_mutex = BLI_ticket_mutex_alloc();
WM_job_main_thread_lock_acquire(wm_job);
}
/* else: a running job, be careful */
/* prevent creating a job with an invalid type */
BLI_assert(wm_job->job_type != WM_JOB_TYPE_ANY);
return wm_job;
}
/* returns true if job runs, for UI (progress) indicators */
bool WM_jobs_test(wmWindowManager *wm, void *owner, int job_type)
{
wmJob *wm_job;
/* job can be running or about to run (suspended) */
for (wm_job = wm->jobs.first; wm_job; wm_job = wm_job->next) {
if (wm_job->owner == owner) {
if (job_type == WM_JOB_TYPE_ANY || (wm_job->job_type == job_type)) {
if (wm_job->running || wm_job->suspended) {
return true;
}
}
}
}
return false;
}
float WM_jobs_progress(wmWindowManager *wm, void *owner)
{
wmJob *wm_job = wm_job_find(wm, owner, WM_JOB_TYPE_ANY);
if (wm_job && wm_job->flag & WM_JOB_PROGRESS) {
return wm_job->progress;
}
return 0.0;
}
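/* A sketch of how UI code might poll the two functions above to draw a progress
 * indicator (hypothetical; `owner` is whatever pointer was passed to WM_jobs_get,
 * and the draw call is a placeholder):
 *
 *   if (WM_jobs_test(wm, owner, WM_JOB_TYPE_RENDER)) {
 *     float progress = WM_jobs_progress(wm, owner);
 *     example_draw_progress_bar(region, progress);
 *   }
 */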
static void wm_jobs_update_progress_bars(wmWindowManager *wm)
{
float total_progress = 0.f;
float jobs_progress = 0;
LISTBASE_FOREACH (wmJob *, wm_job, &wm->jobs) {
if (wm_job->threads.first && !wm_job->ready) {
if (wm_job->flag & WM_JOB_PROGRESS) {
/* accumulate global progress for running jobs */
jobs_progress++;
total_progress += wm_job->progress;
}
}
}
/* if there are running jobs, set the global progress indicator */
if (jobs_progress > 0) {
wmWindow *win;
float progress = total_progress / (float)jobs_progress;
for (win = wm->windows.first; win; win = win->next) {
WM_progress_set(win, progress);
}
}
else {
wmWindow *win;
for (win = wm->windows.first; win; win = win->next) {
WM_progress_clear(win);
}
}
}
/* time that job started */
double WM_jobs_starttime(wmWindowManager *wm, void *owner)
{
wmJob *wm_job = wm_job_find(wm, owner, WM_JOB_TYPE_ANY);
if (wm_job && wm_job->flag & WM_JOB_PROGRESS) {
return wm_job->start_time;
}
return 0;
}
char *WM_jobs_name(wmWindowManager *wm, void *owner)
{
wmJob *wm_job = wm_job_find(wm, owner, WM_JOB_TYPE_ANY);
if (wm_job) {
return wm_job->name;
}
return NULL;
}
void *WM_jobs_customdata(wmWindowManager *wm, void *owner)
{
wmJob *wm_job = wm_job_find(wm, owner, WM_JOB_TYPE_ANY);
if (wm_job) {
return WM_jobs_customdata_get(wm_job);
}
return NULL;
}
void *WM_jobs_customdata_from_type(wmWindowManager *wm, int job_type)
{
wmJob *wm_job = wm_job_find(wm, NULL, job_type);
if (wm_job) {
return WM_jobs_customdata_get(wm_job);
}
return NULL;
}
bool WM_jobs_is_running(wmJob *wm_job)
{
return wm_job->running;
}
bool WM_jobs_is_stopped(wmWindowManager *wm, void *owner)
{
wmJob *wm_job = wm_job_find(wm, owner, WM_JOB_TYPE_ANY);
return wm_job ? wm_job->stop : true; /* XXX to be redesigned properly. */
}
void *WM_jobs_customdata_get(wmJob *wm_job)
{
if (!wm_job->customdata) {
return wm_job->run_customdata;
}
else {
return wm_job->customdata;
}
}
void WM_jobs_customdata_set(wmJob *wm_job, void *customdata, void (*free)(void *))
{
/* pending job? just free */
if (wm_job->customdata) {
wm_job->free(wm_job->customdata);
}
wm_job->customdata = customdata;
wm_job->free = free;
if (wm_job->running) {
/* signal job to end */
wm_job->stop = true;
}
}
void WM_jobs_timer(wmJob *wm_job, double timestep, unsigned int note, unsigned int endnote)
{
wm_job->timestep = timestep;
wm_job->note = note;
wm_job->endnote = endnote;
}
void WM_jobs_delay_start(wmJob *wm_job, double delay_time)
{
wm_job->start_delay_time = delay_time;
}
void WM_jobs_callbacks(wmJob *wm_job,
void (*startjob)(void *, short *, short *, float *),
void (*initjob)(void *),
void (*update)(void *),
void (*endjob)(void *))
{
wm_job->startjob = startjob;
wm_job->initjob = initjob;
wm_job->update = update;
wm_job->endjob = endjob;
}
static void *do_job_thread(void *job_v)
{
wmJob *wm_job = job_v;
BLI_thread_put_thread_on_fast_node();
wm_job->startjob(wm_job->run_customdata, &wm_job->stop, &wm_job->do_update, &wm_job->progress);
wm_job->ready = true;
return NULL;
}
/* don't allow same startjob to be executed twice */
static void wm_jobs_test_suspend_stop(wmWindowManager *wm, wmJob *test)
{
wmJob *wm_job;
bool suspend = false;
/* job added with suspend flag, we wait 1 timer step before activating it */
if (test->start_delay_time > 0.0) {
suspend = true;
test->start_delay_time = 0.0;
}
else {
/* check other jobs */
for (wm_job = wm->jobs.first; wm_job; wm_job = wm_job->next) {
/* obvious case, no test needed */
if (wm_job == test || !wm_job->running) {
continue;
}
/* if new job is not render, then check for same startjob */
if (0 == (test->flag & WM_JOB_EXCL_RENDER)) {
if (wm_job->startjob != test->startjob) {
continue;
}
}
/* if new job is render, any render job should be stopped */
if (test->flag & WM_JOB_EXCL_RENDER) {
if (0 == (wm_job->flag & WM_JOB_EXCL_RENDER)) {
continue;
}
}
suspend = true;
/* if this job has higher priority, stop others */
if (test->flag & WM_JOB_PRIORITY) {
wm_job->stop = true;
// printf("job stopped: %s\n", wm_job->name);
}
}
}
/* Possibly suspend ourselves, waiting for other jobs, or de-suspend. */
test->suspended = suspend;
// if (suspend) printf("job suspended: %s\n", test->name);
}
/**
 * If the job is running, the same owner gave it a new job (it is signaled to end and restart).
 * If a different owner starts an already-running startjob, the new job suspends itself.
*/
void WM_jobs_start(wmWindowManager *wm, wmJob *wm_job)
{
if (wm_job->running) {
/* signal job to end and restart */
wm_job->stop = true;
// printf("job started a running job, ending... %s\n", wm_job->name);
}
else {
if (wm_job->customdata && wm_job->startjob) {
const double timestep = (wm_job->start_delay_time > 0.0) ? wm_job->start_delay_time :
wm_job->timestep;
wm_jobs_test_suspend_stop(wm, wm_job);
if (wm_job->suspended == false) {
/* copy to ensure proper freeing at the end */
wm_job->run_customdata = wm_job->customdata;
wm_job->run_free = wm_job->free;
wm_job->free = NULL;
wm_job->customdata = NULL;
wm_job->running = true;
if (wm_job->initjob) {
wm_job->initjob(wm_job->run_customdata);
}
wm_job->stop = false;
wm_job->ready = false;
wm_job->progress = 0.0;
// printf("job started: %s\n", wm_job->name);
BLI_threadpool_init(&wm_job->threads, do_job_thread, 1);
BLI_threadpool_insert(&wm_job->threads, wm_job);
}
/* restarted job has timer already */
if (wm_job->wt && (wm_job->wt->timestep > timestep)) {
WM_event_remove_timer(wm, wm_job->win, wm_job->wt);
wm_job->wt = WM_event_add_timer(wm, wm_job->win, TIMERJOBS, timestep);
}
if (wm_job->wt == NULL) {
wm_job->wt = WM_event_add_timer(wm, wm_job->win, TIMERJOBS, timestep);
}
wm_job->start_time = PIL_check_seconds_timer();
}
else {
printf("job fails, not initialized\n");
}
}
}
static void wm_job_free(wmWindowManager *wm, wmJob *wm_job)
{
BLI_remlink(&wm->jobs, wm_job);
WM_job_main_thread_lock_release(wm_job);
BLI_ticket_mutex_free(wm_job->main_thread_mutex);
MEM_freeN(wm_job);
}
/* stop job, end thread, free data completely */
static void wm_jobs_kill_job(wmWindowManager *wm, wmJob *wm_job)
{
bool update_progress = (wm_job->flag & WM_JOB_PROGRESS) != 0;
if (wm_job->running) {
/* signal job to end */
wm_job->stop = true;
WM_job_main_thread_lock_release(wm_job);
BLI_threadpool_end(&wm_job->threads);
WM_job_main_thread_lock_acquire(wm_job);
if (wm_job->endjob) {
wm_job->endjob(wm_job->run_customdata);
}
}
if (wm_job->wt) {
WM_event_remove_timer(wm, wm_job->win, wm_job->wt);
}
if (wm_job->customdata) {
wm_job->free(wm_job->customdata);
}
if (wm_job->run_customdata) {
wm_job->run_free(wm_job->run_customdata);
}
/* remove wm_job */
wm_job_free(wm, wm_job);
/* Update progress bars in windows. */
if (update_progress) {
wm_jobs_update_progress_bars(wm);
}
}
/* wait until every job ended */
void WM_jobs_kill_all(wmWindowManager *wm)
{
wmJob *wm_job;
while ((wm_job = wm->jobs.first)) {
wm_jobs_kill_job(wm, wm_job);
}
/* This job will be automatically restarted */
BKE_sequencer_prefetch_stop_all();
}
/* wait until every job ended, except for one owner (used in undo to keep screen job alive) */
void WM_jobs_kill_all_except(wmWindowManager *wm, void *owner)
{
wmJob *wm_job, *next_job;
for (wm_job = wm->jobs.first; wm_job; wm_job = next_job) {
next_job = wm_job->next;
if (wm_job->owner != owner) {
wm_jobs_kill_job(wm, wm_job);
}
}
}
void WM_jobs_kill_type(struct wmWindowManager *wm, void *owner, int job_type)
{
wmJob *wm_job, *next_job;
for (wm_job = wm->jobs.first; wm_job; wm_job = next_job) {
next_job = wm_job->next;
if (!owner || wm_job->owner == owner) {
if (job_type == WM_JOB_TYPE_ANY || wm_job->job_type == job_type) {
wm_jobs_kill_job(wm, wm_job);
}
}
}
}
/* Signal job(s) from this owner or callback to stop; the job timer still needs to run for the stop to be handled. */
void WM_jobs_stop(wmWindowManager *wm, void *owner, void *startjob)
{
wmJob *wm_job;
for (wm_job = wm->jobs.first; wm_job; wm_job = wm_job->next) {
if (wm_job->owner == owner || wm_job->startjob == startjob) {
if (wm_job->running) {
wm_job->stop = true;
}
}
}
}
/* actually terminate thread and job timer */
void WM_jobs_kill(wmWindowManager *wm,
void *owner,
void (*startjob)(void *, short int *, short int *, float *))
{
wmJob *wm_job;
wm_job = wm->jobs.first;
while (wm_job) {
if (wm_job->owner == owner || wm_job->startjob == startjob) {
wmJob *wm_job_kill = wm_job;
wm_job = wm_job->next;
wm_jobs_kill_job(wm, wm_job_kill);
}
else {
wm_job = wm_job->next;
}
}
}
/* kill job entirely, also removes timer itself */
void wm_jobs_timer_ended(wmWindowManager *wm, wmTimer *wt)
{
wmJob *wm_job;
for (wm_job = wm->jobs.first; wm_job; wm_job = wm_job->next) {
if (wm_job->wt == wt) {
wm_jobs_kill_job(wm, wm_job);
return;
}
}
}
/* hardcoded to event TIMERJOBS */
void wm_jobs_timer(wmWindowManager *wm, wmTimer *wt)
{
wmJob *wm_job, *wm_jobnext;
for (wm_job = wm->jobs.first; wm_job; wm_job = wm_jobnext) {
wm_jobnext = wm_job->next;
if (wm_job->wt == wt) {
/* running threads */
if (wm_job->threads.first) {
/* let threads get temporary lock over main thread if needed */
wm_job_main_thread_yield(wm_job);
/* always call note and update when ready */
if (wm_job->do_update || wm_job->ready) {
if (wm_job->update) {
wm_job->update(wm_job->run_customdata);
}
if (wm_job->note) {
WM_event_add_notifier_ex(wm, wm_job->win, wm_job->note, NULL);
}
if (wm_job->flag & WM_JOB_PROGRESS) {
WM_event_add_notifier_ex(wm, wm_job->win, NC_WM | ND_JOB, NULL);
}
wm_job->do_update = false;
}
if (wm_job->ready) {
if (wm_job->endjob) {
wm_job->endjob(wm_job->run_customdata);
}
/* free own data */
wm_job->run_free(wm_job->run_customdata);
wm_job->run_customdata = NULL;
wm_job->run_free = NULL;
// if (wm_job->stop) printf("job ready but stopped %s\n", wm_job->name);
// else printf("job finished %s\n", wm_job->name);
if (G.debug & G_DEBUG_JOBS) {
printf("Job '%s' finished in %f seconds\n",
wm_job->name,
PIL_check_seconds_timer() - wm_job->start_time);
}
wm_job->running = false;
WM_job_main_thread_lock_release(wm_job);
BLI_threadpool_end(&wm_job->threads);
WM_job_main_thread_lock_acquire(wm_job);
if (wm_job->endnote) {
WM_event_add_notifier_ex(wm, wm_job->win, wm_job->endnote, NULL);
}
WM_event_add_notifier_ex(wm, wm_job->win, NC_WM | ND_JOB, NULL);
/* new job added for wm_job? */
if (wm_job->customdata) {
// printf("job restarted with new data %s\n", wm_job->name);
WM_jobs_start(wm, wm_job);
}
else {
WM_event_remove_timer(wm, wm_job->win, wm_job->wt);
wm_job->wt = NULL;
/* remove wm_job */
wm_job_free(wm, wm_job);
}
}
}
else if (wm_job->suspended) {
WM_jobs_start(wm, wm_job);
}
}
}
/* Update progress bars in windows. */
wm_jobs_update_progress_bars(wm);
}
bool WM_jobs_has_running(wmWindowManager *wm)
{
wmJob *wm_job;
for (wm_job = wm->jobs.first; wm_job; wm_job = wm_job->next) {
if (wm_job->running) {
return true;
}
}
return false;
}