Merge branch 'master' into blender2.8

Conflicts:
	source/blender/editors/mask/mask_draw.c
2017-11-26 20:29:56 +01:00
11 changed files with 645 additions and 500 deletions

View File

@@ -59,6 +59,11 @@ ccl_device_forceinline bool kernel_path_scene_intersect(
{
uint visibility = path_state_ray_visibility(kg, state);
if(path_state_ao_bounce(kg, state)) {
visibility = PATH_RAY_SHADOW;
ray->t = kernel_data.background.ao_distance;
}
#ifdef __HAIR__
float difl = 0.0f, extmax = 0.0f;
uint lcg_state = 0;
@@ -74,11 +79,6 @@ ccl_device_forceinline bool kernel_path_scene_intersect(
lcg_state = lcg_state_init_addrspace(state, 0x51633e2d);
}
if(path_state_ao_bounce(kg, state)) {
visibility = PATH_RAY_SHADOW;
ray->t = kernel_data.background.ao_distance;
}
bool hit = scene_intersect(kg, *ray, visibility, isect, &lcg_state, difl, extmax);
#else
bool hit = scene_intersect(kg, *ray, visibility, isect, NULL, 0.0f, 0.0f);
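As this hunk reads, the AO-bounce clamp previously sat inside the #ifdef __HAIR__ branch, so kernels built without hair support traced the full-length ray; hoisting it above the split applies the shadow visibility and the shortened ray->t to both scene_intersect() calls. A condensed sketch of the resulting flow (names taken from the hunk, hair set-up elided):

uint visibility = path_state_ray_visibility(kg, state);
if(path_state_ao_bounce(kg, state)) {
	/* AO bounces only need nearby geometry: trace as a shadow ray,
	 * clamped to the configured AO distance. */
	visibility = PATH_RAY_SHADOW;
	ray->t = kernel_data.background.ao_distance;
}
#ifdef __HAIR__
/* ... difl / extmax / lcg_state set-up ... */
bool hit = scene_intersect(kg, *ray, visibility, isect, &lcg_state, difl, extmax);
#else
bool hit = scene_intersect(kg, *ray, visibility, isect, NULL, 0.0f, 0.0f);
#endif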

View File

@@ -80,6 +80,7 @@
#include "BLI_math.h"
#include "BLI_rect.h"
#include "BLI_task.h"
#include "BLI_listbase.h"
#include "BLI_linklist.h"
@@ -1423,15 +1424,39 @@ float BKE_maskrasterize_handle_sample(MaskRasterHandle *mr_handle, const float x
return value;
}
typedef struct MaskRasterizeBufferData {
MaskRasterHandle *mr_handle;
float x_inv, y_inv;
float x_px_ofs, y_px_ofs;
uint width;
float *buffer;
} MaskRasterizeBufferData;
static void maskrasterize_buffer_cb(void *userdata, int y)
{
MaskRasterizeBufferData *data = userdata;
MaskRasterHandle *mr_handle = data->mr_handle;
float *buffer = data->buffer;
const uint width = data->width;
const float x_inv = data->x_inv;
const float x_px_ofs = data->x_px_ofs;
uint i = (uint)y * width;
float xy[2];
xy[1] = ((float)y * data->y_inv) + data->y_px_ofs;
for (uint x = 0; x < width; x++, i++) {
xy[0] = ((float)x * x_inv) + x_px_ofs;
buffer[i] = BKE_maskrasterize_handle_sample(mr_handle, xy);
}
}
/**
* \brief Rasterize a buffer from a single mask
*
* We could get some speedup by inlining #BKE_maskrasterize_handle_sample
* and calculating each layer then blending buffers, but this function is only
* used by the sequencer - so better have the caller thread.
*
* Since #BKE_maskrasterize_handle_sample is used threaded elsewhere,
* we can simply use openmp here for some speedup.
* \brief Rasterize a buffer from a single mask (threaded execution).
*/
void BKE_maskrasterize_buffer(MaskRasterHandle *mr_handle,
const unsigned int width, const unsigned int height,
@@ -1439,33 +1464,15 @@ void BKE_maskrasterize_buffer(MaskRasterHandle *mr_handle,
{
const float x_inv = 1.0f / (float)width;
const float y_inv = 1.0f / (float)height;
const float x_px_ofs = x_inv * 0.5f;
const float y_px_ofs = y_inv * 0.5f;
#ifdef _MSC_VER
int y; /* msvc requires signed for some reason */
/* ignore sign mismatch */
# pragma warning(push)
# pragma warning(disable:4018)
#else
unsigned int y;
#endif
#pragma omp parallel for private(y)
for (y = 0; y < height; y++) {
unsigned int i = y * width;
unsigned int x;
float xy[2];
xy[1] = ((float)y * y_inv) + y_px_ofs;
for (x = 0; x < width; x++, i++) {
xy[0] = ((float)x * x_inv) + x_px_ofs;
buffer[i] = BKE_maskrasterize_handle_sample(mr_handle, xy);
}
}
#ifdef _MSC_VER
# pragma warning(pop)
#endif
MaskRasterizeBufferData data = {
.mr_handle = mr_handle,
.x_inv = x_inv,
.y_inv = y_inv,
.x_px_ofs = x_inv * 0.5f,
.y_px_ofs = y_inv * 0.5f,
.width = width,
.buffer = buffer
};
BLI_task_parallel_range(0, (int)height, &data, maskrasterize_buffer_cb, height * width > 10000);
}
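This hunk shows the recurring shape of this merge: an OpenMP parallel-for becomes a userdata struct plus a per-index callback handed to BLI_task_parallel_range(), whose last argument gates threading on problem size. A minimal sketch of the same pattern for an arbitrary per-row job, using the call signature exactly as it appears above (FillRowData, fill_row_cb and fill_buffer are hypothetical names):

typedef struct FillRowData {
	float *buffer;
	uint width;
} FillRowData;

static void fill_row_cb(void *userdata, int y)
{
	FillRowData *data = userdata;
	float *row = data->buffer + (uint)y * data->width;
	for (uint x = 0; x < data->width; x++) {
		row[x] = (float)x / (float)data->width;  /* any per-pixel work */
	}
}

static void fill_buffer(float *buffer, uint width, uint height)
{
	FillRowData data = {.buffer = buffer, .width = width};
	/* Thread only when the job is large enough to amortize task overhead. */
	BLI_task_parallel_range(0, (int)height, &data, fill_row_cb, width * height > 10000);
}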

View File

@@ -30,6 +30,7 @@
*/
#include <stdlib.h>
#include "atomic_ops.h"
#include "MEM_guardedalloc.h"
@@ -38,6 +39,7 @@
#include "BLI_utildefines.h"
#include "BLI_listbase.h"
#include "BLI_task.h"
#include "BLI_threads.h"
#include "BLI_math.h"
@@ -86,6 +88,8 @@ typedef struct AutoTrackContext {
int sync_frame;
bool first_sync;
SpinLock spin_lock;
bool step_ok;
} AutoTrackContext;
static void normalized_to_libmv_frame(const float normalized[2],
@@ -379,81 +383,89 @@ AutoTrackContext *BKE_autotrack_context_new(MovieClip *clip,
return context;
}
static void autotrack_context_step_cb(void *userdata, int track)
{
AutoTrackContext *context = userdata;
const int frame_delta = context->backwards ? -1 : 1;
AutoTrackOptions *options = &context->options[track];
if (options->is_failed) {
return;
}
libmv_Marker libmv_current_marker,
libmv_reference_marker,
libmv_tracked_marker;
libmv_TrackRegionResult libmv_result;
const int frame = BKE_movieclip_remap_scene_to_clip_frame(
context->clips[options->clip_index],
context->user.framenr);
BLI_spin_lock(&context->spin_lock);
const bool has_marker = libmv_autoTrackGetMarker(context->autotrack,
options->clip_index,
frame,
options->track_index,
&libmv_current_marker);
BLI_spin_unlock(&context->spin_lock);
/* Check whether we've got a marker to sync with. */
if (!has_marker) {
return;
}
/* Check whether the marker is going outside the allowed frame margin. */
if (!tracking_check_marker_margin(&libmv_current_marker,
options->track->margin,
context->frame_width,
context->frame_height))
{
return;
}
libmv_tracked_marker = libmv_current_marker;
libmv_tracked_marker.frame = frame + frame_delta;
/* Update reference frame. */
if (options->use_keyframe_match) {
libmv_tracked_marker.reference_frame =
libmv_current_marker.reference_frame;
libmv_autoTrackGetMarker(context->autotrack,
options->clip_index,
libmv_tracked_marker.reference_frame,
options->track_index,
&libmv_reference_marker);
}
else {
libmv_tracked_marker.reference_frame = frame;
libmv_reference_marker = libmv_current_marker;
}
/* Perform actual tracking. */
if (libmv_autoTrackMarker(context->autotrack,
&options->track_region_options,
&libmv_tracked_marker,
&libmv_result))
{
BLI_spin_lock(&context->spin_lock);
libmv_autoTrackAddMarker(context->autotrack, &libmv_tracked_marker);
BLI_spin_unlock(&context->spin_lock);
}
else {
options->is_failed = true;
options->failed_frame = frame + frame_delta;
}
/* Note: Atomic is probably not actually needed here, I doubt we could get any other result than a true bool anyway.
* But for sake of consistency, and since it costs nothing... */
atomic_fetch_and_or_uint8((uint8_t *)&context->step_ok, true);
}
bool BKE_autotrack_context_step(AutoTrackContext *context)
{
const int frame_delta = context->backwards ? -1 : 1;
bool ok = false;
int track;
context->step_ok = false;
BLI_task_parallel_range(0, context->num_tracks, context, autotrack_context_step_cb, context->num_tracks > 1);
#pragma omp parallel for if (context->num_tracks > 1)
for (track = 0; track < context->num_tracks; ++track) {
AutoTrackOptions *options = &context->options[track];
if (options->is_failed) {
continue;
}
libmv_Marker libmv_current_marker,
libmv_reference_marker,
libmv_tracked_marker;
libmv_TrackRegionResult libmv_result;
const int frame = BKE_movieclip_remap_scene_to_clip_frame(
context->clips[options->clip_index],
context->user.framenr);
BLI_spin_lock(&context->spin_lock);
const bool has_marker = libmv_autoTrackGetMarker(context->autotrack,
options->clip_index,
frame,
options->track_index,
&libmv_current_marker);
BLI_spin_unlock(&context->spin_lock);
/* Check whether we've got marker to sync with. */
if (!has_marker) {
continue;
}
/* Check whether marker is going outside of allowed frame margin. */
if (!tracking_check_marker_margin(&libmv_current_marker,
options->track->margin,
context->frame_width,
context->frame_height))
{
continue;
}
libmv_tracked_marker = libmv_current_marker;
libmv_tracked_marker.frame = frame + frame_delta;
/* Update reference frame. */
if (options->use_keyframe_match) {
libmv_tracked_marker.reference_frame =
libmv_current_marker.reference_frame;
libmv_autoTrackGetMarker(context->autotrack,
options->clip_index,
libmv_tracked_marker.reference_frame,
options->track_index,
&libmv_reference_marker);
}
else {
libmv_tracked_marker.reference_frame = frame;
libmv_reference_marker = libmv_current_marker;
}
/* Perform actual tracking. */
if (libmv_autoTrackMarker(context->autotrack,
&options->track_region_options,
&libmv_tracked_marker,
&libmv_result))
{
BLI_spin_lock(&context->spin_lock);
libmv_autoTrackAddMarker(context->autotrack, &libmv_tracked_marker);
BLI_spin_unlock(&context->spin_lock);
}
else {
options->is_failed = true;
options->failed_frame = frame + frame_delta;
}
ok = true;
}
/* Advance the frame. */
BLI_spin_lock(&context->spin_lock);
context->user.framenr += frame_delta;
BLI_spin_unlock(&context->spin_lock);
return ok;
return context->step_ok;
}
void BKE_autotrack_context_sync(AutoTrackContext *context)
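One detail worth noting above: the parallel per-track callback cannot return a value, so success is reported through the shared context->step_ok, written with an atomic OR (as the in-code note says, likely more for consistency than necessity, since every writer stores the same value). A reduced sketch of that flag pattern, with hypothetical StepState / step_cb / item_failed names standing in for the real context and per-track work:

typedef struct StepState {
	bool step_ok;  /* reset to false before each parallel pass */
} StepState;

static bool item_failed(int item)
{
	return item < 0;  /* stand-in for the real per-item tracking */
}

static void step_cb(void *userdata, int item)
{
	StepState *state = userdata;
	if (item_failed(item)) {
		return;  /* leaves step_ok untouched */
	}
	/* Every successful item ORs in 'true'; atomic_fetch_and_or_uint8() is
	 * the intern/atomic helper used in the hunk above. */
	atomic_fetch_and_or_uint8((uint8_t *)&state->step_ok, true);
}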

View File

@@ -40,6 +40,7 @@ set(INC_SYS
)
set(SRC
# Naming convention for BMesh operators is: bmo_*action*_*details*.c
operators/bmo_beautify.c
operators/bmo_bevel.c
operators/bmo_bisect_plane.c
@@ -68,6 +69,7 @@ set(SRC
operators/bmo_poke.c
operators/bmo_primitive.c
operators/bmo_removedoubles.c
operators/bmo_rotate_edges.c
operators/bmo_similar.c
operators/bmo_smooth_laplacian.c
operators/bmo_split_edges.c

View File

@@ -36,12 +36,13 @@
#include "DNA_meshdata_types.h"
#include "BLI_alloca.h"
#include "BLI_linklist.h"
#include "BLI_math.h"
#include "BLI_memarena.h"
#include "BLI_task.h"
#include "BKE_customdata.h"
#include "BKE_multires.h"
#include "BLI_memarena.h"
#include "BLI_linklist.h"
#include "bmesh.h"
#include "intern/bmesh_private.h"
@@ -401,13 +402,78 @@ static void bm_loop_flip_disp(
disp[1] = (mat[0][0] * b[1] - b[0] * mat[1][0]) / d;
}
typedef struct BMLoopInterpMultiresData {
BMLoop *l_dst;
BMLoop *l_src_first;
int cd_loop_mdisp_offset;
MDisps *md_dst;
const float *f_src_center;
float *axis_x, *axis_y;
float *v1, *v4;
float *e1, *e2;
int res;
float d;
} BMLoopInterpMultiresData;
static void loop_interp_multires_cb(void *userdata, int ix)
{
BMLoopInterpMultiresData *data = userdata;
BMLoop *l_first = data->l_src_first;
BMLoop *l_dst = data->l_dst;
const int cd_loop_mdisp_offset = data->cd_loop_mdisp_offset;
MDisps *md_dst = data->md_dst;
const float *f_src_center = data->f_src_center;
float *axis_x = data->axis_x;
float *axis_y = data->axis_y;
float *v1 = data->v1;
float *v4 = data->v4;
float *e1 = data->e1;
float *e2 = data->e2;
const int res = data->res;
const float d = data->d;
float x = d * ix, y;
int iy;
for (y = 0.0f, iy = 0; iy < res; y += d, iy++) {
BMLoop *l_iter = l_first;
float co1[3], co2[3], co[3];
madd_v3_v3v3fl(co1, v1, e1, y);
madd_v3_v3v3fl(co2, v4, e2, y);
interp_v3_v3v3(co, co1, co2, x);
do {
MDisps *md_src;
float src_axis_x[3], src_axis_y[3];
float uv[2];
md_src = BM_ELEM_CD_GET_VOID_P(l_iter, cd_loop_mdisp_offset);
if (mdisp_in_mdispquad(l_dst, l_iter, f_src_center, co, res, src_axis_x, src_axis_y, uv)) {
old_mdisps_bilinear(md_dst->disps[iy * res + ix], md_src->disps, res, uv[0], uv[1]);
bm_loop_flip_disp(src_axis_x, src_axis_y, axis_x, axis_y, md_dst->disps[iy * res + ix]);
break;
}
} while ((l_iter = l_iter->next) != l_first);
}
}
void BM_loop_interp_multires_ex(
BMesh *UNUSED(bm), BMLoop *l_dst, const BMFace *f_src,
const float f_dst_center[3], const float f_src_center[3], const int cd_loop_mdisp_offset)
{
MDisps *md_dst;
float d, v1[3], v2[3], v3[3], v4[3] = {0.0f, 0.0f, 0.0f}, e1[3], e2[3];
int ix, res;
float v1[3], v2[3], v3[3], v4[3] = {0.0f, 0.0f, 0.0f}, e1[3], e2[3];
float axis_x[3], axis_y[3];
/* ignore 2-edged faces */
@@ -433,38 +499,15 @@ void BM_loop_interp_multires_ex(
mdisp_axis_from_quad(v1, v2, v3, v4, axis_x, axis_y);
res = (int)sqrt(md_dst->totdisp);
d = 1.0f / (float)(res - 1);
#pragma omp parallel for if (res > 3)
for (ix = 0; ix < res; ix++) {
float x = d * ix, y;
int iy;
for (y = 0.0f, iy = 0; iy < res; y += d, iy++) {
BMLoop *l_iter;
BMLoop *l_first;
float co1[3], co2[3], co[3];
madd_v3_v3v3fl(co1, v1, e1, y);
madd_v3_v3v3fl(co2, v4, e2, y);
interp_v3_v3v3(co, co1, co2, x);
l_iter = l_first = BM_FACE_FIRST_LOOP(f_src);
do {
MDisps *md_src;
float src_axis_x[3], src_axis_y[3];
float uv[2];
md_src = BM_ELEM_CD_GET_VOID_P(l_iter, cd_loop_mdisp_offset);
if (mdisp_in_mdispquad(l_dst, l_iter, f_src_center, co, res, src_axis_x, src_axis_y, uv)) {
old_mdisps_bilinear(md_dst->disps[iy * res + ix], md_src->disps, res, uv[0], uv[1]);
bm_loop_flip_disp(src_axis_x, src_axis_y, axis_x, axis_y, md_dst->disps[iy * res + ix]);
break;
}
} while ((l_iter = l_iter->next) != l_first);
}
}
const int res = (int)sqrt(md_dst->totdisp);
BMLoopInterpMultiresData data = {
.l_dst = l_dst, .l_src_first = BM_FACE_FIRST_LOOP(f_src),
.cd_loop_mdisp_offset = cd_loop_mdisp_offset,
.md_dst = md_dst, .f_src_center = f_src_center,
.axis_x = axis_x, .axis_y = axis_y, .v1 = v1, .v4 = v4, .e1 = e1, .e2 = e2,
.res = res, .d = 1.0f / (float)(res - 1)
};
BLI_task_parallel_range(0, res, &data, loop_interp_multires_cb, res > 5);
}
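A short aside on the grid arithmetic above, assuming the usual square multires layout: totdisp holds res * res displacements, so res is recovered with a square root and d is the parametric step between neighboring samples. With hypothetical numbers:

/* md_dst->totdisp == 25 (hypothetical) */
const int res = (int)sqrt(25);            /* 5 samples per side */
const float d = 1.0f / (float)(res - 1);  /* 0.25 between samples */
/* Each parallel callback fills one column ix; cell (ix, iy) lives at
 * md_dst->disps[iy * res + ix], as in loop_interp_multires_cb() above. */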
/**

View File

@@ -115,37 +115,24 @@ void BM_mesh_elem_toolflags_ensure(BMesh *bm)
bm->etoolflagpool = BLI_mempool_create(sizeof(BMFlagLayer), bm->totedge, 512, BLI_MEMPOOL_NOP);
bm->ftoolflagpool = BLI_mempool_create(sizeof(BMFlagLayer), bm->totface, 512, BLI_MEMPOOL_NOP);
#pragma omp parallel sections if (bm->totvert + bm->totedge + bm->totface >= BM_OMP_LIMIT)
{
#pragma omp section
{
BLI_mempool *toolflagpool = bm->vtoolflagpool;
BMIter iter;
BMVert_OFlag *ele;
BM_ITER_MESH (ele, &iter, bm, BM_VERTS_OF_MESH) {
ele->oflags = BLI_mempool_calloc(toolflagpool);
}
}
#pragma omp section
{
BLI_mempool *toolflagpool = bm->etoolflagpool;
BMIter iter;
BMEdge_OFlag *ele;
BM_ITER_MESH (ele, &iter, bm, BM_EDGES_OF_MESH) {
ele->oflags = BLI_mempool_calloc(toolflagpool);
}
}
#pragma omp section
{
BLI_mempool *toolflagpool = bm->ftoolflagpool;
BMIter iter;
BMFace_OFlag *ele;
BM_ITER_MESH (ele, &iter, bm, BM_FACES_OF_MESH) {
ele->oflags = BLI_mempool_calloc(toolflagpool);
}
}
BMIter iter;
BMVert_OFlag *v_olfag;
BLI_mempool *toolflagpool = bm->vtoolflagpool;
BM_ITER_MESH (v_olfag, &iter, bm, BM_VERTS_OF_MESH) {
v_olfag->oflags = BLI_mempool_calloc(toolflagpool);
}
BMEdge_OFlag *e_olfag;
toolflagpool = bm->etoolflagpool;
BM_ITER_MESH (e_olfag, &iter, bm, BM_EDGES_OF_MESH) {
e_olfag->oflags = BLI_mempool_calloc(toolflagpool);
}
BMFace_OFlag *f_olfag;
toolflagpool = bm->ftoolflagpool;
BM_ITER_MESH (f_olfag, &iter, bm, BM_FACES_OF_MESH) {
f_olfag->oflags = BLI_mempool_calloc(toolflagpool);
}
bm->totflags = 1;
}
@@ -412,18 +399,26 @@ static void mesh_verts_calc_normals_accum_cb(void *userdata, MempoolIterData *mp
* It also assumes that collisions between threads are highly unlikely,
* else performance would be quite bad here. */
float virtual_lock = v_no[0];
while ((virtual_lock = atomic_cas_float(&v_no[0], virtual_lock, FLT_MAX)) == FLT_MAX) {
while (true) {
/* This loops until following conditions are met:
* - v_no[0] has same value as virtual_lock (i.e. it did not change since last try).
* - v_no_[0] was not FLT_MAX, i.e. it was not locked by another thread.
* - v_no[0] was not FLT_MAX, i.e. it was not locked by another thread.
*/
const float vl = atomic_cas_float(&v_no[0], virtual_lock, FLT_MAX);
if (vl == virtual_lock && vl != FLT_MAX) {
break;
}
virtual_lock = vl;
}
BLI_assert(v_no[0] == FLT_MAX);
/* Now we own that normal value, and can change it.
* But the first scalar of the vector must not be changed yet, it's our lock! */
virtual_lock += f_no[0] * fac;
v_no[1] += f_no[1] * fac;
v_no[2] += f_no[2] * fac;
/* Second atomic operation to 'release' our lock on that vector and set its first scalar value. */
/* Note that we do not need to loop here, since we 'locked' v_no[0],
* nobody should have changed it in the mean time. */
virtual_lock = atomic_cas_float(&v_no[0], FLT_MAX, virtual_lock);
BLI_assert(virtual_lock == FLT_MAX);
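The accumulation above is a compact lock-free trick: the first component of the vertex normal doubles as a spinlock, with FLT_MAX as the "locked" sentinel. Below is a self-contained sketch of the same pattern using C11 atomics in place of Blender's atomic_cas_float(); the bit-pattern CAS through a cast pointer is a simplification of how such wrappers are typically implemented, and assumes suitably aligned 32-bit floats:

#include <assert.h>
#include <float.h>
#include <stdatomic.h>
#include <stdint.h>

/* Compare-and-swap a float via its bit pattern. Returns the value actually
 * found at *v (equal to 'old' when the swap succeeded). */
static float cas_float(float *v, float old, float new_val)
{
	union { float f; uint32_t u; } o = {.f = old}, n = {.f = new_val};
	uint32_t expected = o.u;
	atomic_compare_exchange_strong((_Atomic uint32_t *)(void *)v, &expected, n.u);
	union { uint32_t u; float f; } r = {.u = expected};
	return r.f;
}

/* Accumulate 'add' into v_no[3] from many threads; v_no[0] is the lock,
 * FLT_MAX means "held". Mirrors mesh_verts_calc_normals_accum_cb() above. */
static void accum_v3_locked(float v_no[3], const float add[3])
{
	float virtual_lock = v_no[0];
	while (true) {
		const float vl = cas_float(&v_no[0], virtual_lock, FLT_MAX);
		if (vl == virtual_lock && vl != FLT_MAX) {
			break;  /* we installed FLT_MAX ourselves: lock acquired */
		}
		virtual_lock = vl;  /* locked or changed by another thread, retry */
	}
	virtual_lock += add[0];  /* x stays in the local copy while locked */
	v_no[1] += add[1];
	v_no[2] += add[2];
	/* Release: publish the final x component. No retry loop is needed,
	 * nothing else may write to a locked slot. */
	const float prev = cas_float(&v_no[0], FLT_MAX, virtual_lock);
	assert(prev == FLT_MAX);
	(void)prev;
}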
@@ -1150,93 +1145,77 @@ void BM_mesh_elem_index_ensure(BMesh *bm, const char htype)
BM_ELEM_INDEX_VALIDATE(bm, "Should Never Fail!", __func__);
#endif
if (htype_needed == 0) {
if (0 && htype_needed == 0) {
goto finally;
}
/* skip if we only need to operate on one element */
#pragma omp parallel sections if ((!ELEM(htype_needed, BM_VERT, BM_EDGE, BM_FACE, BM_LOOP, BM_FACE | BM_LOOP)) && \
(bm->totvert + bm->totedge + bm->totface >= BM_OMP_LIMIT))
{
#pragma omp section
if (htype & BM_VERT) {
if (bm->elem_index_dirty & BM_VERT) {
BMIter iter;
BMElem *ele;
{
if (htype & BM_VERT) {
if (bm->elem_index_dirty & BM_VERT) {
BMIter iter;
BMElem *ele;
int index;
BM_ITER_MESH_INDEX (ele, &iter, bm, BM_VERTS_OF_MESH, index) {
BM_elem_index_set(ele, index); /* set_ok */
}
BLI_assert(index == bm->totvert);
}
else {
// printf("%s: skipping vert index calc!\n", __func__);
}
int index;
BM_ITER_MESH_INDEX (ele, &iter, bm, BM_VERTS_OF_MESH, index) {
BM_elem_index_set(ele, index); /* set_ok */
}
BLI_assert(index == bm->totvert);
}
#pragma omp section
{
if (htype & BM_EDGE) {
if (bm->elem_index_dirty & BM_EDGE) {
BMIter iter;
BMElem *ele;
int index;
BM_ITER_MESH_INDEX (ele, &iter, bm, BM_EDGES_OF_MESH, index) {
BM_elem_index_set(ele, index); /* set_ok */
}
BLI_assert(index == bm->totedge);
}
else {
// printf("%s: skipping edge index calc!\n", __func__);
}
}
}
#pragma omp section
{
if (htype & (BM_FACE | BM_LOOP)) {
if (bm->elem_index_dirty & (BM_FACE | BM_LOOP)) {
BMIter iter;
BMElem *ele;
const bool update_face = (htype & BM_FACE) && (bm->elem_index_dirty & BM_FACE);
const bool update_loop = (htype & BM_LOOP) && (bm->elem_index_dirty & BM_LOOP);
int index;
int index_loop = 0;
BM_ITER_MESH_INDEX (ele, &iter, bm, BM_FACES_OF_MESH, index) {
if (update_face) {
BM_elem_index_set(ele, index); /* set_ok */
}
if (update_loop) {
BMLoop *l_iter, *l_first;
l_iter = l_first = BM_FACE_FIRST_LOOP((BMFace *)ele);
do {
BM_elem_index_set(l_iter, index_loop++); /* set_ok */
} while ((l_iter = l_iter->next) != l_first);
}
}
BLI_assert(index == bm->totface);
if (update_loop) {
BLI_assert(index_loop == bm->totloop);
}
}
else {
// printf("%s: skipping face/loop index calc!\n", __func__);
}
}
else {
// printf("%s: skipping vert index calc!\n", __func__);
}
}
if (htype & BM_EDGE) {
if (bm->elem_index_dirty & BM_EDGE) {
BMIter iter;
BMElem *ele;
int index;
BM_ITER_MESH_INDEX (ele, &iter, bm, BM_EDGES_OF_MESH, index) {
BM_elem_index_set(ele, index); /* set_ok */
}
BLI_assert(index == bm->totedge);
}
else {
// printf("%s: skipping edge index calc!\n", __func__);
}
}
if (htype & (BM_FACE | BM_LOOP)) {
if (bm->elem_index_dirty & (BM_FACE | BM_LOOP)) {
BMIter iter;
BMElem *ele;
const bool update_face = (htype & BM_FACE) && (bm->elem_index_dirty & BM_FACE);
const bool update_loop = (htype & BM_LOOP) && (bm->elem_index_dirty & BM_LOOP);
int index;
int index_loop = 0;
BM_ITER_MESH_INDEX (ele, &iter, bm, BM_FACES_OF_MESH, index) {
if (update_face) {
BM_elem_index_set(ele, index); /* set_ok */
}
if (update_loop) {
BMLoop *l_iter, *l_first;
l_iter = l_first = BM_FACE_FIRST_LOOP((BMFace *)ele);
do {
BM_elem_index_set(l_iter, index_loop++); /* set_ok */
} while ((l_iter = l_iter->next) != l_first);
}
}
BLI_assert(index == bm->totface);
if (update_loop) {
BLI_assert(index_loop == bm->totloop);
}
}
else {
// printf("%s: skipping face/loop index calc!\n", __func__);
}
}
finally:
bm->elem_index_dirty &= ~htype;
@@ -1409,28 +1388,16 @@ void BM_mesh_elem_table_ensure(BMesh *bm, const char htype)
}
}
/* skip if we only need to operate on one element */
#pragma omp parallel sections if ((!ELEM(htype_needed, BM_VERT, BM_EDGE, BM_FACE)) && \
(bm->totvert + bm->totedge + bm->totface >= BM_OMP_LIMIT))
{
#pragma omp section
{
if (htype_needed & BM_VERT) {
BM_iter_as_array(bm, BM_VERTS_OF_MESH, NULL, (void **)bm->vtable, bm->totvert);
}
}
#pragma omp section
{
if (htype_needed & BM_EDGE) {
BM_iter_as_array(bm, BM_EDGES_OF_MESH, NULL, (void **)bm->etable, bm->totedge);
}
}
#pragma omp section
{
if (htype_needed & BM_FACE) {
BM_iter_as_array(bm, BM_FACES_OF_MESH, NULL, (void **)bm->ftable, bm->totface);
}
}
if (htype_needed & BM_VERT) {
BM_iter_as_array(bm, BM_VERTS_OF_MESH, NULL, (void **)bm->vtable, bm->totvert);
}
if (htype_needed & BM_EDGE) {
BM_iter_as_array(bm, BM_EDGES_OF_MESH, NULL, (void **)bm->etable, bm->totedge);
}
if (htype_needed & BM_FACE) {
BM_iter_as_array(bm, BM_FACES_OF_MESH, NULL, (void **)bm->ftable, bm->totface);
}
finally:

View File

@@ -1221,57 +1221,38 @@ static void bmo_flag_layer_alloc(BMesh *bm)
bm->etoolflagpool = BLI_mempool_create(sizeof(BMFlagLayer) * bm->totflags, bm->totedge, 512, BLI_MEMPOOL_NOP);
bm->ftoolflagpool = BLI_mempool_create(sizeof(BMFlagLayer) * bm->totflags, bm->totface, 512, BLI_MEMPOOL_NOP);
#pragma omp parallel sections if (bm->totvert + bm->totedge + bm->totface >= BM_OMP_LIMIT)
{
#pragma omp section
{
BMIter iter;
BMVert_OFlag *ele;
int i;
/* now go through and memcpy all the flags. Loops don't get a flag layer at this time. */
BMIter iter;
int i;
BLI_mempool *newpool = bm->vtoolflagpool;
BMVert_OFlag *v_oflag;
BLI_mempool *newpool = bm->vtoolflagpool;
BM_ITER_MESH_INDEX (v_oflag, &iter, bm, BM_VERTS_OF_MESH, i) {
void *oldflags = v_oflag->oflags;
v_oflag->oflags = BLI_mempool_calloc(newpool);
memcpy(v_oflag->oflags, oldflags, old_totflags_size);
BM_elem_index_set(&v_oflag->base, i); /* set_inline */
BM_ELEM_API_FLAG_CLEAR((BMElemF *)v_oflag);
}
/* now go through and memcpy all the flags. Loops don't get a flag layer at this time. */
BM_ITER_MESH_INDEX (ele, &iter, bm, BM_VERTS_OF_MESH, i) {
void *oldflags = ele->oflags;
ele->oflags = BLI_mempool_calloc(newpool);
memcpy(ele->oflags, oldflags, old_totflags_size);
BM_elem_index_set(&ele->base, i); /* set_inline */
BM_ELEM_API_FLAG_CLEAR((BMElemF *)ele);
}
}
#pragma omp section
{
BMIter iter;
BMEdge_OFlag *ele;
int i;
BMEdge_OFlag *e_oflag;
newpool = bm->etoolflagpool;
BM_ITER_MESH_INDEX (e_oflag, &iter, bm, BM_EDGES_OF_MESH, i) {
void *oldflags = e_oflag->oflags;
e_oflag->oflags = BLI_mempool_calloc(newpool);
memcpy(e_oflag->oflags, oldflags, old_totflags_size);
BM_elem_index_set(&e_oflag->base, i); /* set_inline */
BM_ELEM_API_FLAG_CLEAR((BMElemF *)e_oflag);
}
BLI_mempool *newpool = bm->etoolflagpool;
BM_ITER_MESH_INDEX (ele, &iter, bm, BM_EDGES_OF_MESH, i) {
void *oldflags = ele->oflags;
ele->oflags = BLI_mempool_calloc(newpool);
memcpy(ele->oflags, oldflags, old_totflags_size);
BM_elem_index_set(&ele->base, i); /* set_inline */
BM_ELEM_API_FLAG_CLEAR((BMElemF *)ele);
}
}
#pragma omp section
{
BMIter iter;
BMFace_OFlag *ele;
int i;
BLI_mempool *newpool = bm->ftoolflagpool;
BM_ITER_MESH_INDEX (ele, &iter, bm, BM_FACES_OF_MESH, i) {
void *oldflags = ele->oflags;
ele->oflags = BLI_mempool_calloc(newpool);
memcpy(ele->oflags, oldflags, old_totflags_size);
BM_elem_index_set(&ele->base, i); /* set_inline */
BM_ELEM_API_FLAG_CLEAR((BMElemF *)ele);
}
}
BMFace_OFlag *f_oflag;
newpool = bm->ftoolflagpool;
BM_ITER_MESH_INDEX (f_oflag, &iter, bm, BM_FACES_OF_MESH, i) {
void *oldflags = f_oflag->oflags;
f_oflag->oflags = BLI_mempool_calloc(newpool);
memcpy(f_oflag->oflags, oldflags, old_totflags_size);
BM_elem_index_set(&f_oflag->base, i); /* set_inline */
BM_ELEM_API_FLAG_CLEAR((BMElemF *)f_oflag);
}
BLI_mempool_destroy(voldpool);
@@ -1300,57 +1281,38 @@ static void bmo_flag_layer_free(BMesh *bm)
bm->etoolflagpool = BLI_mempool_create(new_totflags_size, bm->totedge, 512, BLI_MEMPOOL_NOP);
bm->ftoolflagpool = BLI_mempool_create(new_totflags_size, bm->totface, 512, BLI_MEMPOOL_NOP);
#pragma omp parallel sections if (bm->totvert + bm->totedge + bm->totface >= BM_OMP_LIMIT)
{
#pragma omp section
{
BMIter iter;
BMVert_OFlag *ele;
int i;
/* now go through and memcpy all the flags */
BMIter iter;
int i;
BLI_mempool *newpool = bm->vtoolflagpool;
BMVert_OFlag *v_oflag;
BLI_mempool *newpool = bm->vtoolflagpool;
BM_ITER_MESH_INDEX (v_oflag, &iter, bm, BM_VERTS_OF_MESH, i) {
void *oldflags = v_oflag->oflags;
v_oflag->oflags = BLI_mempool_alloc(newpool);
memcpy(v_oflag->oflags, oldflags, new_totflags_size);
BM_elem_index_set(&v_oflag->base, i); /* set_inline */
BM_ELEM_API_FLAG_CLEAR((BMElemF *)v_oflag);
}
/* now go through and memcpy all the flags */
BM_ITER_MESH_INDEX (ele, &iter, bm, BM_VERTS_OF_MESH, i) {
void *oldflags = ele->oflags;
ele->oflags = BLI_mempool_alloc(newpool);
memcpy(ele->oflags, oldflags, new_totflags_size);
BM_elem_index_set(&ele->base, i); /* set_inline */
BM_ELEM_API_FLAG_CLEAR((BMElemF *)ele);
}
}
#pragma omp section
{
BMIter iter;
BMEdge_OFlag *ele;
int i;
BMEdge_OFlag *e_oflag;
newpool = bm->etoolflagpool;
BM_ITER_MESH_INDEX (e_oflag, &iter, bm, BM_EDGES_OF_MESH, i) {
void *oldflags = e_oflag->oflags;
e_oflag->oflags = BLI_mempool_alloc(newpool);
memcpy(e_oflag->oflags, oldflags, new_totflags_size);
BM_elem_index_set(&e_oflag->base, i); /* set_inline */
BM_ELEM_API_FLAG_CLEAR((BMElemF *)e_oflag);
}
BLI_mempool *newpool = bm->etoolflagpool;
BM_ITER_MESH_INDEX (ele, &iter, bm, BM_EDGES_OF_MESH, i) {
void *oldflags = ele->oflags;
ele->oflags = BLI_mempool_alloc(newpool);
memcpy(ele->oflags, oldflags, new_totflags_size);
BM_elem_index_set(&ele->base, i); /* set_inline */
BM_ELEM_API_FLAG_CLEAR((BMElemF *)ele);
}
}
#pragma omp section
{
BMIter iter;
BMFace_OFlag *ele;
int i;
BLI_mempool *newpool = bm->ftoolflagpool;
BM_ITER_MESH_INDEX (ele, &iter, bm, BM_FACES_OF_MESH, i) {
void *oldflags = ele->oflags;
ele->oflags = BLI_mempool_alloc(newpool);
memcpy(ele->oflags, oldflags, new_totflags_size);
BM_elem_index_set(&ele->base, i); /* set_inline */
BM_ELEM_API_FLAG_CLEAR((BMElemF *)ele);
}
}
BMFace_OFlag *f_oflag;
newpool = bm->ftoolflagpool;
BM_ITER_MESH_INDEX (f_oflag, &iter, bm, BM_FACES_OF_MESH, i) {
void *oldflags = f_oflag->oflags;
f_oflag->oflags = BLI_mempool_alloc(newpool);
memcpy(f_oflag->oflags, oldflags, new_totflags_size);
BM_elem_index_set(&f_oflag->base, i); /* set_inline */
BM_ELEM_API_FLAG_CLEAR((BMElemF *)f_oflag);
}
BLI_mempool_destroy(voldpool);

View File

@@ -0,0 +1,272 @@
/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/bmesh/operators/bmo_rotate_edges.c
* \ingroup bmesh
*
* Rotate edges that share two faces.
*/
#include "MEM_guardedalloc.h"
#include "BLI_math.h"
#include "BLI_heap.h"
#include "bmesh.h"
#include "intern/bmesh_operators_private.h" /* own include */
#define EDGE_OUT 1
#define FACE_MARK 1
/**
* Rotate edges where every edge has its own faces (we can rotate in any order).
*/
static void bm_rotate_edges_simple(
BMesh *bm, BMOperator *op,
const short check_flag, const bool use_ccw)
{
BMOIter siter;
BMEdge *e;
BMO_ITER (e, &siter, op->slots_in, "edges", BM_EDGE) {
/**
* this check ends up running twice; an option to skip the check in
* #BM_edge_rotate could be added for some extra speed */
if (BM_edge_rotate_check(e)) {
BMEdge *e_rotate = BM_edge_rotate(bm, e, use_ccw, check_flag);
if (e_rotate != NULL) {
BMO_edge_flag_enable(bm, e_rotate, EDGE_OUT);
}
}
}
}
/**
* Edge length is just an ordering that's independent of the order of the edges argument;
* any other criterion would do since ideally all edges will be rotated,
* this one just happens to be simple to calculate.
*/
static float bm_edge_calc_rotate_cost(const BMEdge *e)
{
return -BM_edge_calc_length_squared(e);
}
/**
* Check if this edge is a boundary: at most one of the connected faces may contain another rotating edge.
*/
static bool bm_edge_rotate_is_boundary(const BMEdge *e)
{
/* Number of connected faces that also contain a rotating edge. */
int count = 0;
BMLoop *l_radial_iter = e->l;
do {
/* Skip this edge. */
BMLoop *l_iter = l_radial_iter->next;
do {
BMEdge *e_iter = l_iter->e;
const int e_iter_index = BM_elem_index_get(e_iter);
if (e_iter_index != -1) {
if (count == 1) {
return false;
}
count += 1;
break;
}
} while ((l_iter = l_iter->next) != l_radial_iter);
} while ((l_radial_iter = l_radial_iter->radial_next) != e->l);
return true;
}
/**
* Rotate edges where edges share faces,
* edges which could not rotate need to be re-considered after neighbors are rotated.
*/
static void bm_rotate_edges_shared(
BMesh *bm, BMOperator *op,
short check_flag, const bool use_ccw, const int edges_len)
{
Heap *heap = BLI_heap_new_ex(edges_len);
HeapNode **eheap_table = MEM_mallocN(sizeof(*eheap_table) * edges_len, __func__);
int edges_len_rotate = 0;
{
BMIter iter;
BMEdge *e;
BM_ITER_MESH (e, &iter, bm, BM_EDGES_OF_MESH) {
BM_elem_index_set(e, -1); /* set_dirty! */
}
bm->elem_index_dirty |= BM_EDGE;
}
{
BMOIter siter;
BMEdge *e;
uint i;
BMO_ITER_INDEX (e, &siter, op->slots_in, "edges", BM_EDGE, i) {
BM_elem_index_set(e, BM_edge_is_manifold(e) ? i : -1); /* set_dirty! */
eheap_table[i] = NULL;
}
}
/* First operate on boundary edges, this is often all that's needed,
* regions that have no boundaries are handled after. */
enum {
PASS_TYPE_BOUNDARY = 0,
PASS_TYPE_ALL = 1,
PASS_TYPE_DONE = 2,
};
uint pass_type = PASS_TYPE_BOUNDARY;
while ((pass_type != PASS_TYPE_DONE) && (edges_len_rotate != edges_len)) {
BLI_assert(BLI_heap_is_empty(heap));
{
BMOIter siter;
BMEdge *e;
uint i;
BMO_ITER_INDEX (e, &siter, op->slots_in, "edges", BM_EDGE, i) {
BLI_assert(eheap_table[i] == NULL);
bool ok = (BM_elem_index_get(e) != -1) && BM_edge_rotate_check(e);
if (ok) {
if (pass_type == PASS_TYPE_BOUNDARY) {
ok = bm_edge_rotate_is_boundary(e);
}
}
if (ok) {
float cost = bm_edge_calc_rotate_cost(e);
if (pass_type == PASS_TYPE_BOUNDARY) {
/* Trick to ensure that, once started, non-boundary edges are handled before other boundary edges.
* This means the longest boundary edge defines the starting point, which is rotated
* until all its connected edges are exhausted and the next boundary is popped off the heap.
*
* Without this we may rotate from different starting points and meet in the middle
* with obviously uneven topology.
*
* Map the cost from negative to positive, inverting it so large magnitudes are still handled first.
*/
cost = cost != 0.0f ? -1.0f / cost : FLT_MAX;
}
eheap_table[i] = BLI_heap_insert(heap, cost, e);
}
}
}
if (BLI_heap_is_empty(heap)) {
pass_type += 1;
continue;
}
const int edges_len_rotate_prev = edges_len_rotate;
while (!BLI_heap_is_empty(heap)) {
BMEdge *e_best = BLI_heap_popmin(heap);
eheap_table[BM_elem_index_get(e_best)] = NULL;
/* No problem if this fails: the edge is re-evaluated when faces connected to it are touched. */
if (BM_edge_rotate_check(e_best)) {
BMEdge *e_rotate = BM_edge_rotate(bm, e_best, use_ccw, check_flag);
if (e_rotate != NULL) {
BMO_edge_flag_enable(bm, e_rotate, EDGE_OUT);
/* invalidate so we don't try touch this again. */
BM_elem_index_set(e_rotate, -1); /* set_dirty! */
edges_len_rotate += 1;
/* Note: we could validate all edges which have not been rotated
* (not just previously degenerate edges).
* However there is no real need - they can be left until they're popped off the queue. */
/* We don't know the exact topology after rotating the edge,
* so loop over all faces attached to the new edge, typically this will only be two faces. */
BMLoop *l_radial_iter = e_rotate->l;
do {
/* Skip this edge. */
BMLoop *l_iter = l_radial_iter->next;
do {
BMEdge *e_iter = l_iter->e;
const int e_iter_index = BM_elem_index_get(e_iter);
if ((e_iter_index != -1) && (eheap_table[e_iter_index] == NULL)) {
if (BM_edge_rotate_check(e_iter)) {
/* Previously degenerate, now valid. */
float cost = bm_edge_calc_rotate_cost(e_iter);
eheap_table[e_iter_index] = BLI_heap_insert(heap, cost, e_iter);
}
}
} while ((l_iter = l_iter->next) != l_radial_iter);
} while ((l_radial_iter = l_radial_iter->radial_next) != e_rotate->l);
}
}
}
/* If no actions were taken, move onto the next pass. */
if (edges_len_rotate == edges_len_rotate_prev) {
pass_type += 1;
continue;
}
}
BLI_heap_free(heap, NULL);
MEM_freeN(eheap_table);
}
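To make the boundary-pass cost inversion concrete, a worked example with hypothetical squared edge lengths (BLI_heap is a min-heap, so the smallest value pops first):

float cost_long  = -4.0f;               /* bm_edge_calc_rotate_cost(): -length^2 */
float cost_short = -1.0f;
float seed_long  = -1.0f / cost_long;   /* 0.25 */
float seed_short = -1.0f / cost_short;  /* 1.00 */
/* The longest boundary edge (0.25) still pops first. Edges enqueued while a
 * region is being rotated keep their raw negative costs, so they always sort
 * ahead of the remaining positive boundary seeds: one region is exhausted
 * before the next boundary edge is started. */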
void bmo_rotate_edges_exec(BMesh *bm, BMOperator *op)
{
BMOIter siter;
BMEdge *e;
const int edges_len = BMO_slot_buffer_count(op->slots_in, "edges");
const bool use_ccw = BMO_slot_bool_get(op->slots_in, "use_ccw");
const bool is_single = (edges_len == 1);
short check_flag = is_single ?
BM_EDGEROT_CHECK_EXISTS :
BM_EDGEROT_CHECK_EXISTS | BM_EDGEROT_CHECK_DEGENERATE;
bool is_simple = true;
if (is_single == false) {
BMO_ITER (e, &siter, op->slots_in, "edges", BM_EDGE) {
BMFace *f_pair[2];
if (BM_edge_face_pair(e, &f_pair[0], &f_pair[1])) {
for (uint i = 0; i < ARRAY_SIZE(f_pair); i += 1) {
if (BMO_face_flag_test(bm, f_pair[i], FACE_MARK)) {
is_simple = false;
break;
}
BMO_face_flag_enable(bm, f_pair[i], FACE_MARK);
}
if (is_simple == false) {
break;
}
}
}
}
if (is_simple) {
bm_rotate_edges_simple(bm, op, check_flag, use_ccw);
}
else {
bm_rotate_edges_shared(bm, op, check_flag, use_ccw, edges_len);
}
BMO_slot_buffer_from_enabled_flag(bm, op, op->slots_out, "edges.out", BM_EDGE, EDGE_OUT);
}
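For context, a sketch of how a caller might drive this operator through the BMO formatted-call API; the real edit-mode call site is not part of this diff, so the selection flags used here are illustrative:

BMOperator bmop;
BMO_op_initf(bm, &bmop, BMO_FLAG_DEFAULTS,
             "rotate_edges edges=%he use_ccw=%b", BM_ELEM_SELECT, true);
BMO_op_exec(bm, &bmop);
/* Rotated edges come back flagged in the "edges.out" slot. */
BMO_slot_buffer_hflag_enable(bm, bmop.slots_out, "edges.out", BM_EDGE, BM_ELEM_SELECT, true);
BMO_op_finish(bm, &bmop);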

View File

@@ -121,60 +121,6 @@ void bmo_reverse_faces_exec(BMesh *bm, BMOperator *op)
}
}
void bmo_rotate_edges_exec(BMesh *bm, BMOperator *op)
{
BMOIter siter;
BMEdge *e, *e2;
const bool use_ccw = BMO_slot_bool_get(op->slots_in, "use_ccw");
const bool is_single = BMO_slot_buffer_count(op->slots_in, "edges") == 1;
short check_flag = is_single ?
BM_EDGEROT_CHECK_EXISTS :
BM_EDGEROT_CHECK_EXISTS | BM_EDGEROT_CHECK_DEGENERATE;
#define EDGE_OUT 1
#define FACE_TAINT 1
BMO_ITER (e, &siter, op->slots_in, "edges", BM_EDGE) {
/**
* this ends up being called twice, could add option to not to call check in
* #BM_edge_rotate to get some extra speed */
if (BM_edge_rotate_check(e)) {
BMFace *fa, *fb;
if (BM_edge_face_pair(e, &fa, &fb)) {
/* check we're untouched */
if (BMO_face_flag_test(bm, fa, FACE_TAINT) == false &&
BMO_face_flag_test(bm, fb, FACE_TAINT) == false)
{
/* don't touch again (faces will be freed so run before rotating the edge) */
BMO_face_flag_enable(bm, fa, FACE_TAINT);
BMO_face_flag_enable(bm, fb, FACE_TAINT);
if (!(e2 = BM_edge_rotate(bm, e, use_ccw, check_flag))) {
BMO_face_flag_disable(bm, fa, FACE_TAINT);
BMO_face_flag_disable(bm, fb, FACE_TAINT);
#if 0
BMO_error_raise(bm, op, BMERR_INVALID_SELECTION, "Could not rotate edge");
return;
#endif
continue;
}
BMO_edge_flag_enable(bm, e2, EDGE_OUT);
}
}
}
}
BMO_slot_buffer_from_enabled_flag(bm, op, op->slots_out, "edges.out", BM_EDGE, EDGE_OUT);
#undef EDGE_OUT
#undef FACE_TAINT
}
#define SEL_FLAG 1
#define SEL_ORIG 2

View File

@@ -2768,6 +2768,8 @@ static const EnumPropertyItem prop_gpencil_drawmodes[] = {
void GPENCIL_OT_draw(wmOperatorType *ot)
{
PropertyRNA *prop;
/* identifiers */
ot->name = "Grease Pencil Draw";
ot->idname = "GPENCIL_OT_draw";
@@ -2784,11 +2786,12 @@ void GPENCIL_OT_draw(wmOperatorType *ot)
ot->flag = OPTYPE_UNDO | OPTYPE_BLOCKING;
/* settings for drawing */
PropertyRNA *prop;
ot->prop = RNA_def_enum(ot->srna, "mode", prop_gpencil_drawmodes, 0, "Mode", "Way to interpret mouse movements");
prop = RNA_def_collection_runtime(ot->srna, "stroke", &RNA_OperatorStrokeElement, "Stroke", "");
RNA_def_property_flag(prop, PROP_HIDDEN | PROP_SKIP_SAVE);
/* NOTE: wait for input is enabled by default, so that all UI code can work properly without needing users to know about this */
RNA_def_boolean(ot->srna, "wait_for_input", true, "Wait for Input", "Wait for first click instead of painting immediately");
prop = RNA_def_boolean(ot->srna, "wait_for_input", true, "Wait for Input", "Wait for first click instead of painting immediately");
RNA_def_property_flag(prop, PROP_SKIP_SAVE);
}

View File

@@ -610,87 +610,18 @@ void ED_mask_draw(const bContext *C,
draw_masklays(C, mask, draw_flag, draw_type, width, height);
}
typedef struct ThreadedMaskRasterizeState {
MaskRasterHandle *handle;
float *buffer;
int width, height;
} ThreadedMaskRasterizeState;
typedef struct ThreadedMaskRasterizeData {
int start_scanline;
int num_scanlines;
} ThreadedMaskRasterizeData;
static void mask_rasterize_func(TaskPool * __restrict pool, void *taskdata, int UNUSED(threadid))
static float *mask_rasterize(Mask *mask, const int width, const int height)
{
ThreadedMaskRasterizeState *state = (ThreadedMaskRasterizeState *) BLI_task_pool_userdata(pool);
ThreadedMaskRasterizeData *data = (ThreadedMaskRasterizeData *) taskdata;
int scanline;
const float x_inv = 1.0f / (float)state->width;
const float y_inv = 1.0f / (float)state->height;
const float x_px_ofs = x_inv * 0.5f;
const float y_px_ofs = y_inv * 0.5f;
for (scanline = 0; scanline < data->num_scanlines; scanline++) {
float xy[2];
int x, y = data->start_scanline + scanline;
xy[1] = ((float)y * y_inv) + y_px_ofs;
for (x = 0; x < state->width; x++) {
int index = y * state->width + x;
xy[0] = ((float)x * x_inv) + x_px_ofs;
state->buffer[index] = BKE_maskrasterize_handle_sample(state->handle, xy);
}
}
}
static float *threaded_mask_rasterize(Mask *mask, const int width, const int height)
{
TaskScheduler *task_scheduler = BLI_task_scheduler_get();
TaskPool *task_pool;
MaskRasterHandle *handle;
ThreadedMaskRasterizeState state;
float *buffer;
int i, num_threads = BLI_task_scheduler_num_threads(task_scheduler), scanlines_per_thread;
buffer = MEM_mallocN(sizeof(float) * height * width, "rasterized mask buffer");
float *buffer = MEM_mallocN(sizeof(float) * height * width, "rasterized mask buffer");
/* Initialize rasterization handle. */
handle = BKE_maskrasterize_handle_new();
BKE_maskrasterize_handle_init(handle, mask, width, height, true, true, true);
state.handle = handle;
state.buffer = buffer;
state.width = width;
state.height = height;
task_pool = BLI_task_pool_create(task_scheduler, &state);
scanlines_per_thread = height / num_threads;
for (i = 0; i < num_threads; i++) {
ThreadedMaskRasterizeData *data = MEM_mallocN(sizeof(ThreadedMaskRasterizeData),
"threaded mask rasterize data");
data->start_scanline = i * scanlines_per_thread;
if (i < num_threads - 1) {
data->num_scanlines = scanlines_per_thread;
}
else {
data->num_scanlines = height - data->start_scanline;
}
BLI_task_pool_push(task_pool, mask_rasterize_func, data, true, TASK_PRIORITY_LOW);
}
/* work and wait until tasks are done */
BLI_task_pool_work_and_wait(task_pool);
BKE_maskrasterize_buffer(handle, width, height, buffer);
/* Free memory. */
BLI_task_pool_free(task_pool);
BKE_maskrasterize_handle_free(handle);
return buffer;
@@ -755,7 +686,7 @@ void ED_mask_draw_region(Mask *mask, ARegion *ar,
if (draw_flag & MASK_DRAWFLAG_OVERLAY) {
float red[4] = {1.0f, 0.0f, 0.0f, 0.0f};
float *buffer = threaded_mask_rasterize(mask, width, height);
float *buffer = mask_rasterize(mask, width, height);
if (overlay_mode != MASK_OVERLAY_ALPHACHANNEL) {
/* More blending types could be supported in the future. */