2018-07-31 10:22:19 +02:00
|
|
|
/*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version 2
|
|
|
|
* of the License, or (at your option) any later version.
|
|
|
|
*
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
*
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
* along with this program; if not, write to the Free Software Foundation,
|
|
|
|
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
|
|
*
|
2019-01-23 11:29:18 +11:00
|
|
|
* Copyright 2017, Blender Foundation.
|
2018-07-31 10:22:19 +02:00
|
|
|
*/
|
|
|
|
|
2019-02-18 08:08:12 +11:00
|
|
|
/** \file
|
|
|
|
* \ingroup draw
|
2018-07-31 10:22:19 +02:00
|
|
|
*/
|
|
|
|
|
2018-10-21 16:22:31 +11:00
|
|
|
#include "DRW_engine.h"
|
2018-07-31 10:22:19 +02:00
|
|
|
#include "DRW_render.h"
|
|
|
|
|
|
|
|
#include "ED_gpencil.h"
|
|
|
|
#include "ED_view3d.h"
|
|
|
|
|
|
|
|
#include "DNA_gpencil_types.h"
|
|
|
|
#include "DNA_view3d_types.h"
|
|
|
|
|
2019-02-27 08:50:29 +01:00
|
|
|
#include "BKE_library.h"
|
2018-11-20 19:26:16 +01:00
|
|
|
#include "BKE_gpencil.h"
|
2019-04-13 13:03:22 +02:00
|
|
|
#include "BKE_object.h"
|
2018-11-20 19:26:16 +01:00
|
|
|
|
2018-07-31 10:22:19 +02:00
|
|
|
#include "gpencil_engine.h"
|
|
|
|
|
|
|
|
#include "draw_cache_impl.h"
|
|
|
|
|
2018-10-19 20:39:21 +02:00
|
|
|
#include "DEG_depsgraph.h"
|
|
|
|
|
2019-04-13 13:03:22 +02:00
|
|
|
/* verify if exist a non instanced version of the object */
|
|
|
|
static bool gpencil_has_noninstanced_object(Object *ob_instance)
|
|
|
|
{
|
|
|
|
const DRWContextState *draw_ctx = DRW_context_state_get();
|
|
|
|
const ViewLayer *view_layer = draw_ctx->view_layer;
|
|
|
|
Object *ob = NULL;
|
|
|
|
for (Base *base = view_layer->object_bases.first; base; base = base->next) {
|
|
|
|
ob = base->object;
|
|
|
|
if (ob->type != OB_GPENCIL) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/* object must be visible (invisible objects don't create VBO data) */
|
|
|
|
if (!(DRW_object_visibility_in_active_context(ob) & OB_VISIBLE_SELF)) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
/* is not duplicated and the name is equals */
|
|
|
|
if ((ob->base_flag & BASE_FROM_DUPLI) == 0) {
|
|
|
|
if (STREQ(ob->id.name, ob_instance->id.name)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* add a gpencil object to cache to defer drawing */

/* Append one grease pencil object to the draw cache array, growing the
 * array in blocks of GP_CACHE_BLOCK_SIZE slots when it runs out of space.
 *
 * \param cache_array: Current cache array (may be NULL when nothing cached yet).
 * \param ob: Grease pencil object to register (assumed OB_GPENCIL — caller filters).
 * \param gp_cache_size: In/out number of allocated slots.
 * \param gp_cache_used: In/out number of used slots; incremented before return.
 * \return The (possibly reallocated) cache array — caller must store this.
 */
tGPencilObjectCache *gpencil_object_cache_add(
        tGPencilObjectCache *cache_array, Object *ob,
        int *gp_cache_size, int *gp_cache_used)
{
  const DRWContextState *draw_ctx = DRW_context_state_get();
  tGPencilObjectCache *cache_elem = NULL;
  RegionView3D *rv3d = draw_ctx->rv3d;
  View3D *v3d = draw_ctx->v3d;
  tGPencilObjectCache *p = NULL;

  /* By default a cache is created with one block with a predefined number of free slots,
  if the size is not enough, the cache is reallocated adding a new block of free slots.
  This is done in order to keep cache small */
  if (*gp_cache_used + 1 > *gp_cache_size) {
    if ((*gp_cache_size == 0) || (cache_array == NULL)) {
      p = MEM_callocN(sizeof(struct tGPencilObjectCache) * GP_CACHE_BLOCK_SIZE, "tGPencilObjectCache");
      *gp_cache_size = GP_CACHE_BLOCK_SIZE;
    }
    else {
      *gp_cache_size += GP_CACHE_BLOCK_SIZE;
      p = MEM_recallocN(cache_array, sizeof(struct tGPencilObjectCache) * *gp_cache_size);
    }
    cache_array = p;
  }
  /* zero out all pointers */
  cache_elem = &cache_array[*gp_cache_used];
  memset(cache_elem, 0, sizeof(*cache_elem));

  cache_elem->ob = ob;
  cache_elem->gpd = (bGPdata *)ob->data;
  /* Unique key string used to identify the object in the cache. */
  cache_elem->name = BKE_id_to_unique_string_key(&ob->id);

  copy_v3_v3(cache_elem->loc, ob->obmat[3]);
  copy_m4_m4(cache_elem->obmat, ob->obmat);
  cache_elem->idx = *gp_cache_used;

  /* object is duplicated (particle) */
  if (ob->base_flag & BASE_FROM_DUPLI) {
    /* Check if the original object is not in the viewlayer
     * and cannot be managed as dupli. This is slower, but required to keep
     * the particle drawing FPS and display instanced objects in scene
     * without the original object */
    bool has_original = gpencil_has_noninstanced_object(ob);
    cache_elem->is_dup_ob = (has_original) ? ob->base_flag & BASE_FROM_DUPLI : false;
  }
  else {
    cache_elem->is_dup_ob = false;
  }

  cache_elem->scale = mat4_to_scale(ob->obmat);

  /* save FXs */
  cache_elem->pixfactor = cache_elem->gpd->pixfactor;
  cache_elem->shader_fx = ob->shader_fx;

  /* save wire mode (object mode is always primary option) */
  if (ob->dt == OB_WIRE) {
    cache_elem->shading_type[0] = (int)OB_WIRE;
  }
  else {
    if (v3d) {
      cache_elem->shading_type[0] = (int)v3d->shading.type;
    }
  }

  /* shgrp array */
  cache_elem->tot_layers = 0;
  int totgpl = BLI_listbase_count(&cache_elem->gpd->layers);
  if (totgpl > 0) {
    cache_elem->shgrp_array = MEM_callocN(sizeof(tGPencilObjectCache_shgrp) * totgpl, __func__);
  }

  /* calculate zdepth from point of view */
  float zdepth = 0.0;
  if (rv3d) {
    if (rv3d->is_persp) {
      zdepth = ED_view3d_calc_zfac(rv3d, ob->obmat[3], NULL);
    }
    else {
      zdepth = -dot_v3v3(rv3d->viewinv[2], ob->obmat[3]);
    }
  }
  else {
    /* In render mode, rv3d is not available, so use the distance to camera.
     * The real distance is not important, but the relative distance to the camera plane
     * in order to sort by z_depth of the objects
     */
    float vn[3] = { 0.0f, 0.0f, -1.0f }; /* always face down */
    float plane_cam[4];
    struct Object *camera = draw_ctx->scene->camera;
    if (camera) {
      mul_m4_v3(camera->obmat, vn);
      normalize_v3(vn);
      plane_from_point_normal_v3(plane_cam, camera->loc, vn);
      zdepth = dist_squared_to_plane_v3(ob->obmat[3], plane_cam);
    }
  }
  cache_elem->zdepth = zdepth;
  /* increase slots used in cache */
  (*gp_cache_used)++;

  return cache_array;
}
|
|
|
|
|
2018-11-20 19:26:16 +01:00
|
|
|
/* add a shading group to the cache to create later */
|
|
|
|
GpencilBatchGroup *gpencil_group_cache_add(
|
|
|
|
GpencilBatchGroup *cache_array,
|
|
|
|
bGPDlayer *gpl, bGPDframe *gpf, bGPDstroke *gps,
|
|
|
|
const short type, const bool onion,
|
|
|
|
const int vertex_idx,
|
|
|
|
int *grp_size, int *grp_used)
|
|
|
|
{
|
|
|
|
GpencilBatchGroup *cache_elem = NULL;
|
|
|
|
GpencilBatchGroup *p = NULL;
|
|
|
|
|
|
|
|
/* By default a cache is created with one block with a predefined number of free slots,
|
|
|
|
if the size is not enough, the cache is reallocated adding a new block of free slots.
|
|
|
|
This is done in order to keep cache small */
|
|
|
|
if (*grp_used + 1 > *grp_size) {
|
|
|
|
if ((*grp_size == 0) || (cache_array == NULL)) {
|
|
|
|
p = MEM_callocN(sizeof(struct GpencilBatchGroup) * GPENCIL_GROUPS_BLOCK_SIZE, "GpencilBatchGroup");
|
|
|
|
*grp_size = GPENCIL_GROUPS_BLOCK_SIZE;
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
*grp_size += GPENCIL_GROUPS_BLOCK_SIZE;
|
|
|
|
p = MEM_recallocN(cache_array, sizeof(struct GpencilBatchGroup) * *grp_size);
|
|
|
|
}
|
|
|
|
cache_array = p;
|
|
|
|
}
|
|
|
|
/* zero out all data */
|
|
|
|
cache_elem = &cache_array[*grp_used];
|
|
|
|
memset(cache_elem, 0, sizeof(*cache_elem));
|
|
|
|
|
|
|
|
cache_elem->gpl = gpl;
|
|
|
|
cache_elem->gpf = gpf;
|
|
|
|
cache_elem->gps = gps;
|
|
|
|
cache_elem->type = type;
|
|
|
|
cache_elem->onion = onion;
|
|
|
|
cache_elem->vertex_idx = vertex_idx;
|
|
|
|
|
|
|
|
/* increase slots used in cache */
|
|
|
|
(*grp_used)++;
|
|
|
|
|
|
|
|
return cache_array;
|
|
|
|
}
|
|
|
|
|
2018-07-31 10:22:19 +02:00
|
|
|
/* get current cache data */
|
2018-08-15 22:11:44 +02:00
|
|
|
static GpencilBatchCache *gpencil_batch_get_element(Object *ob)
|
2018-07-31 10:22:19 +02:00
|
|
|
{
|
2019-01-30 16:26:06 +01:00
|
|
|
return ob->runtime.gpencil_cache;
|
2018-07-31 10:22:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* verify if cache is valid */
|
2018-10-19 20:39:21 +02:00
|
|
|
static bool gpencil_batch_cache_valid(GpencilBatchCache *cache, bGPdata *gpd, int cfra)
|
2018-07-31 10:22:19 +02:00
|
|
|
{
|
2018-11-20 19:26:16 +01:00
|
|
|
bool valid = true;
|
2018-07-31 10:22:19 +02:00
|
|
|
if (cache == NULL) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
cache->is_editmode = GPENCIL_ANY_EDIT_MODE(gpd);
|
|
|
|
if (cfra != cache->cache_frame) {
|
2018-11-20 19:26:16 +01:00
|
|
|
valid = false;
|
2018-07-31 10:22:19 +02:00
|
|
|
}
|
2018-11-20 19:26:16 +01:00
|
|
|
else if (gpd->flag & GP_DATA_CACHE_IS_DIRTY) {
|
|
|
|
valid = false;
|
2018-07-31 10:22:19 +02:00
|
|
|
}
|
2018-12-20 16:52:03 +01:00
|
|
|
else if (gpd->flag & GP_DATA_PYTHON_UPDATED) {
|
|
|
|
gpd->flag &= ~GP_DATA_PYTHON_UPDATED;
|
|
|
|
valid = false;
|
|
|
|
}
|
2018-11-20 19:26:16 +01:00
|
|
|
else if (cache->is_editmode) {
|
|
|
|
valid = false;
|
2018-07-31 10:22:19 +02:00
|
|
|
}
|
2018-11-20 19:26:16 +01:00
|
|
|
else if (cache->is_dirty) {
|
|
|
|
valid = false;
|
2018-07-31 10:22:19 +02:00
|
|
|
}
|
2018-11-20 19:26:16 +01:00
|
|
|
|
|
|
|
return valid;
|
2018-07-31 10:22:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* cache init */
|
2018-10-19 20:39:21 +02:00
|
|
|
static GpencilBatchCache *gpencil_batch_cache_init(Object *ob, int cfra)
|
2018-07-31 10:22:19 +02:00
|
|
|
{
|
2019-01-30 16:26:06 +01:00
|
|
|
bGPdata *gpd = (bGPdata *)ob->data;
|
2018-10-19 20:39:21 +02:00
|
|
|
|
2018-07-31 10:22:19 +02:00
|
|
|
GpencilBatchCache *cache = gpencil_batch_get_element(ob);
|
|
|
|
|
|
|
|
if (!cache) {
|
|
|
|
cache = MEM_callocN(sizeof(*cache), __func__);
|
2019-01-30 16:26:06 +01:00
|
|
|
ob->runtime.gpencil_cache = cache;
|
2018-07-31 10:22:19 +02:00
|
|
|
}
|
|
|
|
else {
|
|
|
|
memset(cache, 0, sizeof(*cache));
|
|
|
|
}
|
|
|
|
|
|
|
|
cache->is_editmode = GPENCIL_ANY_EDIT_MODE(gpd);
|
|
|
|
|
|
|
|
cache->is_dirty = true;
|
2018-11-20 19:26:16 +01:00
|
|
|
|
2018-07-31 10:22:19 +02:00
|
|
|
cache->cache_frame = cfra;
|
2018-10-19 20:39:21 +02:00
|
|
|
|
2018-11-20 19:26:16 +01:00
|
|
|
/* create array of derived frames equal to number of layers */
|
|
|
|
cache->tot_layers = BLI_listbase_count(&gpd->layers);
|
|
|
|
CLAMP_MIN(cache->tot_layers, 1);
|
|
|
|
cache->derived_array = MEM_callocN(sizeof(struct bGPDframe) * cache->tot_layers, "Derived GPF");
|
|
|
|
|
2018-10-19 20:39:21 +02:00
|
|
|
return cache;
|
2018-07-31 10:22:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* clear cache */
|
2018-08-15 16:07:16 +02:00
|
|
|
static void gpencil_batch_cache_clear(GpencilBatchCache *cache)
|
2018-07-31 10:22:19 +02:00
|
|
|
{
|
|
|
|
if (!cache) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-11-20 19:26:16 +01:00
|
|
|
GPU_BATCH_DISCARD_SAFE(cache->b_stroke.batch);
|
|
|
|
GPU_BATCH_DISCARD_SAFE(cache->b_point.batch);
|
|
|
|
GPU_BATCH_DISCARD_SAFE(cache->b_fill.batch);
|
|
|
|
GPU_BATCH_DISCARD_SAFE(cache->b_edit.batch);
|
|
|
|
GPU_BATCH_DISCARD_SAFE(cache->b_edlin.batch);
|
|
|
|
|
|
|
|
MEM_SAFE_FREE(cache->b_stroke.batch);
|
|
|
|
MEM_SAFE_FREE(cache->b_point.batch);
|
|
|
|
MEM_SAFE_FREE(cache->b_fill.batch);
|
|
|
|
MEM_SAFE_FREE(cache->b_edit.batch);
|
|
|
|
MEM_SAFE_FREE(cache->b_edlin.batch);
|
|
|
|
|
|
|
|
MEM_SAFE_FREE(cache->grp_cache);
|
|
|
|
cache->grp_size = 0;
|
|
|
|
cache->grp_used = 0;
|
|
|
|
|
|
|
|
/* clear all frames derived data */
|
|
|
|
for (int i = 0; i < cache->tot_layers; i++) {
|
|
|
|
bGPDframe *derived_gpf = &cache->derived_array[i];
|
|
|
|
BKE_gpencil_free_frame_runtime_data(derived_gpf);
|
|
|
|
derived_gpf = NULL;
|
2018-07-31 10:22:19 +02:00
|
|
|
}
|
2018-11-20 19:26:16 +01:00
|
|
|
cache->tot_layers = 0;
|
|
|
|
MEM_SAFE_FREE(cache->derived_array);
|
2018-07-31 10:22:19 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/* get cache */
|
|
|
|
GpencilBatchCache *gpencil_batch_cache_get(Object *ob, int cfra)
|
|
|
|
{
|
2019-01-30 16:26:06 +01:00
|
|
|
bGPdata *gpd = (bGPdata *)ob->data;
|
2018-07-31 10:22:19 +02:00
|
|
|
|
2018-10-19 20:39:21 +02:00
|
|
|
GpencilBatchCache *cache = gpencil_batch_get_element(ob);
|
|
|
|
if (!gpencil_batch_cache_valid(cache, gpd, cfra)) {
|
2018-07-31 10:22:19 +02:00
|
|
|
if (cache) {
|
2018-08-15 16:07:16 +02:00
|
|
|
gpencil_batch_cache_clear(cache);
|
2018-07-31 10:22:19 +02:00
|
|
|
}
|
2018-10-19 20:39:21 +02:00
|
|
|
return gpencil_batch_cache_init(ob, cfra);
|
|
|
|
}
|
|
|
|
else {
|
|
|
|
return cache;
|
2018-07-31 10:22:19 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* set cache as dirty */

/* Tag the grease pencil data-block so its draw cache is rebuilt on next
 * use (checked in gpencil_batch_cache_valid). */
void DRW_gpencil_batch_cache_dirty_tag(bGPdata *gpd)
{
  gpd->flag |= GP_DATA_CACHE_IS_DIRTY;
}
|
|
|
|
|
|
|
|
/* free batch cache */
|
2018-10-21 16:22:31 +11:00
|
|
|
void DRW_gpencil_batch_cache_free(bGPdata *UNUSED(gpd))
|
2018-07-31 10:22:19 +02:00
|
|
|
{
|
2018-10-19 20:39:21 +02:00
|
|
|
return;
|
|
|
|
}
|
2018-07-31 10:22:19 +02:00
|
|
|
|
2018-10-19 20:39:21 +02:00
|
|
|
/* wrapper to clear cache */
|
|
|
|
void DRW_gpencil_freecache(struct Object *ob)
|
|
|
|
{
|
|
|
|
if ((ob) && (ob->type == OB_GPENCIL)) {
|
|
|
|
gpencil_batch_cache_clear(ob->runtime.gpencil_cache);
|
|
|
|
MEM_SAFE_FREE(ob->runtime.gpencil_cache);
|
|
|
|
bGPdata *gpd = (bGPdata *)ob->data;
|
|
|
|
if (gpd) {
|
|
|
|
gpd->flag |= GP_DATA_CACHE_IS_DIRTY;
|
2018-07-31 10:22:19 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|