/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2014 Blender Foundation. All rights reserved. */

/** \file
 * \ingroup gpu
 *
 * Interface for accessing GPU-related methods for selection. The semantics are
 * similar to `glRenderMode(GL_SELECT)` from older OpenGL versions.
 */
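
/*
 * A minimal usage sketch of the begin / load-id / end flow, for orientation only.
 * It is not taken from this file: `region_rect`, `objects`, `objects_len` and
 * `draw_object_for_select()` are hypothetical caller-side names.
 *
 *   GPUSelectResult buffer[256];
 *   GPU_select_begin(buffer, ARRAY_SIZE(buffer), &region_rect, GPU_SELECT_ALL, 0);
 *   for (int i = 0; i < objects_len; i++) {
 *     GPU_select_load_id(i + 1); // IDs are caller-defined; hits report this value.
 *     draw_object_for_select(objects[i]);
 *   }
 *   const uint hits = GPU_select_end(); // Number of results written to `buffer`.
 */
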
#include <stdlib.h>
#include <string.h>

#include "GPU_select.h"

#include "BLI_rect.h"

#include "BLI_utildefines.h"

#include "gpu_select_private.h"

/* -------------------------------------------------------------------- */
/** \name Internal Types
 * \{ */

/* Internal algorithm used. */
typedef enum eGPUSelectAlgo {
  /** `glBegin/EndQuery(GL_SAMPLES_PASSED, ...)`, `gpu_select_query.c`.
   * Only sets 4th component (ID) correctly. */
  ALGO_GL_QUERY = 1,
  /** Read the depth buffer for every drawing pass and extract depths, `gpu_select_pick.c`.
   * Only sets 4th component (ID) correctly. */
  ALGO_GL_PICK = 2,
} eGPUSelectAlgo;

typedef struct GPUSelectState {
  /* To ignore selection id calls when not initialized. */
  bool select_is_active;
  /* Mode of operation. */
  eGPUSelectMode mode;
  /* Internal algorithm for selection. */
  eGPUSelectAlgo algorithm;
  /* Allow GPU_select_begin/end without drawing. */
  bool use_cache;
  /**
   * Signifies that #GPU_select_cache_begin has been called,
   * future calls to #GPU_select_begin should initialize the cache.
   *
   * \note #GPU_select_cache_begin could perform initialization but doesn't, as it's inconvenient
   * for callers making the cache begin/end calls outside lower level selection logic,
   * where the `mode` to pass to #GPU_select_begin isn't yet known.
   */
  bool use_cache_needs_init;
} GPUSelectState;

static GPUSelectState g_select_state = {0};

/** \} */

/* -------------------------------------------------------------------- */
/** \name Public API
 * \{ */

void GPU_select_begin(GPUSelectResult *buffer,
                      const uint buffer_len,
                      const rcti *input,
                      eGPUSelectMode mode,
                      int oldhits)
{
  if (mode == GPU_SELECT_NEAREST_SECOND_PASS) {
    /* In the case hits was '-1', don't start the second pass since it's not going to give
     * useful results, and would also cause a buffer overflow in 'gpu_select_query_load_id'. */
    BLI_assert(oldhits != -1);
  }

  g_select_state.select_is_active = true;
  g_select_state.mode = mode;

  if (ELEM(g_select_state.mode, GPU_SELECT_PICK_ALL, GPU_SELECT_PICK_NEAREST)) {
    g_select_state.algorithm = ALGO_GL_PICK;
  }
  else {
    g_select_state.algorithm = ALGO_GL_QUERY;
  }

  /* This function is called when the cache has already been initialized,
   * so only manipulate cache values when the cache is pending. */
  if (g_select_state.use_cache_needs_init) {
    g_select_state.use_cache_needs_init = false;

    switch (g_select_state.algorithm) {
      case ALGO_GL_QUERY: {
        g_select_state.use_cache = false;
        break;
      }
      default: {
        g_select_state.use_cache = true;
        gpu_select_pick_cache_begin();
        break;
      }
    }
  }

  switch (g_select_state.algorithm) {
    case ALGO_GL_QUERY: {
      gpu_select_query_begin(buffer, buffer_len, input, mode, oldhits);
      break;
    }
    default: /* ALGO_GL_PICK */
    {
      gpu_select_pick_begin(buffer, buffer_len, input, mode);
      break;
    }
  }
}

bool GPU_select_load_id(uint id)
{
  /* If no selection mode is active, ignore. */
  if (!g_select_state.select_is_active) {
    return true;
  }

  switch (g_select_state.algorithm) {
    case ALGO_GL_QUERY: {
      return gpu_select_query_load_id(id);
    }
    default: /* ALGO_GL_PICK */
    {
      return gpu_select_pick_load_id(id, false);
    }
  }
}

uint GPU_select_end(void)
{
  uint hits = 0;

  switch (g_select_state.algorithm) {
    case ALGO_GL_QUERY: {
      hits = gpu_select_query_end();
      break;
    }
    default: /* ALGO_GL_PICK */
    {
      hits = gpu_select_pick_end();
      break;
    }
  }

  g_select_state.select_is_active = false;

  return hits;
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Caching
 *
 * Support multiple begin/end's as long as they are within the initial region.
 * Currently only used by #ALGO_GL_PICK.
 * \{ */
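
/*
 * One possible call pattern, for orientation only (not taken from this file):
 * `region_rect`, `buffer`, `buffer_len` and `draw_pass()` are hypothetical caller-side names.
 * The cache calls wrap any number of begin/end runs over the same region;
 * cached runs can skip re-drawing.
 *
 *   GPU_select_cache_begin();
 *
 *   GPU_select_begin(buffer, buffer_len, &region_rect, GPU_SELECT_PICK_NEAREST, 0);
 *   if (GPU_select_is_cached()) {
 *     GPU_select_cache_load_id(); // Replay cached data instead of drawing again.
 *   }
 *   else {
 *     draw_pass(); // Issues GPU_select_load_id() calls while drawing.
 *   }
 *   const uint hits = GPU_select_end();
 *
 *   // ... further begin/end runs within `region_rect` may follow ...
 *
 *   GPU_select_cache_end();
 */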

void GPU_select_cache_begin(void)
{
  BLI_assert(g_select_state.select_is_active == false);
  /* Ensure #GPU_select_cache_end is always called. */
  BLI_assert(g_select_state.use_cache_needs_init == false);

  /* Signal that the cache should be used, instead of calling the algorithm's cache-begin
   * function. This is more convenient as the exact method of selection may not be known
   * by the caller. */
  g_select_state.use_cache_needs_init = true;
}

void GPU_select_cache_load_id(void)
{
  BLI_assert(g_select_state.use_cache == true);
  if (g_select_state.algorithm == ALGO_GL_PICK) {
    gpu_select_pick_cache_load_id();
  }
}

void GPU_select_cache_end(void)
{
  if (g_select_state.algorithm == ALGO_GL_PICK) {
    BLI_assert(g_select_state.use_cache == true);
    gpu_select_pick_cache_end();
  }
  g_select_state.use_cache = false;
  /* Paranoid assignment, should already be false. */
  g_select_state.use_cache_needs_init = false;
}

bool GPU_select_is_cached(void)
{
  return g_select_state.use_cache && gpu_select_pick_is_cached();
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name Utilities
 * \{ */
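
/**
 * Return the hit with the smallest depth from the first `hits` entries of `buffer`,
 * or NULL when there are no hits.
 */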
const GPUSelectResult *GPU_select_buffer_near(const GPUSelectResult *buffer, int hits)
{
  const GPUSelectResult *buffer_near = NULL;
  uint depth_min = (uint)-1;
  for (int i = 0; i < hits; i++) {
    if (buffer->depth < depth_min) {
      BLI_assert(buffer->id != -1);
      depth_min = buffer->depth;
      buffer_near = buffer;
    }
    buffer++;
  }
  return buffer_near;
}
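
/**
 * Remove every hit with `select_id` from the first `hits` entries of `buffer`,
 * compacting the remaining entries in-place, and return the new hit count.
 */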
uint GPU_select_buffer_remove_by_id(GPUSelectResult *buffer, int hits, uint select_id)
{
  GPUSelectResult *buffer_src = buffer;
  GPUSelectResult *buffer_dst = buffer;
  int hits_final = 0;
  for (int i = 0; i < hits; i++) {
    if (buffer_src->id != select_id) {
      if (buffer_dst != buffer_src) {
        memcpy(buffer_dst, buffer_src, sizeof(GPUSelectResult));
      }
      buffer_dst++;
      hits_final += 1;
    }
    buffer_src++;
  }
  return hits_final;
}
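
/**
 * Expand `r_buf` in-place: its values were written for the smaller `dst` rectangle
 * (packed with a stride of the `dst` width); rewrite them using the stride of the
 * enclosing `src` rectangle, zero-filling every pixel outside `dst`.
 */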
void GPU_select_buffer_stride_realign(const rcti *src, const rcti *dst, uint *r_buf)
{
  const int x = dst->xmin - src->xmin;
  const int y = dst->ymin - src->ymin;

  BLI_assert(src->xmin <= dst->xmin && src->ymin <= dst->ymin && src->xmax >= dst->xmax &&
             src->ymax >= dst->ymax);
  BLI_assert(x >= 0 && y >= 0);

  const int src_x = BLI_rcti_size_x(src);
  const int src_y = BLI_rcti_size_y(src);
  const int dst_x = BLI_rcti_size_x(dst);
  const int dst_y = BLI_rcti_size_y(dst);

  /* Zero everything after the last destination pixel (in the `src` layout). */
  int last_px_id = src_x * (y + dst_y - 1) + (x + dst_x - 1);
  memset(&r_buf[last_px_id + 1], 0, (src_x * src_y - (last_px_id + 1)) * sizeof(*r_buf));

  if (last_px_id < 0) {
    /* Nothing to write. */
    BLI_assert(last_px_id == -1);
    return;
  }

  int last_px_written = dst_x * dst_y - 1;
  const int skip = src_x - dst_x;

  /* Copy rows from back to front, zero-filling the skipped stride between rows. */
  while (true) {
    for (int i = dst_x; i--;) {
      r_buf[last_px_id--] = r_buf[last_px_written--];
    }
    if (last_px_written < 0) {
      break;
    }
    last_px_id -= skip;
    memset(&r_buf[last_px_id + 1], 0, skip * sizeof(*r_buf));
  }
  /* Zero everything before the first destination pixel. */
  memset(r_buf, 0, (last_px_id + 1) * sizeof(*r_buf));
}

/** \} */