2016-09-15 18:41:28 +02:00
|
|
|
/*
|
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
|
* as published by the Free Software Foundation; either version 2
|
|
|
|
|
* of the License, or (at your option) any later version.
|
|
|
|
|
*
|
|
|
|
|
* This program is distributed in the hope that it will be useful,
|
|
|
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
|
* GNU General Public License for more details.
|
|
|
|
|
*
|
|
|
|
|
* You should have received a copy of the GNU General Public License
|
|
|
|
|
* along with this program; if not, write to the Free Software Foundation,
|
|
|
|
|
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
|
|
|
|
*
|
2018-07-17 21:11:23 +02:00
|
|
|
* The Original Code is Copyright (C) 2016 by Mike Erwin.
|
2016-09-15 18:41:28 +02:00
|
|
|
* All rights reserved.
|
|
|
|
|
*/
|
|
|
|
|
|
2019-02-18 08:08:12 +11:00
|
|
|
/** \file
|
|
|
|
|
* \ingroup gpu
|
2018-07-17 21:11:23 +02:00
|
|
|
*
|
2018-07-18 00:12:21 +02:00
|
|
|
* GPU geometry batch
|
2018-07-17 21:11:23 +02:00
|
|
|
* Contains VAOs + VBOs + Shader representing a drawable entity.
|
2018-01-15 16:21:23 +11:00
|
|
|
*/
|
|
|
|
|
|
2018-07-31 16:54:58 +02:00
|
|
|
#include "MEM_guardedalloc.h"
|
|
|
|
|
|
2020-08-11 01:31:40 +02:00
|
|
|
#include "BLI_math_base.h"
|
|
|
|
|
|
2018-07-17 21:11:23 +02:00
|
|
|
#include "GPU_batch.h"
|
2018-07-15 10:34:31 +02:00
|
|
|
#include "GPU_batch_presets.h"
|
2019-07-31 16:35:06 -03:00
|
|
|
#include "GPU_extensions.h"
|
2018-07-17 21:11:23 +02:00
|
|
|
#include "GPU_matrix.h"
|
2020-03-19 09:33:03 +01:00
|
|
|
#include "GPU_platform.h"
|
2018-07-17 21:11:23 +02:00
|
|
|
#include "GPU_shader.h"
|
|
|
|
|
|
2020-08-10 11:41:22 +02:00
|
|
|
#include "gpu_backend.hh"
|
2020-08-09 01:21:34 +02:00
|
|
|
#include "gpu_batch_private.hh"
|
2020-08-08 03:01:45 +02:00
|
|
|
#include "gpu_context_private.hh"
|
2018-07-17 21:11:23 +02:00
|
|
|
#include "gpu_primitive_private.h"
|
2016-09-15 18:41:28 +02:00
|
|
|
#include "gpu_shader_private.h"
|
2020-07-27 23:56:43 +02:00
|
|
|
#include "gpu_vertex_format_private.h"
|
2016-09-15 18:41:28 +02:00
|
|
|
|
2020-03-19 09:33:03 +01:00
|
|
|
#include <limits.h>
|
2018-07-17 21:11:23 +02:00
|
|
|
#include <stdlib.h>
|
|
|
|
|
#include <string.h>
|
|
|
|
|
|
2020-08-10 11:41:22 +02:00
|
|
|
using namespace blender::gpu;
|
|
|
|
|
|
2020-08-11 01:31:40 +02:00
|
|
|
/* No-op: kept only so existing callers keep compiling (see TODO below). */
void GPU_batch_vao_cache_clear(GPUBatch *UNUSED(batch))
{
  /* TODO remove */
}
|
|
|
|
|
|
2020-08-11 01:31:40 +02:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Creation & Deletion
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2020-08-10 11:41:22 +02:00
|
|
|
/* Allocate a batch through the active backend and zero-initialize it.
 * Caller owns the returned batch (free with GPU_batch_discard). */
GPUBatch *GPU_batch_calloc(void)
{
  GPUBackend *backend = GPUBackend::get();
  GPUBatch *batch = backend->batch_alloc();
  /* Zero the common GPUBatch part: no VBOs, no element buffer, flags cleared. */
  memset(batch, 0, sizeof(*batch));
  return batch;
}
|
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Allocate and initialize a batch in one call.
 * `owns_flag` may only contain GPU_BATCH_OWNS_VBO / GPU_BATCH_OWNS_INDEX
 * (asserted in GPU_batch_init_ex). Caller owns the returned batch. */
GPUBatch *GPU_batch_create_ex(GPUPrimType prim_type,
                              GPUVertBuf *verts,
                              GPUIndexBuf *elem,
                              eGPUBatchFlag owns_flag)
{
  GPUBatch *batch = GPU_batch_calloc();
  GPU_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
  return batch;
}
|
|
|
|
|
|
2020-08-10 03:17:35 +02:00
|
|
|
/* Initialize an already-allocated batch.
 * `verts` is mandatory; `elem` is stored as-is (not asserted).
 * `owns_flag` may only contain GPU_BATCH_OWNS_VBO / GPU_BATCH_OWNS_INDEX. */
void GPU_batch_init_ex(GPUBatch *batch,
                       GPUPrimType prim_type,
                       GPUVertBuf *verts,
                       GPUIndexBuf *elem,
                       eGPUBatchFlag owns_flag)
{
  BLI_assert(verts != NULL);
  /* Do not pass any other flag */
  BLI_assert((owns_flag & ~(GPU_BATCH_OWNS_VBO | GPU_BATCH_OWNS_INDEX)) == 0);

  /* The first slot holds the main vertex buffer; remaining slots start empty. */
  batch->verts[0] = verts;
  for (int i = 1; i < GPU_BATCH_VBO_MAX_LEN; i++) {
    batch->verts[i] = NULL;
  }
  /* No instance buffers by default. */
  for (int i = 0; i < GPU_BATCH_INST_VBO_MAX_LEN; i++) {
    batch->inst[i] = NULL;
  }
  batch->shader = NULL;
  batch->elem = elem;
  batch->prim_type = prim_type;
  /* Mark initialized and dirty so backend state is (re)built before drawing. */
  batch->flag = owns_flag | GPU_BATCH_INIT | GPU_BATCH_DIRTY;
}
|
|
|
|
|
|
|
|
|
|
/* This will share the VBOs with the new batch. */
void GPU_batch_copy(GPUBatch *batch_dst, GPUBatch *batch_src)
{
  /* Init with a dummy primitive type (corrected just below) and
   * GPU_BATCH_INVALID so batch_dst takes no ownership of the shared buffers
   * (the init assert only permits OWNS bits, and none are intended here). */
  GPU_batch_init_ex(
      batch_dst, GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, GPU_BATCH_INVALID);

  batch_dst->prim_type = batch_src->prim_type;
  /* Share the remaining vertex buffers.
   * NOTE: instance buffers are not copied — batch_dst->inst[] stays NULL. */
  for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; v++) {
    batch_dst->verts[v] = batch_src->verts[v];
  }
}
|
|
|
|
|
|
2018-12-14 00:07:59 +01:00
|
|
|
/* Free every buffer this batch owns (per its ownership flags) and mark the
 * batch invalid. The batch memory itself is not freed (see GPU_batch_discard). */
void GPU_batch_clear(GPUBatch *batch)
{
  if (batch->flag & GPU_BATCH_OWNS_INDEX) {
    GPU_indexbuf_discard(batch->elem);
  }
  if (batch->flag & GPU_BATCH_OWNS_VBO_ANY) {
    /* Slots are filled contiguously (see GPU_batch_vertbuf_add_ex), so stop
     * at the first NULL entry. Per-slot ownership bit gates each discard. */
    for (int v = 0; (v < GPU_BATCH_VBO_MAX_LEN) && batch->verts[v]; v++) {
      if (batch->flag & (GPU_BATCH_OWNS_VBO << v)) {
        GPU_VERTBUF_DISCARD_SAFE(batch->verts[v]);
      }
    }
  }
  if (batch->flag & GPU_BATCH_OWNS_INST_VBO_ANY) {
    for (int v = 0; (v < GPU_BATCH_INST_VBO_MAX_LEN) && batch->inst[v]; v++) {
      if (batch->flag & (GPU_BATCH_OWNS_INST_VBO << v)) {
        GPU_VERTBUF_DISCARD_SAFE(batch->inst[v]);
      }
    }
  }
  /* Clearing all flags also drops GPU_BATCH_INIT: batch must be re-inited. */
  batch->flag = GPU_BATCH_INVALID;
}
|
|
|
|
|
|
|
|
|
|
/* Free the batch's owned buffers, then the batch itself. */
void GPU_batch_discard(GPUBatch *batch)
{
  GPU_batch_clear(batch);

  /* Batches are always allocated as backend Batch objects
   * (see GPU_batch_calloc), so downcast before deleting. */
  delete static_cast<Batch *>(batch);
}
|
|
|
|
|
|
2020-08-11 01:31:40 +02:00
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Buffers Management
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2020-08-10 03:17:35 +02:00
|
|
|
/* NOTE: Override ONLY the first instance vbo (and free them if owned). */
|
2018-07-18 23:09:31 +10:00
|
|
|
void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *inst, bool own_vbo)
|
2018-07-17 21:11:23 +02:00
|
|
|
{
|
2020-08-10 03:17:35 +02:00
|
|
|
BLI_assert(inst);
|
2020-08-11 01:31:40 +02:00
|
|
|
batch->flag |= GPU_BATCH_DIRTY;
|
2018-07-17 21:11:23 +02:00
|
|
|
|
2020-08-10 03:17:35 +02:00
|
|
|
if (batch->inst[0] && (batch->flag & GPU_BATCH_OWNS_INST_VBO)) {
|
2019-12-02 01:40:58 +01:00
|
|
|
GPU_vertbuf_discard(batch->inst[0]);
|
2019-04-17 06:17:24 +02:00
|
|
|
}
|
2019-12-02 01:40:58 +01:00
|
|
|
batch->inst[0] = inst;
|
2018-07-17 21:11:23 +02:00
|
|
|
|
2020-08-10 03:17:35 +02:00
|
|
|
SET_FLAG_FROM_TEST(batch->flag, own_vbo, GPU_BATCH_OWNS_INST_VBO);
|
2018-07-17 21:11:23 +02:00
|
|
|
}
|
|
|
|
|
|
2020-08-10 03:17:35 +02:00
|
|
|
/* NOTE: Override any previously assigned elem (and free it if owned). */
|
2019-07-14 16:49:44 +02:00
|
|
|
void GPU_batch_elembuf_set(GPUBatch *batch, GPUIndexBuf *elem, bool own_ibo)
|
|
|
|
|
{
|
2020-08-10 03:17:35 +02:00
|
|
|
BLI_assert(elem);
|
2020-08-11 01:31:40 +02:00
|
|
|
batch->flag |= GPU_BATCH_DIRTY;
|
2019-07-14 16:49:44 +02:00
|
|
|
|
2020-08-10 03:17:35 +02:00
|
|
|
if (batch->elem && (batch->flag & GPU_BATCH_OWNS_INDEX)) {
|
2019-07-14 16:49:44 +02:00
|
|
|
GPU_indexbuf_discard(batch->elem);
|
|
|
|
|
}
|
|
|
|
|
batch->elem = elem;
|
|
|
|
|
|
2020-08-10 03:17:35 +02:00
|
|
|
SET_FLAG_FROM_TEST(batch->flag, own_ibo, GPU_BATCH_OWNS_INDEX);
|
2019-07-14 16:49:44 +02:00
|
|
|
}
|
|
|
|
|
|
2019-12-02 01:40:58 +01:00
|
|
|
/* Append `insts` to the first free instance-buffer slot and return its index.
 * Returns -1 (after asserting) when every slot is already taken. */
int GPU_batch_instbuf_add_ex(GPUBatch *batch, GPUVertBuf *insts, bool own_vbo)
{
  BLI_assert(insts);
  batch->flag |= GPU_BATCH_DIRTY;

  for (uint slot = 0; slot < GPU_BATCH_INST_VBO_MAX_LEN; slot++) {
    if (batch->inst[slot] != NULL) {
      /* Slot taken, try the next one. */
      continue;
    }
    /* Instance buffers are allowed to differ in size: drawing uses the
     * smallest vertex count (see GPU_batch_draw_advanced). */
    // BLI_assert(insts->vertex_len == batch->inst[0]->vertex_len);

    batch->inst[slot] = insts;
    SET_FLAG_FROM_TEST(batch->flag, own_vbo, (eGPUBatchFlag)(GPU_BATCH_OWNS_INST_VBO << slot));
    return slot;
  }
  /* We only get here when there is no room for another GPUVertBuf. */
  BLI_assert(0 && "Not enough Instance VBO slot in batch");
  return -1;
}
|
|
|
|
|
|
2018-07-17 21:11:23 +02:00
|
|
|
/* Returns the index of verts in the batch. */
|
2019-04-17 06:17:24 +02:00
|
|
|
int GPU_batch_vertbuf_add_ex(GPUBatch *batch, GPUVertBuf *verts, bool own_vbo)
|
2018-07-17 21:11:23 +02:00
|
|
|
{
|
2020-08-10 03:17:35 +02:00
|
|
|
BLI_assert(verts);
|
2020-08-11 01:31:40 +02:00
|
|
|
batch->flag |= GPU_BATCH_DIRTY;
|
2018-07-17 21:11:23 +02:00
|
|
|
|
2019-09-08 00:12:26 +10:00
|
|
|
for (uint v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
|
2019-04-17 06:17:24 +02:00
|
|
|
if (batch->verts[v] == NULL) {
|
|
|
|
|
/* for now all VertexBuffers must have same vertex_len */
|
2019-10-16 19:28:10 +02:00
|
|
|
if (batch->verts[0] != NULL) {
|
2020-08-10 03:17:35 +02:00
|
|
|
BLI_assert(verts->vertex_len == batch->verts[0]->vertex_len);
|
2019-10-16 19:28:10 +02:00
|
|
|
}
|
2019-04-17 06:17:24 +02:00
|
|
|
batch->verts[v] = verts;
|
2020-08-10 03:17:35 +02:00
|
|
|
SET_FLAG_FROM_TEST(batch->flag, own_vbo, (eGPUBatchFlag)(GPU_BATCH_OWNS_VBO << v));
|
2019-04-17 06:17:24 +02:00
|
|
|
return v;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
/* we only make it this far if there is no room for another GPUVertBuf */
|
2020-08-10 03:17:35 +02:00
|
|
|
BLI_assert(0 && "Not enough VBO slot in batch");
|
2019-04-17 06:17:24 +02:00
|
|
|
return -1;
|
2018-07-17 21:11:23 +02:00
|
|
|
}
|
|
|
|
|
|
2020-08-11 01:31:40 +02:00
|
|
|
/** \} */
|
2018-07-17 21:11:23 +02:00
|
|
|
|
2020-08-11 01:31:40 +02:00
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Uniform setters
|
|
|
|
|
*
|
|
|
|
|
* TODO(fclem) port this to GPUShader.
|
|
|
|
|
* \{ */
|
2018-07-17 21:11:23 +02:00
|
|
|
|
2020-08-09 00:52:45 +02:00
|
|
|
/* Set the shader the batch will be drawn with, and bind it immediately. */
void GPU_batch_set_shader(GPUBatch *batch, GPUShader *shader)
{
  batch->shader = shader;
  GPU_shader_bind(batch->shader);
}
|
|
|
|
|
|
2020-08-09 01:49:06 +02:00
|
|
|
/* Look up `name` in the batch's shader interface and declare `uniform` for the
 * caller. Deliberately NOT wrapped in do-while: `uniform` must stay in scope. */
#define GET_UNIFORM \
  const GPUShaderInput *uniform = GPU_shaderinterface_uniform(batch->shader->interface, name); \
  BLI_assert(uniform);
|
2018-07-17 21:11:23 +02:00
|
|
|
|
2018-07-18 23:09:31 +10:00
|
|
|
/* Set an int uniform on the batch's shader (asserts if `name` is not found). */
void GPU_batch_uniform_1i(GPUBatch *batch, const char *name, int value)
{
  GET_UNIFORM
  GPU_shader_uniform_int(batch->shader, uniform->location, value);
}
|
|
|
|
|
|
2018-07-18 23:09:31 +10:00
|
|
|
/* Set a bool uniform on the batch's shader.
 * Bool uniforms are plain ints shader-side; use literal 1/0 instead of the
 * GL-specific GL_TRUE/GL_FALSE (identical values) so this backend-agnostic
 * wrapper does not depend on GL headers. */
void GPU_batch_uniform_1b(GPUBatch *batch, const char *name, bool value)
{
  GPU_batch_uniform_1i(batch, name, value ? 1 : 0);
}
|
|
|
|
|
|
2018-07-18 23:09:31 +10:00
|
|
|
/* Set a vec2 uniform from two scalar components. */
void GPU_batch_uniform_2f(GPUBatch *batch, const char *name, float x, float y)
{
  const float xy[2] = {x, y};
  GPU_batch_uniform_2fv(batch, name, xy);
}
|
|
|
|
|
|
2018-07-18 23:09:31 +10:00
|
|
|
/* Set a vec3 uniform from three scalar components. */
void GPU_batch_uniform_3f(GPUBatch *batch, const char *name, float x, float y, float z)
{
  const float xyz[3] = {x, y, z};
  GPU_batch_uniform_3fv(batch, name, xyz);
}
|
|
|
|
|
|
2018-07-18 23:09:31 +10:00
|
|
|
/* Set a vec4 uniform from four scalar components. */
void GPU_batch_uniform_4f(GPUBatch *batch, const char *name, float x, float y, float z, float w)
{
  const float xyzw[4] = {x, y, z, w};
  GPU_batch_uniform_4fv(batch, name, xyzw);
}
|
|
|
|
|
|
2018-07-18 23:09:31 +10:00
|
|
|
/* Set a float uniform on the batch's shader (asserts if `name` is not found). */
void GPU_batch_uniform_1f(GPUBatch *batch, const char *name, float x)
{
  GET_UNIFORM
  GPU_shader_uniform_float(batch->shader, uniform->location, x);
}
|
|
|
|
|
|
2018-07-18 23:09:31 +10:00
|
|
|
/* Set a vec2 uniform from a 2-float array. */
void GPU_batch_uniform_2fv(GPUBatch *batch, const char *name, const float data[2])
{
  GET_UNIFORM
  GPU_shader_uniform_vector(batch->shader, uniform->location, 2, 1, data);
}
|
|
|
|
|
|
2018-07-18 23:09:31 +10:00
|
|
|
/* Set a vec3 uniform from a 3-float array. */
void GPU_batch_uniform_3fv(GPUBatch *batch, const char *name, const float data[3])
{
  GET_UNIFORM
  GPU_shader_uniform_vector(batch->shader, uniform->location, 3, 1, data);
}
|
|
|
|
|
|
2018-07-18 23:09:31 +10:00
|
|
|
/* Set a vec4 uniform from a 4-float array. */
void GPU_batch_uniform_4fv(GPUBatch *batch, const char *name, const float data[4])
{
  GET_UNIFORM
  GPU_shader_uniform_vector(batch->shader, uniform->location, 4, 1, data);
}
|
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Set a vec2[len] uniform array; `data` must hold 2 * len floats. */
void GPU_batch_uniform_2fv_array(GPUBatch *batch,
                                 const char *name,
                                 const int len,
                                 const float *data)
{
  GET_UNIFORM
  GPU_shader_uniform_vector(batch->shader, uniform->location, 2, len, data);
}
|
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Set a vec4[len] uniform array; `data` must hold 4 * len floats. */
void GPU_batch_uniform_4fv_array(GPUBatch *batch,
                                 const char *name,
                                 const int len,
                                 const float *data)
{
  GET_UNIFORM
  GPU_shader_uniform_vector(batch->shader, uniform->location, 4, len, data);
}
|
|
|
|
|
|
2018-07-18 23:09:31 +10:00
|
|
|
/* Set a mat4 uniform; the 4x4 matrix is passed as 16 contiguous floats. */
void GPU_batch_uniform_mat4(GPUBatch *batch, const char *name, const float data[4][4])
{
  GET_UNIFORM
  GPU_shader_uniform_vector(batch->shader, uniform->location, 16, 1, (const float *)data);
}
|
|
|
|
|
|
2020-08-09 01:49:06 +02:00
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Drawing / Drawcall functions
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2018-07-18 23:09:31 +10:00
|
|
|
/* Draw the whole batch once, binding/unbinding its shader around the call.
 * (0, 0, 0, 0) lets GPU_batch_draw_advanced derive the full vertex and
 * instance counts from the attached buffers. */
void GPU_batch_draw(GPUBatch *batch)
{
  GPU_shader_bind(batch->shader);
  GPU_batch_draw_advanced(batch, 0, 0, 0, 0);
  GPU_shader_unbind();
}
|
|
|
|
|
|
|
|
|
|
/* Draw only the [v_first, v_first + v_count) range of the batch,
 * binding/unbinding its shader around the call. */
void GPU_batch_draw_range(GPUBatch *batch, int v_first, int v_count)
{
  GPU_shader_bind(batch->shader);
  GPU_batch_draw_advanced(batch, v_first, v_count, 0, 0);
  GPU_shader_unbind();
}
|
2018-07-17 21:11:23 +02:00
|
|
|
|
2020-08-09 03:01:13 +02:00
|
|
|
/* Draw multiple instance of a batch without having any instance attributes. */
void GPU_batch_draw_instanced(GPUBatch *batch, int i_count)
{
  /* An attached instance buffer would supply its own instance count. */
  BLI_assert(batch->inst[0] == NULL);

  GPU_shader_bind(batch->shader);
  GPU_batch_draw_advanced(batch, 0, 0, 0, i_count);
  GPU_shader_unbind();
}
|
|
|
|
|
|
2019-05-22 11:31:49 +02:00
|
|
|
/* Core draw entry point. A shader must already be bound (callers like
 * GPU_batch_draw handle that). v_count == 0 means "use the whole buffer";
 * i_count == 0 means "use the instance buffer's count" (or 1 when there is
 * no instance buffer). */
void GPU_batch_draw_advanced(GPUBatch *batch, int v_first, int v_count, int i_first, int i_count)
{
  BLI_assert(GPU_context_active_get()->shader != NULL);

  if (v_count == 0) {
    /* Indexed draws cover the whole index buffer, else the whole VBO. */
    v_count = (batch->elem) ? batch->elem->index_len : batch->verts[0]->vertex_len;
  }
  if (i_count == 0) {
    i_count = (batch->inst[0]) ? batch->inst[0]->vertex_len : 1;
    /* Meh. This is to be able to use different numbers of verts in instance vbos. */
    if (batch->inst[1] != NULL) {
      i_count = min_ii(i_count, batch->inst[1]->vertex_len);
    }
  }

  if (v_count == 0 || i_count == 0) {
    /* Nothing to draw. */
    return;
  }

  /* Dispatch to the backend implementation. */
  static_cast<Batch *>(batch)->draw(v_first, v_count, i_first, i_count);
}
|
|
|
|
|
|
|
|
|
|
/* Just draw some vertices and let the shader place them where we want.
 * Issues a raw (non-batch) GL draw of `v_count` vertices with no attributes. */
void GPU_draw_primitive(GPUPrimType prim_type, int v_count)
{
  /* we cannot draw without vao ... annoying ... */
  glBindVertexArray(GPU_vao_default());

  GLenum type = convert_prim_type_to_gl(prim_type);
  glDrawArrays(type, 0, v_count);

  /* Performance hog if you are drawing with the same vao multiple time.
   * Only activate for debugging.*/
  // glBindVertexArray(0);
}
|
2018-07-17 21:11:23 +02:00
|
|
|
|
2020-08-09 01:49:06 +02:00
|
|
|
/** \} */
|
|
|
|
|
|
2019-05-31 01:45:41 +02:00
|
|
|
/* -------------------------------------------------------------------- */
|
2018-01-15 16:21:23 +11:00
|
|
|
/** \name Utilities
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2019-04-17 06:17:24 +02:00
|
|
|
/* Bind one of the builtin shaders (looked up with the given config) to the batch. */
void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
                                               eGPUBuiltinShader shader_id,
                                               eGPUShaderConfig sh_cfg)
{
  GPU_batch_set_shader(batch, GPU_shader_get_builtin_shader_with_config(shader_id, sh_cfg));
}
|
2017-02-08 00:38:07 +01:00
|
|
|
|
2019-02-06 10:33:14 +11:00
|
|
|
/* Shortcut for GPU_batch_program_set_builtin_with_config with the default config. */
void GPU_batch_program_set_builtin(GPUBatch *batch, eGPUBuiltinShader shader_id)
{
  GPU_batch_program_set_builtin_with_config(batch, shader_id, GPU_SHADER_CFG_DEFAULT);
}
|
|
|
|
|
|
2020-02-11 15:18:55 +01:00
|
|
|
/* Bind program bound to IMM to the batch.
 * XXX Use this with much care. Drawing with the GPUBatch API is not compatible with IMM.
 * DO NOT DRAW WITH THE BATCH BEFORE CALLING immUnbindProgram. */
void GPU_batch_program_set_imm_shader(GPUBatch *batch)
{
  GPU_batch_set_shader(batch, immGetShader());
}
|
|
|
|
|
|
2018-01-15 20:38:06 +11:00
|
|
|
/** \} */
|
|
|
|
|
|
|
|
|
|
/* -------------------------------------------------------------------- */
|
|
|
|
|
/** \name Init/Exit
|
|
|
|
|
* \{ */
|
|
|
|
|
|
2017-02-08 00:38:07 +01:00
|
|
|
/* Module init: set up the shared preset batches. */
void gpu_batch_init(void)
{
  gpu_batch_presets_init();
}
|
|
|
|
|
|
|
|
|
|
/* Module exit: free the shared preset batches (pairs with gpu_batch_init). */
void gpu_batch_exit(void)
{
  gpu_batch_presets_exit();
}
|
2018-01-15 16:21:23 +11:00
|
|
|
|
2018-07-18 23:09:31 +10:00
|
|
|
/** \} */
|