/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * The Original Code is Copyright (C) 2016 by Mike Erwin.
 * All rights reserved.
 */

/** \file
 * \ingroup gpu
 *
 * GPU shader interface (C --> GLSL)
 */

#include "BKE_global.h"

#include "BLI_bitmap.h"
#include "BLI_math_base.h"

#include "MEM_guardedalloc.h"

#include "GPU_shader_interface.h"

#include "gpu_batch_private.hh"
#include "gpu_context_private.hh"

#include "gl_batch.hh"

#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define DEBUG_SHADER_INTERFACE 0

#if DEBUG_SHADER_INTERFACE
#  include <stdio.h>
#endif

using namespace blender::gpu;
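
/* Map builtin uniform enums to the GLSL uniform names queried from the program. */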
static const char *BuiltinUniform_name(GPUUniformBuiltin u)
{
  switch (u) {
    case GPU_UNIFORM_MODEL:
      return "ModelMatrix";
    case GPU_UNIFORM_VIEW:
      return "ViewMatrix";
    case GPU_UNIFORM_MODELVIEW:
      return "ModelViewMatrix";
    case GPU_UNIFORM_PROJECTION:
      return "ProjectionMatrix";
    case GPU_UNIFORM_VIEWPROJECTION:
      return "ViewProjectionMatrix";
    case GPU_UNIFORM_MVP:
      return "ModelViewProjectionMatrix";

    case GPU_UNIFORM_MODEL_INV:
      return "ModelMatrixInverse";
    case GPU_UNIFORM_VIEW_INV:
      return "ViewMatrixInverse";
    case GPU_UNIFORM_MODELVIEW_INV:
      return "ModelViewMatrixInverse";
    case GPU_UNIFORM_PROJECTION_INV:
      return "ProjectionMatrixInverse";
    case GPU_UNIFORM_VIEWPROJECTION_INV:
      return "ViewProjectionMatrixInverse";

    case GPU_UNIFORM_NORMAL:
      return "NormalMatrix";
    case GPU_UNIFORM_ORCO:
      return "OrcoTexCoFactors";
    case GPU_UNIFORM_CLIPPLANES:
      return "WorldClipPlanes";

    case GPU_UNIFORM_COLOR:
      return "color";
    case GPU_UNIFORM_BASE_INSTANCE:
      return "baseInstance";
    case GPU_UNIFORM_RESOURCE_CHUNK:
      return "resourceChunk";
    case GPU_UNIFORM_RESOURCE_ID:
      return "resourceId";
    case GPU_UNIFORM_SRGB_TRANSFORM:
      return "srgbTarget";

    default:
      return NULL;
  }
}

static const char *BuiltinUniformBlock_name(GPUUniformBlockBuiltin u)
{
  switch (u) {
    case GPU_UNIFORM_BLOCK_VIEW:
      return "viewBlock";
    case GPU_UNIFORM_BLOCK_MODEL:
      return "modelBlock";
    case GPU_UNIFORM_BLOCK_INFO:
      return "infoBlock";
    default:
      return NULL;
  }
}

GPU_INLINE bool match(const char *a, const char *b)
{
  return STREQ(a, b);
}
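
/* Simple multiplicative (x37) string hash, used to index shader inputs by name. */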
GPU_INLINE uint hash_string(const char *str)
{
  uint i = 0, c;
  while ((c = *str++)) {
    i = i * 37 + c;
  }
  return i;
}
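
/* Record the name (already copied into the interface's shared name buffer) on the input:
 * store its offset and hash, strip a trailing "[0]" from array names, and return the number
 * of bytes the name occupies in the buffer. */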
GPU_INLINE uint32_t set_input_name(GPUShaderInterface *shaderface,
                                   GPUShaderInput *input,
                                   char *name,
                                   uint32_t name_len)
{
  /* Remove "[0]" from array name. */
  if (name[name_len - 1] == ']') {
    name[name_len - 3] = '\0';
    name_len -= 3;
  }

  input->name_offset = (uint32_t)(name - shaderface->name_buffer);
  input->name_hash = hash_string(name);
  return name_len + 1; /* Include NULL terminator. */
}
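
/* Look up an input by name inside one sub-list (attributes, UBOs or uniforms). Inputs are
 * sorted by name hash, so the scan matches on the hash and only falls back to string
 * comparison when neighboring entries collide. */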
GPU_INLINE const GPUShaderInput *input_lookup(const GPUShaderInterface *shaderface,
                                              const GPUShaderInput *const inputs,
                                              const uint inputs_len,
                                              const char *name)
{
  const uint name_hash = hash_string(name);
  /* Simple linear search for now. */
  for (int i = inputs_len - 1; i >= 0; i--) {
    if (inputs[i].name_hash == name_hash) {
      if ((i > 0) && UNLIKELY(inputs[i - 1].name_hash == name_hash)) {
        /* Hash collision resolve. */
        for (; i >= 0 && inputs[i].name_hash == name_hash; i--) {
          if (match(name, shaderface->name_buffer + inputs[i].name_offset)) {
            return inputs + i; /* Found. */
          }
        }
        return NULL; /* Not found. */
      }
      /* This is a bit dangerous since we could have a hash collision
       * where the asked-for uniform that does not exist has the same hash
       * as a real uniform. */
      BLI_assert(match(name, shaderface->name_buffer + inputs[i].name_offset));
      return inputs + i;
    }
  }
  return NULL; /* Not found. */
}

/* Selection sort by decreasing name hash. Note that this modifies the src array. */
GPU_INLINE void sort_input_list(GPUShaderInput *dst, GPUShaderInput *src, const uint input_len)
{
  for (uint i = 0; i < input_len; i++) {
    GPUShaderInput *input_src = &src[0];
    for (uint j = 1; j < input_len; j++) {
      if (src[j].name_hash > input_src->name_hash) {
        input_src = &src[j];
      }
    }
    dst[i] = *input_src;
    input_src->name_hash = 0;
  }
}

static int block_binding(int32_t program, uint32_t block_index)
{
  /* For now just assign a consecutive index. In the future, we should set it in
   * the shader using layout(binding = i) and query its value. */
  glUniformBlockBinding(program, block_index, block_index);
  return block_index;
}

static int sampler_binding(int32_t program,
                           uint32_t uniform_index,
                           int32_t uniform_location,
                           int *sampler_len)
{
  /* Identify sampler uniforms and assign sampler units to them. */
  GLint type;
  glGetActiveUniformsiv(program, 1, &uniform_index, GL_UNIFORM_TYPE, &type);

  switch (type) {
    case GL_SAMPLER_1D:
    case GL_SAMPLER_2D:
    case GL_SAMPLER_3D:
    case GL_SAMPLER_CUBE:
    case GL_SAMPLER_CUBE_MAP_ARRAY_ARB: /* OpenGL 4.0 */
    case GL_SAMPLER_1D_SHADOW:
    case GL_SAMPLER_2D_SHADOW:
    case GL_SAMPLER_1D_ARRAY:
    case GL_SAMPLER_2D_ARRAY:
    case GL_SAMPLER_1D_ARRAY_SHADOW:
    case GL_SAMPLER_2D_ARRAY_SHADOW:
    case GL_SAMPLER_2D_MULTISAMPLE:
    case GL_SAMPLER_2D_MULTISAMPLE_ARRAY:
    case GL_SAMPLER_CUBE_SHADOW:
    case GL_SAMPLER_BUFFER:
    case GL_INT_SAMPLER_1D:
    case GL_INT_SAMPLER_2D:
    case GL_INT_SAMPLER_3D:
    case GL_INT_SAMPLER_CUBE:
    case GL_INT_SAMPLER_1D_ARRAY:
    case GL_INT_SAMPLER_2D_ARRAY:
    case GL_INT_SAMPLER_2D_MULTISAMPLE:
    case GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
    case GL_INT_SAMPLER_BUFFER:
    case GL_UNSIGNED_INT_SAMPLER_1D:
    case GL_UNSIGNED_INT_SAMPLER_2D:
    case GL_UNSIGNED_INT_SAMPLER_3D:
    case GL_UNSIGNED_INT_SAMPLER_CUBE:
    case GL_UNSIGNED_INT_SAMPLER_1D_ARRAY:
    case GL_UNSIGNED_INT_SAMPLER_2D_ARRAY:
    case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE:
    case GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY:
    case GL_UNSIGNED_INT_SAMPLER_BUFFER: {
      /* For now just assign a consecutive index. In the future, we should set it in
       * the shader using layout(binding = i) and query its value. */
      int binding = *sampler_len;
      glUniform1i(uniform_location, binding);
      (*sampler_len)++;
      return binding;
    }
    default:
      return -1;
  }
}
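
/* Build the interface for a linked (and currently bound) program: query its active attributes,
 * uniform blocks and uniforms, assign bindings, and store them as hash-sorted input lists that
 * share a single name buffer. */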
GPUShaderInterface *GPU_shaderinterface_create(int32_t program)
{
#ifndef NDEBUG
  GLint curr_program;
  glGetIntegerv(GL_CURRENT_PROGRAM, &curr_program);
  BLI_assert(curr_program == program);
#endif

  GLint max_attr_name_len = 0, attr_len = 0;
  glGetProgramiv(program, GL_ACTIVE_ATTRIBUTE_MAX_LENGTH, &max_attr_name_len);
  glGetProgramiv(program, GL_ACTIVE_ATTRIBUTES, &attr_len);

  GLint max_ubo_name_len = 0, ubo_len = 0;
  glGetProgramiv(program, GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH, &max_ubo_name_len);
  glGetProgramiv(program, GL_ACTIVE_UNIFORM_BLOCKS, &ubo_len);

  GLint max_uniform_name_len = 0, active_uniform_len = 0, uniform_len = 0;
  glGetProgramiv(program, GL_ACTIVE_UNIFORM_MAX_LENGTH, &max_uniform_name_len);
  glGetProgramiv(program, GL_ACTIVE_UNIFORMS, &active_uniform_len);
  uniform_len = active_uniform_len;

  /* Work around driver bug with Intel HD 4600 on Windows 7/8, where
   * GL_ACTIVE_UNIFORM_BLOCK_MAX_NAME_LENGTH does not work. */
  if (attr_len > 0 && max_attr_name_len == 0) {
    max_attr_name_len = 256;
  }
  if (ubo_len > 0 && max_ubo_name_len == 0) {
    max_ubo_name_len = 256;
  }
  if (uniform_len > 0 && max_uniform_name_len == 0) {
    max_uniform_name_len = 256;
  }

  /* GL_ACTIVE_UNIFORMS lied to us! Remove the UBO uniforms from the total before
   * allocating the uniform array. */
  GLint max_ubo_uni_len = 0;
  for (int i = 0; i < ubo_len; i++) {
    GLint ubo_uni_len;
    glGetActiveUniformBlockiv(program, i, GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS, &ubo_uni_len);
    max_ubo_uni_len = max_ii(max_ubo_uni_len, ubo_uni_len);
    uniform_len -= ubo_uni_len;
  }
  /* Bit set to true if uniform comes from a uniform block. */
  BLI_bitmap *uniforms_from_blocks = BLI_BITMAP_NEW(active_uniform_len, __func__);
  /* Set uniforms from block for exclusion. */
  GLint *ubo_uni_ids = (GLint *)MEM_mallocN(sizeof(GLint) * max_ubo_uni_len, __func__);
  for (int i = 0; i < ubo_len; i++) {
    GLint ubo_uni_len;
    glGetActiveUniformBlockiv(program, i, GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS, &ubo_uni_len);
    glGetActiveUniformBlockiv(program, i, GL_UNIFORM_BLOCK_ACTIVE_UNIFORM_INDICES, ubo_uni_ids);
    for (int u = 0; u < ubo_uni_len; u++) {
      BLI_BITMAP_ENABLE(uniforms_from_blocks, ubo_uni_ids[u]);
    }
  }
  MEM_freeN(ubo_uni_ids);

  uint32_t name_buffer_offset = 0;
  const uint32_t name_buffer_len = attr_len * max_attr_name_len + ubo_len * max_ubo_name_len +
                                   uniform_len * max_uniform_name_len;

  int input_tot_len = attr_len + ubo_len + uniform_len;
  size_t interface_size = sizeof(GPUShaderInterface) + sizeof(GPUShaderInput) * input_tot_len;

  GPUShaderInterface *shaderface = (GPUShaderInterface *)MEM_callocN(interface_size,
                                                                     "GPUShaderInterface");
  shaderface->attribute_len = attr_len;
  shaderface->ubo_len = ubo_len;
  shaderface->uniform_len = uniform_len;
  shaderface->name_buffer = (char *)MEM_mallocN(name_buffer_len, "name_buffer");
  GPUShaderInput *inputs = shaderface->inputs;

  /* Temp buffer. */
  int input_tmp_len = max_iii(attr_len, ubo_len, uniform_len);
  GPUShaderInput *inputs_tmp = (GPUShaderInput *)MEM_mallocN(
      sizeof(GPUShaderInput) * input_tmp_len, "inputs_tmp");

  /* Attributes */
  shaderface->enabled_attr_mask = 0;
  for (int i = 0, idx = 0; i < attr_len; i++) {
    char *name = shaderface->name_buffer + name_buffer_offset;
    GLsizei remaining_buffer = name_buffer_len - name_buffer_offset;
    GLsizei name_len = 0;
    GLenum type;
    GLint size;

    glGetActiveAttrib(program, i, remaining_buffer, &name_len, &size, &type, name);
    GLint location = glGetAttribLocation(program, name);
    /* Ignore OpenGL names like `gl_BaseInstanceARB`, `gl_InstanceID` and `gl_VertexID`. */
    if (location == -1) {
      shaderface->attribute_len--;
      continue;
    }

    GPUShaderInput *input = &inputs_tmp[idx++];
    input->location = input->binding = location;

    name_buffer_offset += set_input_name(shaderface, input, name, name_len);
    shaderface->enabled_attr_mask |= (1 << input->location);
  }
  sort_input_list(inputs, inputs_tmp, shaderface->attribute_len);
  inputs += shaderface->attribute_len;

  /* Uniform Blocks */
  for (int i = 0, idx = 0; i < ubo_len; i++) {
    char *name = shaderface->name_buffer + name_buffer_offset;
    GLsizei remaining_buffer = name_buffer_len - name_buffer_offset;
    GLsizei name_len = 0;

    glGetActiveUniformBlockName(program, i, remaining_buffer, &name_len, name);

    GPUShaderInput *input = &inputs_tmp[idx++];
    input->binding = input->location = block_binding(program, i);

    name_buffer_offset += set_input_name(shaderface, input, name, name_len);
    shaderface->enabled_ubo_mask |= (1 << input->binding);
  }
  sort_input_list(inputs, inputs_tmp, shaderface->ubo_len);
  inputs += shaderface->ubo_len;

  /* Uniforms */
  for (int i = 0, idx = 0, sampler = 0; i < active_uniform_len; i++) {
    if (BLI_BITMAP_TEST(uniforms_from_blocks, i)) {
      continue;
    }
    char *name = shaderface->name_buffer + name_buffer_offset;
    GLsizei remaining_buffer = name_buffer_len - name_buffer_offset;
    GLsizei name_len = 0;

    glGetActiveUniformName(program, i, remaining_buffer, &name_len, name);

    GPUShaderInput *input = &inputs_tmp[idx++];
    input->location = glGetUniformLocation(program, name);
    input->binding = sampler_binding(program, i, input->location, &sampler);

    name_buffer_offset += set_input_name(shaderface, input, name, name_len);
    shaderface->enabled_tex_mask |= (input->binding != -1) ? (1lu << input->binding) : 0lu;
  }
  sort_input_list(inputs, inputs_tmp, shaderface->uniform_len);

  /* Builtin Uniforms */
  for (int32_t u_int = 0; u_int < GPU_NUM_UNIFORMS; u_int++) {
    GPUUniformBuiltin u = static_cast<GPUUniformBuiltin>(u_int);
    shaderface->builtins[u] = glGetUniformLocation(program, BuiltinUniform_name(u));
  }

  /* Builtin Uniform Blocks */
  for (int32_t u_int = 0; u_int < GPU_NUM_UNIFORM_BLOCKS; u_int++) {
    GPUUniformBlockBuiltin u = static_cast<GPUUniformBlockBuiltin>(u_int);
    const GPUShaderInput *block = GPU_shaderinterface_ubo(shaderface, BuiltinUniformBlock_name(u));
    shaderface->builtin_blocks[u] = (block != NULL) ? block->binding : -1;
  }

  /* Batches ref buffer */
  shaderface->batches_len = GPU_SHADERINTERFACE_REF_ALLOC_COUNT;
  shaderface->batches = (void **)MEM_callocN(shaderface->batches_len * sizeof(GPUBatch *),
                                             "GPUShaderInterface batches");

  MEM_freeN(uniforms_from_blocks);
  MEM_freeN(inputs_tmp);

  /* Resize name buffer to save some memory. */
  if (name_buffer_offset < name_buffer_len) {
    shaderface->name_buffer = (char *)MEM_reallocN(shaderface->name_buffer, name_buffer_offset);
  }

#if DEBUG_SHADER_INTERFACE
  char *name_buf = shaderface->name_buffer;
  printf("--- GPUShaderInterface %p, program %d ---\n", shaderface, program);
  if (shaderface->attribute_len > 0) {
    printf("Attributes {\n");
    for (int i = 0; i < shaderface->attribute_len; i++) {
      GPUShaderInput *input = shaderface->inputs + i;
      printf("\t(location = %d) %s;\n", input->location, name_buf + input->name_offset);
    }
    printf("};\n");
  }
  if (shaderface->ubo_len > 0) {
    printf("Uniform Buffer Objects {\n");
    for (int i = 0; i < shaderface->ubo_len; i++) {
      GPUShaderInput *input = shaderface->inputs + shaderface->attribute_len + i;
      printf("\t(binding = %d) %s;\n", input->binding, name_buf + input->name_offset);
    }
    printf("};\n");
  }
  if (shaderface->enabled_tex_mask > 0) {
    printf("Samplers {\n");
    for (int i = 0; i < shaderface->uniform_len; i++) {
      GPUShaderInput *input = shaderface->inputs + shaderface->attribute_len +
                              shaderface->ubo_len + i;
      if (input->binding != -1) {
        printf("\t(location = %d, binding = %d) %s;\n",
               input->location,
               input->binding,
               name_buf + input->name_offset);
      }
    }
    printf("};\n");
  }
  if (shaderface->uniform_len > 0) {
    printf("Uniforms {\n");
    for (int i = 0; i < shaderface->uniform_len; i++) {
      GPUShaderInput *input = shaderface->inputs + shaderface->attribute_len +
                              shaderface->ubo_len + i;
      if (input->binding == -1) {
        printf("\t(location = %d) %s;\n", input->location, name_buf + input->name_offset);
      }
    }
    printf("};\n");
  }
  printf("--- GPUShaderInterface end ---\n\n");
#endif

  return shaderface;
}

void GPU_shaderinterface_discard(GPUShaderInterface *shaderface)
{
  /* Free memory used by name_buffer. */
  MEM_freeN(shaderface->name_buffer);
  /* Remove this interface from all linked batches' VAO caches. */
  for (int i = 0; i < shaderface->batches_len; i++) {
    if (shaderface->batches[i] != NULL) {
      /* XXX GL specific. To be removed during refactor. */
      reinterpret_cast<GLVaoCache *>(shaderface->batches[i])->remove(shaderface);
    }
  }
  MEM_freeN(shaderface->batches);
  /* Free memory used by the shader interface itself. */
  MEM_freeN(shaderface);
}
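
/* Name lookups. Attributes, UBOs and uniforms are stored contiguously in `inputs`, in that
 * order, so each query only searches its own sub-range. */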
const GPUShaderInput *GPU_shaderinterface_attr(const GPUShaderInterface *shaderface,
                                               const char *name)
{
  uint ofs = 0;
  return input_lookup(shaderface, shaderface->inputs + ofs, shaderface->attribute_len, name);
}

const GPUShaderInput *GPU_shaderinterface_ubo(const GPUShaderInterface *shaderface,
                                              const char *name)
{
  uint ofs = shaderface->attribute_len;
  return input_lookup(shaderface, shaderface->inputs + ofs, shaderface->ubo_len, name);
}

const GPUShaderInput *GPU_shaderinterface_uniform(const GPUShaderInterface *shaderface,
                                                  const char *name)
{
  uint ofs = shaderface->attribute_len + shaderface->ubo_len;
  return input_lookup(shaderface, shaderface->inputs + ofs, shaderface->uniform_len, name);
}

int32_t GPU_shaderinterface_uniform_builtin(const GPUShaderInterface *shaderface,
                                            GPUUniformBuiltin builtin)
{
  BLI_assert(builtin >= 0 && builtin < GPU_NUM_UNIFORMS);
  return shaderface->builtins[builtin];
}

int32_t GPU_shaderinterface_block_builtin(const GPUShaderInterface *shaderface,
                                          GPUUniformBlockBuiltin builtin)
{
  BLI_assert(builtin >= 0 && builtin < GPU_NUM_UNIFORM_BLOCKS);
  return shaderface->builtin_blocks[builtin];
}
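
/* Batches that use this interface register themselves here so their VAO caches can be
 * invalidated when the interface is discarded. */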
void GPU_shaderinterface_add_batch_ref(GPUShaderInterface *shaderface, void *batch)
{
  int i; /* Find the first unused slot. */
  for (i = 0; i < shaderface->batches_len; i++) {
    if (shaderface->batches[i] == NULL) {
      break;
    }
  }
  if (i == shaderface->batches_len) {
    /* Not enough space, reallocate the array. */
    i = shaderface->batches_len;
    shaderface->batches_len += GPU_SHADERINTERFACE_REF_ALLOC_COUNT;
    shaderface->batches = (void **)MEM_recallocN(shaderface->batches,
                                                 sizeof(void *) * shaderface->batches_len);
  }
  /* XXX TODO: cleanup. */
  shaderface->batches[i] = reinterpret_cast<void *>(batch);
}

void GPU_shaderinterface_remove_batch_ref(GPUShaderInterface *shaderface, void *batch)
{
  for (int i = 0; i < shaderface->batches_len; i++) {
    if (shaderface->batches[i] == batch) {
      shaderface->batches[i] = NULL;
      break; /* Cannot have duplicates. */
    }
  }
}