Compare commits
4 Commits
temp-py-gp ... tmp-gpu-co

Author | SHA1 | Date
---|---|---
 | 41406fcacf |
 | 510d009f53 |
 | 1c4203ae72 |
 | ba6ddf9e9e |
@@ -45,10 +45,8 @@
 #  pragma warning(pop)
 #endif

-extern "C" {
 #include "GPU_immediate.h"
 #include "GPU_shader.h"
-}

 using namespace OCIO_NAMESPACE;

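The two removed lines are the substance of this hunk: the GPU headers are no longer wrapped in extern "C" by the includer, which implies they now carry their own linkage guards. For illustration only (not part of this commit), the usual guard pattern such headers carry:

    #ifdef __cplusplus
    extern "C" {
    #endif
    /* ... C declarations ... */
    #ifdef __cplusplus
    }
    #endif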
@@ -28,6 +28,7 @@ endif()

 set(INC
   .
+  ./opengl
   ../blenkernel
   ../blenlib
   ../bmesh

@@ -68,7 +69,7 @@ set(SRC
   intern/gpu_init_exit.c
   intern/gpu_material.c
   intern/gpu_material_library.c
-  intern/gpu_matrix.c
+  intern/gpu_matrix.cc
   intern/gpu_node_graph.c
   intern/gpu_platform.cc
   intern/gpu_primitive.c

@@ -77,7 +78,7 @@ set(SRC
   intern/gpu_select_sample_query.c
   intern/gpu_shader.cc
   intern/gpu_shader_builtin.c
-  intern/gpu_shader_interface.c
+  intern/gpu_shader_interface.cc
   intern/gpu_state.cc
   intern/gpu_texture.cc
   intern/gpu_uniformbuffer.cc

@@ -85,6 +86,12 @@ set(SRC
   intern/gpu_vertex_format.cc
   intern/gpu_viewport.c

+  # opengl/ogl_context.cc
+  opengl/gl_context.hh
+
+  # vulkan/vk_context.cc
+  # vulkan/vk_context.hh
+
   GPU_attr_binding.h
   GPU_batch.h
   GPU_batch_presets.h

@@ -118,7 +125,7 @@ set(SRC
   intern/gpu_attr_binding_private.h
   intern/gpu_batch_private.h
   intern/gpu_codegen.h
-  intern/gpu_context_private.h
+  intern/gpu_context_private.hh
   intern/gpu_material_library.h
   intern/gpu_matrix_private.h
   intern/gpu_node_graph.h
@@ -34,7 +34,7 @@
 #include "GPU_shader.h"

 #include "gpu_batch_private.h"
-#include "gpu_context_private.h"
+#include "gpu_context_private.hh"
 #include "gpu_primitive_private.h"
 #include "gpu_shader_private.h"
 #include "gpu_vertex_format_private.h"

@@ -81,7 +81,7 @@ void GPU_batch_vao_cache_clear(GPUBatch *batch)
       batch->static_vaos.vao_ids[i] = 0;
       batch->static_vaos.interfaces[i] = NULL;
     }
-    gpu_context_remove_batch(batch->context, batch);
+    batch->context->batch_remove(batch);
     batch->context = NULL;
   }

@@ -294,7 +294,7 @@ static GLuint batch_vao_get(GPUBatch *batch)
    * Until then it can only be drawn with this context. */
   if (batch->context == NULL) {
     batch->context = GPU_context_active_get();
-    gpu_context_add_batch(batch->context, batch);
+    batch->context->batch_add(batch);
   }
 #if TRUST_NO_ONE
   else {

@@ -634,16 +634,6 @@ void GPU_batch_uniform_mat4(GPUBatch *batch, const char *name, const float data[
   glUniformMatrix4fv(uniform->location, 1, GL_FALSE, (const float *)data);
 }

-static void *elem_offset(const GPUIndexBuf *el, int v_first)
-{
-#if GPU_TRACK_INDEX_RANGE
-  if (el->index_type == GPU_INDEX_U16) {
-    return (GLushort *)0 + v_first + el->index_start;
-  }
-#endif
-  return (GLuint *)0 + v_first + el->index_start;
-}
-
 /* Use when drawing with GPU_batch_draw_advanced */
 void GPU_batch_bind(GPUBatch *batch)
 {

@@ -704,90 +694,13 @@ void GPU_batch_draw_advanced(GPUBatch *batch, int v_first, int v_count, int i_fi
     return;
   }

-  /* Verify there is enough data do draw. */
-  /* TODO(fclem) Nice to have but this is invalid when using procedural draw-calls.
-   * The right assert would be to check if there is an enabled attribute from each VBO
-   * and check their length. */
-  // BLI_assert(i_first + i_count <= (batch->inst ? batch->inst->vertex_len : INT_MAX));
-  // BLI_assert(v_first + v_count <=
-  //            (batch->elem ? batch->elem->index_len : batch->verts[0]->vertex_len));
-
-#ifdef __APPLE__
-  GLuint vao = 0;
-#endif
-
-  if (!GPU_arb_base_instance_is_supported()) {
-    if (i_first > 0) {
-#ifdef __APPLE__
-      /**
-       * There seems to be a nasty bug when drawing using the same VAO reconfiguring. (see T71147)
-       * We just use a throwaway VAO for that. Note that this is likely to degrade performance.
-       **/
-      glGenVertexArrays(1, &vao);
-      glBindVertexArray(vao);
-#else
-      /* If using offset drawing with instancing, we must
-       * use the default VAO and redo bindings. */
-      glBindVertexArray(GPU_vao_default());
-#endif
-      batch_update_program_bindings(batch, i_first);
-    }
-    else {
-      /* Previous call could have bind the default vao
-       * see above. */
-      glBindVertexArray(batch->vao_id);
-    }
-  }
-
-  if (batch->elem) {
-    const GPUIndexBuf *el = batch->elem;
-    GLenum index_type = INDEX_TYPE(el);
-    GLint base_index = BASE_INDEX(el);
-    void *v_first_ofs = elem_offset(el, v_first);
-
-    if (GPU_arb_base_instance_is_supported()) {
-      glDrawElementsInstancedBaseVertexBaseInstance(
-          batch->gl_prim_type, v_count, index_type, v_first_ofs, i_count, base_index, i_first);
-    }
-    else {
-      glDrawElementsInstancedBaseVertex(
-          batch->gl_prim_type, v_count, index_type, v_first_ofs, i_count, base_index);
-    }
-  }
-  else {
-#ifdef __APPLE__
-    glDisable(GL_PRIMITIVE_RESTART);
-#endif
-    if (GPU_arb_base_instance_is_supported()) {
-      glDrawArraysInstancedBaseInstance(batch->gl_prim_type, v_first, v_count, i_count, i_first);
-    }
-    else {
-      glDrawArraysInstanced(batch->gl_prim_type, v_first, v_count, i_count);
-    }
-#ifdef __APPLE__
-    glEnable(GL_PRIMITIVE_RESTART);
-#endif
-  }
-
-#ifdef __APPLE__
-  if (vao != 0) {
-    glDeleteVertexArrays(1, &vao);
-  }
-#endif
+  GPU_ctx()->draw_batch(batch, v_first, v_count, i_first, i_count);
 }

 /* just draw some vertices and let shader place them where we want. */
 void GPU_draw_primitive(GPUPrimType prim_type, int v_count)
 {
-  /* we cannot draw without vao ... annoying ... */
-  glBindVertexArray(GPU_vao_default());
-
-  GLenum type = convert_prim_type_to_gl(prim_type);
-  glDrawArrays(type, 0, v_count);
-
-  /* Performance hog if you are drawing with the same vao multiple time.
-   * Only activate for debugging.*/
-  // glBindVertexArray(0);
+  GPU_ctx()->draw_primitive(prim_type, v_count);
 }

 /* -------------------------------------------------------------------- */

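The deleted elem_offset() helper reappears inline in the new GL backend (gl_batch.cc further down). Since the idiom is easy to misread, here is a minimal sketch of what it computes, not taken from the commit: GL index-buffer offsets are passed as pointer-typed byte counts, and adding an element count to a typed null pointer scales it by the element size.

    #include <stddef.h>

    /* 'index_size' is 2 for 16-bit indices, 4 for 32-bit ones. */
    static const void *index_offset_bytes(size_t index_size, size_t first_index)
    {
      return (const void *)(index_size * first_index);
    }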
@@ -35,137 +35,46 @@
 #include "GPU_framebuffer.h"

 #include "gpu_batch_private.h"
-#include "gpu_context_private.h"
+#include "gpu_context_private.hh"
 #include "gpu_matrix_private.h"

+#include "gl_context.hh"
+
 #include <mutex>
 #include <pthread.h>
 #include <string.h>
 #include <unordered_set>
 #include <vector>

-#if TRUST_NO_ONE
-#  if 0
-extern "C" {
-extern int BLI_thread_is_main(void); /* Blender-specific function */
-}
-
-static bool thread_is_main()
-{
-  /* "main" here means the GL context's thread */
-  return BLI_thread_is_main();
-}
-#  endif
-#endif
-
-static std::vector<GLuint> orphaned_buffer_ids;
-static std::vector<GLuint> orphaned_texture_ids;
-
-static std::mutex orphans_mutex;
-
-struct GPUContext {
-  GLuint default_vao;
-  GLuint default_framebuffer;
-  GPUFrameBuffer *current_fbo;
-  std::unordered_set<GPUBatch *> batches; /* Batches that have VAOs from this context */
-#ifdef DEBUG
-  std::unordered_set<GPUFrameBuffer *>
-      framebuffers; /* Framebuffers that have FBO from this context */
-#endif
-  struct GPUMatrixState *matrix_state;
-  std::vector<GLuint> orphaned_vertarray_ids;
-  std::vector<GLuint> orphaned_framebuffer_ids;
-  std::mutex orphans_mutex; /* todo: try spinlock instead */
-#if TRUST_NO_ONE
-  pthread_t thread; /* Thread on which this context is active. */
-  bool thread_is_used;
-#endif
-
-  GPUContext()
-  {
-#if TRUST_NO_ONE
-    thread_is_used = false;
-#endif
-    current_fbo = 0;
-  }
-};
+// TODO
+// using namespace blender::gpu;

 static thread_local GPUContext *active_ctx = NULL;

-static void orphans_add(GPUContext *ctx, std::vector<GLuint> *orphan_list, GLuint id)
+GPUContext::GPUContext()
 {
-  std::mutex *mutex = (ctx) ? &ctx->orphans_mutex : &orphans_mutex;
-
-  mutex->lock();
-  orphan_list->emplace_back(id);
-  mutex->unlock();
+  thread_ = pthread_self();
+  matrix_state = GPU_matrix_state_create();
 }

-static void orphans_clear(GPUContext *ctx)
+GPUContext *GPU_context_create(GLuint)
 {
-  /* need at least an active context */
-  BLI_assert(ctx);
-
-  /* context has been activated by another thread! */
-  BLI_assert(pthread_equal(pthread_self(), ctx->thread));
-
-  ctx->orphans_mutex.lock();
-  if (!ctx->orphaned_vertarray_ids.empty()) {
-    uint orphan_len = (uint)ctx->orphaned_vertarray_ids.size();
-    glDeleteVertexArrays(orphan_len, ctx->orphaned_vertarray_ids.data());
-    ctx->orphaned_vertarray_ids.clear();
-  }
-  if (!ctx->orphaned_framebuffer_ids.empty()) {
-    uint orphan_len = (uint)ctx->orphaned_framebuffer_ids.size();
-    glDeleteFramebuffers(orphan_len, ctx->orphaned_framebuffer_ids.data());
-    ctx->orphaned_framebuffer_ids.clear();
-  }
-
-  ctx->orphans_mutex.unlock();
-
-  orphans_mutex.lock();
-  if (!orphaned_buffer_ids.empty()) {
-    uint orphan_len = (uint)orphaned_buffer_ids.size();
-    glDeleteBuffers(orphan_len, orphaned_buffer_ids.data());
-    orphaned_buffer_ids.clear();
-  }
-  if (!orphaned_texture_ids.empty()) {
-    uint orphan_len = (uint)orphaned_texture_ids.size();
-    glDeleteTextures(orphan_len, orphaned_texture_ids.data());
-    orphaned_texture_ids.clear();
-  }
-  orphans_mutex.unlock();
-}
-
-GPUContext *GPU_context_create(GLuint default_framebuffer)
-{
   /* BLI_assert(thread_is_main()); */
-  GPUContext *ctx = new GPUContext;
-  glGenVertexArrays(1, &ctx->default_vao);
-  ctx->default_framebuffer = default_framebuffer;
-  ctx->matrix_state = GPU_matrix_state_create();
+  GPUContext *ctx = new GLContext();
   GPU_context_active_set(ctx);
   return ctx;
 }

+bool GPUContext::is_active_on_thread(void)
+{
+  return (this == active_ctx) && pthread_equal(pthread_self(), thread_);
+}
+
 /* to be called after GPU_context_active_set(ctx_to_destroy) */
 void GPU_context_discard(GPUContext *ctx)
 {
-  /* Make sure no other thread has locked it. */
-  BLI_assert(ctx == active_ctx);
-  BLI_assert(pthread_equal(pthread_self(), ctx->thread));
-  BLI_assert(ctx->orphaned_vertarray_ids.empty());
-#ifdef DEBUG
-  /* For now don't allow GPUFrameBuffers to be reuse in another ctx. */
-  BLI_assert(ctx->framebuffers.empty());
-#endif
-  /* delete remaining vaos */
-  while (!ctx->batches.empty()) {
-    /* this removes the array entry */
-    GPU_batch_vao_cache_clear(*ctx->batches.begin());
-  }
-  GPU_matrix_state_discard(ctx->matrix_state);
-  glDeleteVertexArrays(1, &ctx->default_vao);
+  BLI_assert(ctx->is_active_on_thread());

   delete ctx;
   active_ctx = NULL;
 }
@@ -173,175 +82,69 @@ void GPU_context_discard(GPUContext *ctx)

 /* ctx can be NULL */
 void GPU_context_active_set(GPUContext *ctx)
 {
-#if TRUST_NO_ONE
   if (active_ctx) {
-    active_ctx->thread_is_used = false;
+    active_ctx->deactivate();
   }
+  /* Make sure no other context is already bound to this thread. */
   if (ctx) {
-    /* Make sure no other thread has locked it. */
-    assert(ctx->thread_is_used == false);
-    ctx->thread = pthread_self();
-    ctx->thread_is_used = true;
-  }
-#endif
-  if (ctx) {
-    orphans_clear(ctx);
+    ctx->activate();
   }
   active_ctx = ctx;
 }

+GPUContext *GPU_ctx(void)
+{
+  /* Context has been activated by another thread! */
+  BLI_assert(active_ctx->is_active_on_thread());
+  return active_ctx;
+}
+
 GPUContext *GPU_context_active_get(void)
 {
   return active_ctx;
 }

-GLuint GPU_vao_default(void)
-{
-  BLI_assert(active_ctx); /* need at least an active context */
-  BLI_assert(pthread_equal(
-      pthread_self(), active_ctx->thread)); /* context has been activated by another thread! */
-  return active_ctx->default_vao;
-}
-
-GLuint GPU_framebuffer_default(void)
-{
-  BLI_assert(active_ctx); /* need at least an active context */
-  BLI_assert(pthread_equal(
-      pthread_self(), active_ctx->thread)); /* context has been activated by another thread! */
-  return active_ctx->default_framebuffer;
-}
-
 GLuint GPU_vao_alloc(void)
 {
-  GLuint new_vao_id = 0;
-  orphans_clear(active_ctx);
-  glGenVertexArrays(1, &new_vao_id);
-  return new_vao_id;
+  return active_ctx->vao_alloc();
 }

 GLuint GPU_fbo_alloc(void)
 {
-  GLuint new_fbo_id = 0;
-  orphans_clear(active_ctx);
-  glGenFramebuffers(1, &new_fbo_id);
-  return new_fbo_id;
+  return active_ctx->fbo_alloc();
 }

 GLuint GPU_buf_alloc(void)
 {
-  GLuint new_buffer_id = 0;
-  orphans_clear(active_ctx);
-  glGenBuffers(1, &new_buffer_id);
-  return new_buffer_id;
+  return active_ctx->buf_alloc();
 }

 GLuint GPU_tex_alloc(void)
 {
-  GLuint new_texture_id = 0;
-  orphans_clear(active_ctx);
-  glGenTextures(1, &new_texture_id);
-  return new_texture_id;
+  return active_ctx->tex_alloc();
 }

 void GPU_vao_free(GLuint vao_id, GPUContext *ctx)
 {
-  BLI_assert(ctx);
-  if (ctx == active_ctx) {
-    glDeleteVertexArrays(1, &vao_id);
-  }
-  else {
-    orphans_add(ctx, &ctx->orphaned_vertarray_ids, vao_id);
-  }
+  ctx->vao_free(vao_id);
 }

 void GPU_fbo_free(GLuint fbo_id, GPUContext *ctx)
 {
-  BLI_assert(ctx);
-  if (ctx == active_ctx) {
-    glDeleteFramebuffers(1, &fbo_id);
-  }
-  else {
-    orphans_add(ctx, &ctx->orphaned_framebuffer_ids, fbo_id);
-  }
+  ctx->fbo_free(fbo_id);
 }

 void GPU_buf_free(GLuint buf_id)
 {
-  if (active_ctx) {
-    glDeleteBuffers(1, &buf_id);
-  }
-  else {
-    orphans_add(NULL, &orphaned_buffer_ids, buf_id);
-  }
+  /* FIXME active_ctx can be NULL */
+  BLI_assert(active_ctx);
+  active_ctx->buf_free(buf_id);
 }

 void GPU_tex_free(GLuint tex_id)
 {
-  if (active_ctx) {
-    glDeleteTextures(1, &tex_id);
-  }
-  else {
-    orphans_add(NULL, &orphaned_texture_ids, tex_id);
-  }
-}
-
-/* GPUBatch & GPUFrameBuffer contains respectively VAO & FBO indices
- * which are not shared across contexts. So we need to keep track of
- * ownership. */
-
-void gpu_context_add_batch(GPUContext *ctx, GPUBatch *batch)
-{
-  BLI_assert(ctx);
-  ctx->orphans_mutex.lock();
-  ctx->batches.emplace(batch);
-  ctx->orphans_mutex.unlock();
-}
-
-void gpu_context_remove_batch(GPUContext *ctx, GPUBatch *batch)
-{
-  BLI_assert(ctx);
-  ctx->orphans_mutex.lock();
-  ctx->batches.erase(batch);
-  ctx->orphans_mutex.unlock();
-}
-
-void gpu_context_add_framebuffer(GPUContext *ctx, GPUFrameBuffer *fb)
-{
-#ifdef DEBUG
-  BLI_assert(ctx);
-  ctx->orphans_mutex.lock();
-  ctx->framebuffers.emplace(fb);
-  ctx->orphans_mutex.unlock();
-#else
-  UNUSED_VARS(ctx, fb);
-#endif
-}
-
-void gpu_context_remove_framebuffer(GPUContext *ctx, GPUFrameBuffer *fb)
-{
-#ifdef DEBUG
-  BLI_assert(ctx);
-  ctx->orphans_mutex.lock();
-  ctx->framebuffers.erase(fb);
-  ctx->orphans_mutex.unlock();
-#else
-  UNUSED_VARS(ctx, fb);
-#endif
-}
-
-void gpu_context_active_framebuffer_set(GPUContext *ctx, GPUFrameBuffer *fb)
-{
-  ctx->current_fbo = fb;
-}
-
-GPUFrameBuffer *gpu_context_active_framebuffer_get(GPUContext *ctx)
-{
-  return ctx->current_fbo;
-}
-
-struct GPUMatrixState *gpu_context_active_matrix_state_get()
-{
   /* FIXME active_ctx can be NULL */
   BLI_assert(active_ctx);
-  return active_ctx->matrix_state;
+  active_ctx->tex_free(tex_id);
 }
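Much of what was deleted above is the orphan-list machinery, which survives inside GLContext (see gl_context.cc below). The pattern itself, reduced to a self-contained sketch (not part of the commit): GL object names must be deleted with their owning context bound, so frees requested from other threads are queued under a mutex and flushed the next time the context is active.

    #include <mutex>
    #include <vector>

    struct OrphanList {
      std::mutex mutex;
      std::vector<unsigned int> ids;

      /* Callable from any thread: just records the name for later deletion. */
      void defer(unsigned int id)
      {
        std::lock_guard<std::mutex> lock(mutex);
        ids.push_back(id);
      }

      /* Called only on the thread where the context is bound, e.g. from activate(). */
      template<typename DeleteFn> void flush(DeleteFn gl_delete)
      {
        std::lock_guard<std::mutex> lock(mutex);
        if (!ids.empty()) {
          gl_delete((int)ids.size(), ids.data()); /* e.g. glDeleteBuffers */
          ids.clear();
        }
      }
    };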
@@ -1,68 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- *
- * The Original Code is Copyright (C) 2016 by Mike Erwin.
- * All rights reserved.
- */
-
-/** \file
- * \ingroup gpu
- *
- * This interface allow GPU to manage GL objects for multiple context and threads.
- */
-
-#ifndef __GPU_CONTEXT_PRIVATE_H__
-#define __GPU_CONTEXT_PRIVATE_H__
-
-#include "GPU_context.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct GPUFrameBuffer;
-
-GLuint GPU_vao_default(void);
-GLuint GPU_framebuffer_default(void);
-
-/* These require a gl ctx bound. */
-GLuint GPU_buf_alloc(void);
-GLuint GPU_tex_alloc(void);
-GLuint GPU_vao_alloc(void);
-GLuint GPU_fbo_alloc(void);
-
-/* These can be called any threads even without gl ctx. */
-void GPU_buf_free(GLuint buf_id);
-void GPU_tex_free(GLuint tex_id);
-/* These two need the ctx the id was created with. */
-void GPU_vao_free(GLuint vao_id, GPUContext *ctx);
-void GPU_fbo_free(GLuint fbo_id, GPUContext *ctx);
-
-void gpu_context_add_batch(GPUContext *ctx, GPUBatch *batch);
-void gpu_context_remove_batch(GPUContext *ctx, GPUBatch *batch);
-
-void gpu_context_add_framebuffer(GPUContext *ctx, struct GPUFrameBuffer *fb);
-void gpu_context_remove_framebuffer(GPUContext *ctx, struct GPUFrameBuffer *fb);
-
-void gpu_context_active_framebuffer_set(GPUContext *ctx, struct GPUFrameBuffer *fb);
-struct GPUFrameBuffer *gpu_context_active_framebuffer_get(GPUContext *ctx);
-
-struct GPUMatrixState *gpu_context_active_matrix_state_get(void);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __GPU_CONTEXT_PRIVATE_H__ */
source/blender/gpu/intern/gpu_context_private.hh (new file, 98 lines)
@@ -0,0 +1,98 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2016 by Mike Erwin.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ *
+ * This interface allows the GPU module to manage GL objects for multiple contexts and threads.
+ */
+
+#pragma once
+
+#include "GPU_context.h"
+
+#include <pthread.h>
+
+struct GPUBatch;
+struct GPUFrameBuffer;
+struct GPUMatrixState;
+
+// TODO(fclem) this requires too much refactor for now.
+// namespace blender {
+// namespace gpu {
+
+class GPUContext {
+ public:
+  GPUContext();
+  virtual ~GPUContext(){};
+
+  virtual void activate(void) = 0;
+  virtual void deactivate(void) = 0;
+
+  virtual void draw_batch(GPUBatch *batch, int v_first, int v_count, int i_first, int i_count) = 0;
+  virtual void draw_primitive(GPUPrimType prim_type, int v_count) = 0;
+
+  virtual void batch_add(GPUBatch *){};
+  virtual void batch_remove(GPUBatch *){};
+
+  virtual void framebuffer_add(struct GPUFrameBuffer *){};
+  virtual void framebuffer_remove(struct GPUFrameBuffer *){};
+
+  /* TODO(fclem) These are gl specifics. To be hidden inside the gl backend. */
+  virtual GLuint default_framebuffer_get(void) = 0;
+  virtual GLuint buf_alloc(void) = 0;
+  virtual GLuint tex_alloc(void) = 0;
+  virtual GLuint vao_alloc(void) = 0;
+  virtual GLuint fbo_alloc(void) = 0;
+  virtual void vao_free(GLuint vao_id) = 0;
+  virtual void fbo_free(GLuint fbo_id) = 0;
+  virtual void buf_free(GLuint buf_id) = 0;
+  virtual void tex_free(GLuint tex_id) = 0;
+
+  /** State management. */
+  GPUFrameBuffer *current_fbo = NULL;
+  GPUMatrixState *matrix_state = NULL;
+
+  bool is_active_on_thread(void);
+
+ protected:
+  /** Thread on which this context is active. */
+  pthread_t thread_;
+  bool thread_is_used_;
+};
+
+/* Return the context currently bound to the caller's thread.
+ * Note: this assumes a context is active! */
+GPUContext *GPU_ctx(void);
+
+/* These require a gl ctx bound. */
+GLuint GPU_buf_alloc(void);
+GLuint GPU_tex_alloc(void);
+GLuint GPU_vao_alloc(void);
+GLuint GPU_fbo_alloc(void);
+
+/* These can be called from any thread even without a gl ctx bound. */
+void GPU_buf_free(GLuint buf_id);
+void GPU_tex_free(GLuint tex_id);
+/* These two need the ctx the id was created with. */
+void GPU_vao_free(GLuint vao_id, GPUContext *ctx);
+void GPU_fbo_free(GLuint fbo_id, GPUContext *ctx);
+
+// } // namespace gpu
+// } // namespace blender
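This header is the heart of the refactor: GPUContext becomes an abstract interface and backends plug in by overriding the virtuals. A hypothetical sketch (not in this commit) of what a second backend would look like, mirroring the GLContext declared later in gl_context.hh:

    /* Hypothetical no-op backend; names and shapes follow the header above. */
    class NullContext : public GPUContext {
     public:
      void activate(void) override {}
      void deactivate(void) override {}
      void draw_batch(GPUBatch *, int, int, int, int) override {}
      void draw_primitive(GPUPrimType, int) override {}
      GLuint default_framebuffer_get(void) override { return 0; }
      GLuint buf_alloc(void) override { return 0; }
      GLuint tex_alloc(void) override { return 0; }
      GLuint vao_alloc(void) override { return 0; }
      GLuint fbo_alloc(void) override { return 0; }
      void vao_free(GLuint) override {}
      void fbo_free(GLuint) override {}
      void buf_free(GLuint) override {}
      void tex_free(GLuint) override {}
    };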
@@ -28,7 +28,7 @@
 #include "GPU_element.h"
 #include "GPU_glew.h"

-#include "gpu_context_private.h"
+#include "gpu_context_private.hh"

 #include <stdlib.h>

@@ -33,7 +33,7 @@
 #include "GPU_shader.h"
 #include "GPU_texture.h"

-#include "gpu_context_private.h"
+#include "gpu_context_private.hh"
 #include "gpu_private.h"

 typedef enum {

@@ -188,10 +188,10 @@ GPUFrameBuffer *GPU_framebuffer_active_get(void)
 {
   GPUContext *ctx = GPU_context_active_get();
   if (ctx) {
-    return gpu_context_active_framebuffer_get(ctx);
+    return ctx->current_fbo;
   }
   else {
-    return 0;
+    return NULL;
   }
 }

@@ -199,7 +199,7 @@ static void gpu_framebuffer_current_set(GPUFrameBuffer *fb)
 {
   GPUContext *ctx = GPU_context_active_get();
   if (ctx) {
-    gpu_context_active_framebuffer_set(ctx, fb);
+    ctx->current_fbo = fb;
   }
 }

@@ -216,7 +216,7 @@ static void gpu_framebuffer_init(GPUFrameBuffer *fb)
 {
   fb->object = GPU_fbo_alloc();
   fb->ctx = GPU_context_active_get();
-  gpu_context_add_framebuffer(fb->ctx, fb);
+  fb->ctx->framebuffer_add(fb);
 }

 void GPU_framebuffer_free(GPUFrameBuffer *fb)

@@ -231,7 +231,7 @@ void GPU_framebuffer_free(GPUFrameBuffer *fb)
   if (fb->object != 0) {
     /* This restores the framebuffer if it was bound */
     GPU_fbo_free(fb->object, fb->ctx);
-    gpu_context_remove_framebuffer(fb->ctx, fb);
+    fb->ctx->framebuffer_remove(fb);
   }

   if (GPU_framebuffer_active_get() == fb) {

@@ -560,7 +560,7 @@ void GPU_framebuffer_bind(GPUFrameBuffer *fb)
 void GPU_framebuffer_restore(void)
 {
   if (GPU_framebuffer_active_get() != NULL) {
-    glBindFramebuffer(GL_FRAMEBUFFER, GPU_framebuffer_default());
+    glBindFramebuffer(GL_FRAMEBUFFER, GPU_ctx()->default_framebuffer_get());
     gpu_framebuffer_current_set(NULL);
     glDisable(GL_FRAMEBUFFER_SRGB);
     GPU_shader_set_framebuffer_srgb_target(false);

@@ -796,7 +796,7 @@ void GPU_framebuffer_blit(GPUFrameBuffer *fb_read,
     gpu_framebuffer_current_set(prev_fb);
   }
   else {
-    glBindFramebuffer(GL_FRAMEBUFFER, GPU_framebuffer_default());
+    glBindFramebuffer(GL_FRAMEBUFFER, GPU_ctx()->default_framebuffer_get());
     gpu_framebuffer_current_set(NULL);
   }
 }

@@ -1037,7 +1037,7 @@ void GPU_offscreen_draw_to_screen(GPUOffScreen *ofs, int x, int y)
     gpu_print_framebuffer_error(status, NULL);
   }

-  glBindFramebuffer(GL_READ_FRAMEBUFFER, GPU_framebuffer_default());
+  glBindFramebuffer(GL_READ_FRAMEBUFFER, GPU_ctx()->default_framebuffer_get());
 }

 void GPU_offscreen_read_pixels(GPUOffScreen *ofs, eGPUDataFormat type, void *pixels)
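All of these framebuffer hunks follow one rule: state that used to be reached through gpu_context_* accessor functions is now a plain member of the context. A short sketch of the resulting call sites (illustrative, not from the commit):

    GPUContext *ctx = GPU_context_active_get();
    GPUFrameBuffer *fb = ctx ? ctx->current_fbo : NULL; /* was gpu_context_active_framebuffer_get() */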
@@ -33,7 +33,7 @@
 #include "GPU_texture.h"

 #include "gpu_attr_binding_private.h"
-#include "gpu_context_private.h"
+#include "gpu_context_private.hh"
 #include "gpu_primitive_private.h"
 #include "gpu_shader_private.h"
 #include "gpu_vertex_format_private.h"
@@ -23,7 +23,7 @@

 #include "GPU_shader_interface.h"

-#include "gpu_context_private.h"
+#include "gpu_context_private.hh"
 #include "gpu_matrix_private.h"

 #define SUPPRESS_GENERIC_MATRIX_API

@@ -37,8 +37,6 @@

 #include "MEM_guardedalloc.h"

-#define DEBUG_MATRIX_BIND 0
-
 #define MATRIX_STACK_DEPTH 32

 typedef float Mat4[4][4];

@@ -63,10 +61,10 @@ typedef struct GPUMatrixState {
   */
 } GPUMatrixState;

-#define ModelViewStack gpu_context_active_matrix_state_get()->model_view_stack
+#define ModelViewStack GPU_ctx()->matrix_state->model_view_stack
 #define ModelView ModelViewStack.stack[ModelViewStack.top]

-#define ProjectionStack gpu_context_active_matrix_state_get()->projection_stack
+#define ProjectionStack GPU_ctx()->matrix_state->projection_stack
 #define Projection ProjectionStack.stack[ProjectionStack.top]

 GPUMatrixState *GPU_matrix_state_create(void)

@@ -79,7 +77,7 @@ GPUMatrixState *GPU_matrix_state_create(void)
   } \
 }

-  GPUMatrixState *state = MEM_mallocN(sizeof(*state), __func__);
+  GPUMatrixState *state = (GPUMatrixState *)MEM_mallocN(sizeof(*state), __func__);
   const MatrixStack identity_stack = {{MATRIX_4X4_IDENTITY}, 0};

   state->model_view_stack = state->projection_stack = identity_stack;

@@ -97,13 +95,13 @@ void GPU_matrix_state_discard(GPUMatrixState *state)

 static void gpu_matrix_state_active_set_dirty(bool value)
 {
-  GPUMatrixState *state = gpu_context_active_matrix_state_get();
+  GPUMatrixState *state = GPU_ctx()->matrix_state;
   state->dirty = value;
 }

 void GPU_matrix_reset(void)
 {
-  GPUMatrixState *state = gpu_context_active_matrix_state_get();
+  GPUMatrixState *state = GPU_ctx()->matrix_state;
   state->model_view_stack.top = 0;
   state->projection_stack.top = 0;
   unit_m4(ModelView);

@@ -662,51 +660,32 @@ void GPU_matrix_bind(const GPUShaderInterface *shaderface)
   int32_t MV_inv = GPU_shaderinterface_uniform_builtin(shaderface, GPU_UNIFORM_MODELVIEW_INV);
   int32_t P_inv = GPU_shaderinterface_uniform_builtin(shaderface, GPU_UNIFORM_PROJECTION_INV);

+  /* XXX(fclem) this works but this assumes shader is unused inside GPU_shader_uniform_vector. */
+  GPUShader *sh = NULL;
   if (MV != -1) {
-#if DEBUG_MATRIX_BIND
-    puts("setting MV matrix");
-#endif
-
-    glUniformMatrix4fv(MV, 1, GL_FALSE, (const float *)GPU_matrix_model_view_get(NULL));
+    GPU_shader_uniform_vector(sh, MV, 16, 1, (const float *)GPU_matrix_model_view_get(NULL));
   }

   if (P != -1) {
-#if DEBUG_MATRIX_BIND
-    puts("setting P matrix");
-#endif
-
-    glUniformMatrix4fv(P, 1, GL_FALSE, (const float *)GPU_matrix_projection_get(NULL));
+    GPU_shader_uniform_vector(sh, P, 16, 1, (const float *)GPU_matrix_projection_get(NULL));
   }

   if (MVP != -1) {
-#if DEBUG_MATRIX_BIND
-    puts("setting MVP matrix");
-#endif
-
-    glUniformMatrix4fv(
-        MVP, 1, GL_FALSE, (const float *)GPU_matrix_model_view_projection_get(NULL));
+    GPU_shader_uniform_vector(
+        sh, MVP, 16, 1, (const float *)GPU_matrix_model_view_projection_get(NULL));
   }

   if (N != -1) {
-#if DEBUG_MATRIX_BIND
-    puts("setting normal matrix");
-#endif
-
-    glUniformMatrix3fv(N, 1, GL_FALSE, (const float *)GPU_matrix_normal_get(NULL));
+    GPU_shader_uniform_vector(sh, N, 9, 1, (const float *)GPU_matrix_normal_get(NULL));
   }

   if (MV_inv != -1) {
     Mat4 m;
     GPU_matrix_model_view_get(m);
     invert_m4(m);
-    glUniformMatrix4fv(MV_inv, 1, GL_FALSE, (const float *)m);
+    GPU_shader_uniform_vector(sh, MV_inv, 16, 1, (const float *)m);
   }

   if (P_inv != -1) {
     Mat4 m;
     GPU_matrix_projection_get(m);
     invert_m4(m);
-    glUniformMatrix4fv(P_inv, 1, GL_FALSE, (const float *)m);
+    GPU_shader_uniform_vector(sh, P_inv, 16, 1, (const float *)m);
   }

   gpu_matrix_state_active_set_dirty(false);

@@ -714,7 +693,7 @@ void GPU_matrix_bind(const GPUShaderInterface *shaderface)

 bool GPU_matrix_dirty_get(void)
 {
-  GPUMatrixState *state = gpu_context_active_matrix_state_get();
+  GPUMatrixState *state = GPU_ctx()->matrix_state;
   return state->dirty;
 }

@@ -727,13 +706,13 @@ BLI_STATIC_ASSERT(GPU_PY_MATRIX_STACK_LEN + 1 == MATRIX_STACK_DEPTH, "define mis

 int GPU_matrix_stack_level_get_model_view(void)
 {
-  GPUMatrixState *state = gpu_context_active_matrix_state_get();
+  GPUMatrixState *state = GPU_ctx()->matrix_state;
   return (int)state->model_view_stack.top;
 }

 int GPU_matrix_stack_level_get_projection(void)
 {
-  GPUMatrixState *state = gpu_context_active_matrix_state_get();
+  GPUMatrixState *state = GPU_ctx()->matrix_state;
   return (int)state->projection_stack.top;
 }
@@ -33,7 +33,7 @@
 #include "GPU_shader_interface.h"

 #include "gpu_batch_private.h"
-#include "gpu_context_private.h"
+#include "gpu_context_private.hh"

 #include <stddef.h>
 #include <stdlib.h>

@@ -273,7 +273,7 @@ GPUShaderInterface *GPU_shaderinterface_create(int32_t program)
   /* Bit set to true if uniform comes from a uniform block. */
   BLI_bitmap *uniforms_from_blocks = BLI_BITMAP_NEW(active_uniform_len, __func__);
   /* Set uniforms from block for exclusion. */
-  GLint *ubo_uni_ids = MEM_mallocN(sizeof(GLint) * max_ubo_uni_len, __func__);
+  GLint *ubo_uni_ids = (GLint *)MEM_mallocN(sizeof(GLint) * max_ubo_uni_len, __func__);
   for (int i = 0; i < ubo_len; i++) {
     GLint ubo_uni_len;
     glGetActiveUniformBlockiv(program, i, GL_UNIFORM_BLOCK_ACTIVE_UNIFORMS, &ubo_uni_len);

@@ -291,16 +291,18 @@ GPUShaderInterface *GPU_shaderinterface_create(int32_t program)
   int input_tot_len = attr_len + ubo_len + uniform_len;
   size_t interface_size = sizeof(GPUShaderInterface) + sizeof(GPUShaderInput) * input_tot_len;

-  GPUShaderInterface *shaderface = MEM_callocN(interface_size, "GPUShaderInterface");
+  GPUShaderInterface *shaderface = (GPUShaderInterface *)MEM_callocN(interface_size,
+                                                                     "GPUShaderInterface");
   shaderface->attribute_len = attr_len;
   shaderface->ubo_len = ubo_len;
   shaderface->uniform_len = uniform_len;
-  shaderface->name_buffer = MEM_mallocN(name_buffer_len, "name_buffer");
+  shaderface->name_buffer = (char *)MEM_mallocN(name_buffer_len, "name_buffer");
   GPUShaderInput *inputs = shaderface->inputs;

   /* Temp buffer. */
   int input_tmp_len = max_iii(attr_len, ubo_len, uniform_len);
-  GPUShaderInput *inputs_tmp = MEM_mallocN(sizeof(GPUShaderInput) * input_tmp_len, "name_buffer");
+  GPUShaderInput *inputs_tmp = (GPUShaderInput *)MEM_mallocN(
+      sizeof(GPUShaderInput) * input_tmp_len, "name_buffer");

   /* Attributes */
   shaderface->enabled_attr_mask = 0;

@@ -366,27 +368,29 @@ GPUShaderInterface *GPU_shaderinterface_create(int32_t program)
   sort_input_list(inputs, inputs_tmp, shaderface->uniform_len);

   /* Builtin Uniforms */
-  for (GPUUniformBuiltin u = 0; u < GPU_NUM_UNIFORMS; u++) {
+  for (int32_t u_int = 0; u_int < GPU_NUM_UNIFORMS; u_int++) {
+    GPUUniformBuiltin u = static_cast<GPUUniformBuiltin>(u_int);
     shaderface->builtins[u] = glGetUniformLocation(program, BuiltinUniform_name(u));
   }

   /* Builtin Uniforms Blocks */
-  for (GPUUniformBlockBuiltin u = 0; u < GPU_NUM_UNIFORM_BLOCKS; u++) {
+  for (int32_t u_int = 0; u_int < GPU_NUM_UNIFORM_BLOCKS; u_int++) {
+    GPUUniformBlockBuiltin u = static_cast<GPUUniformBlockBuiltin>(u_int);
     const GPUShaderInput *block = GPU_shaderinterface_ubo(shaderface, BuiltinUniformBlock_name(u));
     shaderface->builtin_blocks[u] = (block != NULL) ? block->binding : -1;
   }

   /* Batches ref buffer */
   shaderface->batches_len = GPU_SHADERINTERFACE_REF_ALLOC_COUNT;
-  shaderface->batches = MEM_callocN(shaderface->batches_len * sizeof(GPUBatch *),
-                                    "GPUShaderInterface batches");
+  shaderface->batches = (GPUBatch **)MEM_callocN(shaderface->batches_len * sizeof(GPUBatch *),
+                                                 "GPUShaderInterface batches");

   MEM_freeN(uniforms_from_blocks);
   MEM_freeN(inputs_tmp);

   /* Resize name buffer to save some memory. */
   if (name_buffer_offset < name_buffer_len) {
-    shaderface->name_buffer = MEM_reallocN(shaderface->name_buffer, name_buffer_offset);
+    shaderface->name_buffer = (char *)MEM_reallocN(shaderface->name_buffer, name_buffer_offset);
   }

 #if DEBUG_SHADER_INTERFACE

@@ -501,8 +505,8 @@ void GPU_shaderinterface_add_batch_ref(GPUShaderInterface *shaderface, GPUBatch
     /* Not enough place, realloc the array. */
     i = shaderface->batches_len;
     shaderface->batches_len += GPU_SHADERINTERFACE_REF_ALLOC_COUNT;
-    shaderface->batches = MEM_recallocN(shaderface->batches,
-                                        sizeof(GPUBatch *) * shaderface->batches_len);
+    shaderface->batches = (GPUBatch **)MEM_recallocN(shaderface->batches,
+                                                     sizeof(GPUBatch *) * shaderface->batches_len);
   }
   shaderface->batches[i] = batch;
 }
@@ -43,7 +43,7 @@
 #include "GPU_platform.h"
 #include "GPU_texture.h"

-#include "gpu_context_private.h"
+#include "gpu_context_private.hh"

 #define WARN_NOT_BOUND(_tex) \
   { \
@@ -27,7 +27,7 @@
 #include "BLI_blenlib.h"
 #include "BLI_math_base.h"

-#include "gpu_context_private.h"
+#include "gpu_context_private.hh"
 #include "gpu_node_graph.h"

 #include "GPU_extensions.h"
@@ -27,7 +27,7 @@

 #include "GPU_vertex_buffer.h"

-#include "gpu_context_private.h"
+#include "gpu_context_private.hh"
 #include "gpu_vertex_format_private.h"

 #include <stdlib.h>
source/blender/gpu/opengl/gl_batch.cc (new file, 124 lines)
@@ -0,0 +1,124 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2016 by Mike Erwin.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ */
+
+#include "BLI_assert.h"
+#include "BLI_utildefines.h"
+
+#include "GPU_glew.h"
+
+#include "gl_context.hh"
+
+using namespace blender::gpu;
+
+void GLContext::draw_batch(GPUBatch *batch, int v_first, int v_count, int i_first, int i_count)
+{
+  /* Verify there is enough data to draw. */
+  /* TODO(fclem) Nice to have but this is invalid when using procedural draw-calls.
+   * The right assert would be to check if there is an enabled attribute from each VBO
+   * and check their length. */
+  // BLI_assert(i_first + i_count <= (batch->inst ? batch->inst->vertex_len : INT_MAX));
+  // BLI_assert(v_first + v_count <=
+  //            (batch->elem ? batch->elem->index_len : batch->verts[0]->vertex_len));
+
+#ifdef __APPLE__
+  GLuint vao = 0;
+#endif
+
+  if (!GPU_arb_base_instance_is_supported()) {
+    if (i_first > 0) {
+#ifdef __APPLE__
+      /**
+       * There seems to be a nasty bug when drawing using the same VAO after reconfiguring it
+       * (see T71147). We just use a throwaway VAO for that. Note that this is likely to
+       * degrade performance.
+       **/
+      glGenVertexArrays(1, &vao);
+      glBindVertexArray(vao);
+#else
+      /* If using offset drawing with instancing, we must
+       * use the default VAO and redo bindings. */
+      glBindVertexArray(default_vao_);
+#endif
+      batch_update_program_bindings(batch, i_first);
+    }
+    else {
+      /* Previous call could have bound the default vao;
+       * see above. */
+      glBindVertexArray(batch->vao_id);
+    }
+  }
+
+  if (batch->elem) {
+    const GPUIndexBuf *el = batch->elem;
+    GLenum index_type = INDEX_TYPE(el);
+    GLint base_index = BASE_INDEX(el);
+    void *v_first_ofs = (GLuint *)0 + v_first + el->index_start;
+
+#if GPU_TRACK_INDEX_RANGE
+    if (el->index_type == GPU_INDEX_U16) {
+      v_first_ofs = (GLushort *)0 + v_first + el->index_start;
+    }
+#endif
+
+    if (GPU_arb_base_instance_is_supported()) {
+      glDrawElementsInstancedBaseVertexBaseInstance(
+          batch->gl_prim_type, v_count, index_type, v_first_ofs, i_count, base_index, i_first);
+    }
+    else {
+      glDrawElementsInstancedBaseVertex(
+          batch->gl_prim_type, v_count, index_type, v_first_ofs, i_count, base_index);
+    }
+  }
+  else {
+#ifdef __APPLE__
+    glDisable(GL_PRIMITIVE_RESTART);
+#endif
+    if (GPU_arb_base_instance_is_supported()) {
+      glDrawArraysInstancedBaseInstance(batch->gl_prim_type, v_first, v_count, i_count, i_first);
+    }
+    else {
+      glDrawArraysInstanced(batch->gl_prim_type, v_first, v_count, i_count);
+    }
+#ifdef __APPLE__
+    glEnable(GL_PRIMITIVE_RESTART);
+#endif
+  }
+
+#ifdef __APPLE__
+  if (vao != 0) {
+    glDeleteVertexArrays(1, &vao);
+  }
+#endif
+}
+
+void GLContext::draw_primitive(GPUPrimType prim_type, int v_count)
+{
+  /* we cannot draw without vao ... annoying ... */
+  glBindVertexArray(default_vao_);
+
+  GLenum type = convert_prim_type_to_gl(prim_type);
+  glDrawArrays(type, 0, v_count);
+
+  /* Performance hog if you are drawing with the same vao multiple times.
+   * Only activate for debugging. */
+  // glBindVertexArray(0);
+}
source/blender/gpu/opengl/gl_context.cc (new file, 188 lines)
@@ -0,0 +1,188 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2016 by Mike Erwin.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ *
+ * Manage GL vertex array IDs in a thread-safe way.
+ * Use these instead of glGenBuffers & its friends:
+ * - alloc must be called from a thread that is bound
+ *   to the context that will be used for drawing with
+ *   this vao.
+ * - free can be called from any thread.
+ */
+
+#include "BLI_assert.h"
+#include "BLI_utildefines.h"
+
+#include "GPU_framebuffer.h"
+
+#include "gpu_context_private.hh"
+
+#include "gl_context.hh"
+
+// TODO(fclem) this requires too much refactor for now.
+// using namespace blender::gpu;
+
+GLContext::GLContext() : GPUContext()
+{
+  glGenVertexArrays(1, &default_vao_);
+  /* TODO(fclem) call ghost here. */
+  // GHOST_GetDefaultOpenGLFramebuffer(g_WS.ghost_window);
+  default_framebuffer_ = 0;
+}
+
+GLContext::~GLContext()
+{
+  BLI_assert(ctx->orphaned_vertarray_ids.empty());
+  /* For now don't allow GPUFrameBuffers to be reused in another ctx. */
+  BLI_assert(ctx->framebuffers.empty());
+
+  /* delete remaining vaos */
+  while (!ctx->batches.empty()) {
+    /* this removes the array entry */
+    GPU_batch_vao_cache_clear(*ctx->batches.begin());
+  }
+  GPU_matrix_state_discard(ctx->matrix_state);
+  glDeleteVertexArrays(1, &ctx->default_vao);
+}
+
+void GLContext::activate(void)
+{
+#ifdef DEBUG
+  /* Make sure no other context is already bound to this thread. */
+  BLI_assert(pthread_equal(pthread_self(), thread_));
+  /* Make sure no other thread has locked it. */
+  BLI_assert(thread_is_used_ == false);
+  thread_ = pthread_self();
+  thread_is_used_ = true;
+#endif
+  orphans_clear();
+};
+
+void GLContext::deactivate(void)
+{
+#ifdef DEBUG
+  thread_is_used_ = false;
+#endif
+};
+
+void GLContext::orphans_clear(void) override
+{
+  /* Check if context has been activated by another thread! */
+  BLI_assert(pthread_equal(pthread_self(), ctx->thread));
+
+  orphans_mutex_.lock();
+  if (!orphaned_vertarrays_.empty()) {
+    glDeleteVertexArrays((uint)orphaned_vertarrays_.size(), orphaned_vertarrays_.data());
+    orphaned_vertarrays_.clear();
+  }
+  if (!orphaned_framebuffers_.empty()) {
+    glDeleteFramebuffers((uint)orphaned_framebuffers_.size(), orphaned_framebuffers_.data());
+    orphaned_framebuffers_.clear();
+  }
+  if (!orphaned_buffers_.empty()) {
+    glDeleteBuffers((uint)orphaned_buffers_.size(), orphaned_buffers_.data());
+    orphaned_buffers_.clear();
+  }
+  if (!orphaned_textures_.empty()) {
+    glDeleteTextures((uint)orphaned_textures_.size(), orphaned_textures_.data());
+    orphaned_textures_.clear();
+  }
+  orphans_mutex_.unlock();
+};
+
+void GLContext::orphans_add(std::vector<GLuint> *orphan_list, GLuint id)
+{
+  orphans_mutex_->lock();
+  orphan_list->emplace_back(id);
+  orphans_mutex_->unlock();
+}
+
+void GLContext::vao_free(GLuint vao_id)
+{
+  if (ctx == GPU_context_active_get()) {
+    glDeleteVertexArrays(1, &vao_id);
+  }
+  else {
+    orphans_add(ctx, &ctx->orphaned_vertarray_ids, vao_id);
+  }
+}
+
+void GLContext::fbo_free(GLuint fbo_id)
+{
+  if (ctx == GPU_context_active_get()) {
+    glDeleteFramebuffers(1, &fbo_id);
+  }
+  else {
+    orphans_add(ctx, &ctx->orphaned_framebuffer_ids, fbo_id);
+  }
+}
+
+void GLContext::buf_free(GLuint buf_id)
+{
+  if (GPU_context_active_get()) {
+    glDeleteBuffers(1, &buf_id);
+  }
+  else {
+    orphans_add(&orphaned_buffer_ids, buf_id);
+  }
+}
+
+void GLContext::tex_free(GLuint tex_id)
+{
+  if (GPU_context_active_get()) {
+    glDeleteTextures(1, &tex_id);
+  }
+  else {
+    orphans_add(&orphaned_texture_ids, tex_id);
+  }
+}
+
+GLuint GLContext::vao_alloc(void)
+{
+  GLuint new_vao_id = 0;
+  orphans_clear();
+  glGenVertexArrays(1, &new_vao_id);
+  return new_vao_id;
+}
+
+GLuint GLContext::fbo_alloc(void)
+{
+  GLuint new_fbo_id = 0;
+  orphans_clear();
+  glGenFramebuffers(1, &new_fbo_id);
+  return new_fbo_id;
+}
+
+GLuint GLContext::buf_alloc(void)
+{
+  GLuint new_buffer_id = 0;
+  orphans_clear();
+  glGenBuffers(1, &new_buffer_id);
+  return new_buffer_id;
+}
+
+GLuint GLContext::tex_alloc(void)
+{
+  GLuint new_texture_id = 0;
+  orphans_clear();
+  glGenTextures(1, &new_texture_id);
+  return new_texture_id;
+}
source/blender/gpu/opengl/gl_context.hh (new file, 93 lines)
@@ -0,0 +1,93 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2020 Blender Foundation.
+ * All rights reserved.
+ */
+
+/** \file
+ * \ingroup gpu
+ */
+
+#pragma once
+
+#include "../intern/gpu_context_private.hh"
+
+#include "glew-mx.h"
+
+#include <iostream>
+#include <mutex>
+#include <unordered_set>
+#include <vector>
+
+// TODO(fclem) this requires too much refactor for now.
+// namespace blender {
+// namespace gpu {
+
+class GLContext : public GPUContext {
+ public:
+  GLContext();
+  ~GLContext(){};
+
+  void activate(void) override;
+  void deactivate(void) override;
+
+  void draw_batch(GPUBatch *batch, int v_first, int v_count, int i_first, int i_count) override;
+  void draw_primitive(GPUPrimType prim_type, int v_count) override;
+
+  void batch_add(GPUBatch *batch) override;
+  void batch_remove(GPUBatch *batch) override;
+
+  /* TODO remove */
+  GLuint tex_alloc(void) override;
+  GLuint vao_alloc(void) override;
+  GLuint buf_alloc(void) override;
+  GLuint fbo_alloc(void) override;
+  void vao_free(GLuint vao_id) override;
+  void fbo_free(GLuint fbo_id) override;
+  void buf_free(GLuint buf_id) override;
+  void tex_free(GLuint tex_id) override;
+  GLuint default_framebuffer_get(void) override
+  {
+    return default_framebuffer_;
+  };
+
+ private:
+  void orphans_add(std::vector<GLuint> *orphan_list, GLuint id);
+  void orphans_clear(void);
+
+  /**
+   * Batches & Framebuffers are not shared across contexts.
+   * For this reason we keep a list of them per GPUBatch & GPUFrameBuffer.
+   * However this list needs to be updated in the case a GPUContext is destroyed.
+   */
+  std::unordered_set<GPUBatch *> batches;
+  std::unordered_set<GPUFrameBuffer *> framebuffers;
+
+  std::vector<GLuint> orphaned_vertarrays_;
+  std::vector<GLuint> orphaned_framebuffers_;
+  std::vector<GLuint> orphaned_buffers_;
+  std::vector<GLuint> orphaned_textures_;
+
+  /** Mutex for the above structures. */
+  /** todo: try spinlock instead */
+  std::mutex orphans_mutex_;
+
+  GLuint default_vao_;
+  GLuint default_framebuffer_;
+};
+
+// } // namespace gpu
+// } // namespace blender
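Taken together with gpu_context.cpp above, caller code stays behind the old C-style API; a brief usage sketch (assembled from the functions shown in this diff, not itself part of it):

    GPUContext *ctx = GPU_context_create(0); /* allocates a GLContext and makes it active */
    /* ... allocate, draw, and free GPU objects on this thread ... */
    GPU_context_discard(ctx); /* must still be this thread's active context */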