Cleanup: style for GPU module
@@ -51,15 +51,15 @@ typedef enum {

 typedef struct GPUBatch {
	/* geometry */
-	GPUVertBuf* verts[GPU_BATCH_VBO_MAX_LEN]; /* verts[0] is required, others can be NULL */
-	GPUVertBuf* inst; /* instance attribs */
-	GPUIndexBuf* elem; /* NULL if element list not needed */
+	GPUVertBuf *verts[GPU_BATCH_VBO_MAX_LEN]; /* verts[0] is required, others can be NULL */
+	GPUVertBuf *inst; /* instance attribs */
+	GPUIndexBuf *elem; /* NULL if element list not needed */
	uint32_t gl_prim_type;

	/* cached values (avoid dereferencing later) */
	uint32_t vao_id;
	uint32_t program;
-	const struct GPUShaderInterface* interface;
+	const struct GPUShaderInterface *interface;

	/* book-keeping */
	uint owns_flag;
@@ -74,21 +74,21 @@ typedef struct GPUBatch {
	union {
		/* Static handle count */
		struct {
-			const struct GPUShaderInterface* interfaces[GPU_BATCH_VAO_STATIC_LEN];
+			const struct GPUShaderInterface *interfaces[GPU_BATCH_VAO_STATIC_LEN];
			uint32_t vao_ids[GPU_BATCH_VAO_STATIC_LEN];
		} static_vaos;
		/* Dynamic handle count */
		struct {
			uint count;
-			const struct GPUShaderInterface** interfaces;
-			uint32_t* vao_ids;
+			const struct GPUShaderInterface **interfaces;
+			uint32_t *vao_ids;
		} dynamic_vaos;
	};

	/* XXX This is the only solution if we want to have some data structure using
	 * batches as key to identify nodes. We must destroy these nodes with this callback. */
-	void (*free_callback)(struct GPUBatch*, void*);
-	void* callback_data;
+	void (*free_callback)(struct GPUBatch *, void *);
+	void *callback_data;
 } GPUBatch;

 enum {
@@ -98,55 +98,55 @@ enum {
	GPU_BATCH_OWNS_INDEX = (1 << 31),
 };

-GPUBatch* GPU_batch_create_ex(GPUPrimType, GPUVertBuf*, GPUIndexBuf*, uint owns_flag);
-void GPU_batch_init_ex(GPUBatch*, GPUPrimType, GPUVertBuf*, GPUIndexBuf*, uint owns_flag);
-GPUBatch* GPU_batch_duplicate(GPUBatch* batch_src);
+GPUBatch *GPU_batch_create_ex(GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
+void GPU_batch_init_ex(GPUBatch *, GPUPrimType, GPUVertBuf *, GPUIndexBuf *, uint owns_flag);
+GPUBatch *GPU_batch_duplicate(GPUBatch *batch_src);

 #define GPU_batch_create(prim, verts, elem) \
	GPU_batch_create_ex(prim, verts, elem, 0)
 #define GPU_batch_init(batch, prim, verts, elem) \
	GPU_batch_init_ex(batch, prim, verts, elem, 0)

-void GPU_batch_discard(GPUBatch*); /* verts & elem are not discarded */
+void GPU_batch_discard(GPUBatch *); /* verts & elem are not discarded */

-void GPU_batch_vao_cache_clear(GPUBatch*);
+void GPU_batch_vao_cache_clear(GPUBatch *);

-void GPU_batch_callback_free_set(GPUBatch*, void (*callback)(GPUBatch*, void*), void*);
+void GPU_batch_callback_free_set(GPUBatch *, void (*callback)(GPUBatch *, void *), void *);

-void GPU_batch_instbuf_set(GPUBatch*, GPUVertBuf*, bool own_vbo); /* Instancing */
+void GPU_batch_instbuf_set(GPUBatch *, GPUVertBuf *, bool own_vbo); /* Instancing */

-int GPU_batch_vertbuf_add_ex(GPUBatch*, GPUVertBuf*, bool own_vbo);
+int GPU_batch_vertbuf_add_ex(GPUBatch *, GPUVertBuf *, bool own_vbo);

 #define GPU_batch_vertbuf_add(batch, verts) \
	GPU_batch_vertbuf_add_ex(batch, verts, false)

-void GPU_batch_program_set_no_use(GPUBatch*, uint32_t program, const GPUShaderInterface*);
-void GPU_batch_program_set(GPUBatch*, uint32_t program, const GPUShaderInterface*);
+void GPU_batch_program_set_no_use(GPUBatch *, uint32_t program, const GPUShaderInterface *);
+void GPU_batch_program_set(GPUBatch *, uint32_t program, const GPUShaderInterface *);
 void GPU_batch_program_set_builtin(GPUBatch *batch, GPUBuiltinShader shader_id);
 /* Entire batch draws with one shader program, but can be redrawn later with another program. */
 /* Vertex shader's inputs must be compatible with the batch's vertex format. */

-void GPU_batch_program_use_begin(GPUBatch*); /* call before Batch_Uniform (temp hack?) */
-void GPU_batch_program_use_end(GPUBatch*);
+void GPU_batch_program_use_begin(GPUBatch *); /* call before Batch_Uniform (temp hack?) */
+void GPU_batch_program_use_end(GPUBatch *);

-void GPU_batch_uniform_1ui(GPUBatch*, const char* name, int value);
-void GPU_batch_uniform_1i(GPUBatch*, const char* name, int value);
-void GPU_batch_uniform_1b(GPUBatch*, const char* name, bool value);
-void GPU_batch_uniform_1f(GPUBatch*, const char* name, float value);
-void GPU_batch_uniform_2f(GPUBatch*, const char* name, float x, float y);
-void GPU_batch_uniform_3f(GPUBatch*, const char* name, float x, float y, float z);
-void GPU_batch_uniform_4f(GPUBatch*, const char* name, float x, float y, float z, float w);
-void GPU_batch_uniform_2fv(GPUBatch*, const char* name, const float data[2]);
-void GPU_batch_uniform_3fv(GPUBatch*, const char* name, const float data[3]);
-void GPU_batch_uniform_4fv(GPUBatch*, const char* name, const float data[4]);
-void GPU_batch_uniform_2fv_array(GPUBatch*, const char* name, int len, const float *data);
-void GPU_batch_uniform_4fv_array(GPUBatch*, const char* name, int len, const float *data);
-void GPU_batch_uniform_mat4(GPUBatch*, const char* name, const float data[4][4]);
+void GPU_batch_uniform_1ui(GPUBatch *, const char *name, int value);
+void GPU_batch_uniform_1i(GPUBatch *, const char *name, int value);
+void GPU_batch_uniform_1b(GPUBatch *, const char *name, bool value);
+void GPU_batch_uniform_1f(GPUBatch *, const char *name, float value);
+void GPU_batch_uniform_2f(GPUBatch *, const char *name, float x, float y);
+void GPU_batch_uniform_3f(GPUBatch *, const char *name, float x, float y, float z);
+void GPU_batch_uniform_4f(GPUBatch *, const char *name, float x, float y, float z, float w);
+void GPU_batch_uniform_2fv(GPUBatch *, const char *name, const float data[2]);
+void GPU_batch_uniform_3fv(GPUBatch *, const char *name, const float data[3]);
+void GPU_batch_uniform_4fv(GPUBatch *, const char *name, const float data[4]);
+void GPU_batch_uniform_2fv_array(GPUBatch *, const char *name, int len, const float *data);
+void GPU_batch_uniform_4fv_array(GPUBatch *, const char *name, int len, const float *data);
+void GPU_batch_uniform_mat4(GPUBatch *, const char *name, const float data[4][4]);

-void GPU_batch_draw(GPUBatch*);
+void GPU_batch_draw(GPUBatch *);

 /* This does not bind/unbind shader and does not call GPU_matrix_bind() */
-void GPU_batch_draw_range_ex(GPUBatch*, int v_first, int v_count, bool force_instance);
+void GPU_batch_draw_range_ex(GPUBatch *, int v_first, int v_count, bool force_instance);

 /* Does not even need batch */
 void GPU_draw_primitive(GPUPrimType, int v_count);
@@ -175,12 +175,12 @@ typedef struct BatchWithOwnVertexBufferAndElementList {
	GPUVertBuf verts; /* link batch.verts to this */
 } BatchWithOwnVertexBufferAndElementList;

-GPUBatch* create_BatchWithOwnVertexBuffer(GPUPrimType, GPUVertFormat*, uint v_len, GPUIndexBuf*);
-GPUBatch* create_BatchWithOwnElementList(GPUPrimType, GPUVertBuf*, uint prim_len);
-GPUBatch* create_BatchWithOwnVertexBufferAndElementList(GPUPrimType, GPUVertFormat*, uint v_len, uint prim_len);
+GPUBatch *create_BatchWithOwnVertexBuffer(GPUPrimType, GPUVertFormat *, uint v_len, GPUIndexBuf *);
+GPUBatch *create_BatchWithOwnElementList(GPUPrimType, GPUVertBuf *, uint prim_len);
+GPUBatch *create_BatchWithOwnVertexBufferAndElementList(GPUPrimType, GPUVertFormat *, uint v_len, uint prim_len);
 /* verts: shared, own */
 /* elem: none, shared, own */
-GPUBatch* create_BatchInGeneral(GPUPrimType, VertexBufferStuff, ElementListStuff);
+GPUBatch *create_BatchInGeneral(GPUPrimType, VertexBufferStuff, ElementListStuff);

 #endif /* future plans */

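For orientation, a minimal sketch of how the batch API above is typically used (not part of the diff; `verts` is assumed to be a filled GPUVertBuf and `program`/`shaderface` to come from an already-compiled shader):

    /* Sketch only -- error handling omitted. */
    GPUBatch *batch = GPU_batch_create(GPU_PRIM_TRIS, verts, NULL); /* no index buffer */
    GPU_batch_program_set(batch, program, shaderface); /* also begins use, see the hack note */
    GPU_batch_uniform_4f(batch, "color", 1.0f, 0.5f, 0.0f, 1.0f);
    GPU_batch_draw(batch);
    GPU_batch_discard(batch); /* verts & elem are not discarded */

The batch can be redrawn any number of times, including with a different program via another GPU_batch_program_set() call.
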
@@ -42,11 +42,11 @@ extern "C" {

 typedef struct GPUContext GPUContext;

-GPUContext* GPU_context_create(void);
-void GPU_context_discard(GPUContext*);
+GPUContext *GPU_context_create(void);
+void GPU_context_discard(GPUContext *);

-void GPU_context_active_set(GPUContext*);
-GPUContext* GPU_context_active_get(void);
+void GPU_context_active_set(GPUContext *);
+GPUContext *GPU_context_active_get(void);

 #ifdef __cplusplus
 }

@@ -57,37 +57,37 @@ typedef struct GPUIndexBuf {
	bool use_prim_restart;
 } GPUIndexBuf;

-void GPU_indexbuf_use(GPUIndexBuf*);
-uint GPU_indexbuf_size_get(const GPUIndexBuf*);
+void GPU_indexbuf_use(GPUIndexBuf *);
+uint GPU_indexbuf_size_get(const GPUIndexBuf *);

 typedef struct GPUIndexBufBuilder {
	uint max_allowed_index;
	uint max_index_len;
	uint index_len;
	GPUPrimType prim_type;
-	uint* data;
+	uint *data;
	bool use_prim_restart;
 } GPUIndexBufBuilder;


 /* supports all primitive types. */
-void GPU_indexbuf_init_ex(GPUIndexBufBuilder*, GPUPrimType, uint index_len, uint vertex_len, bool use_prim_restart);
+void GPU_indexbuf_init_ex(GPUIndexBufBuilder *, GPUPrimType, uint index_len, uint vertex_len, bool use_prim_restart);

 /* supports only GPU_PRIM_POINTS, GPU_PRIM_LINES and GPU_PRIM_TRIS. */
-void GPU_indexbuf_init(GPUIndexBufBuilder*, GPUPrimType, uint prim_len, uint vertex_len);
+void GPU_indexbuf_init(GPUIndexBufBuilder *, GPUPrimType, uint prim_len, uint vertex_len);

-void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder*, uint v);
-void GPU_indexbuf_add_primitive_restart(GPUIndexBufBuilder*);
+void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder *, uint v);
+void GPU_indexbuf_add_primitive_restart(GPUIndexBufBuilder *);

-void GPU_indexbuf_add_point_vert(GPUIndexBufBuilder*, uint v);
-void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder*, uint v1, uint v2);
-void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder*, uint v1, uint v2, uint v3);
-void GPU_indexbuf_add_line_adj_verts(GPUIndexBufBuilder*, uint v1, uint v2, uint v3, uint v4);
+void GPU_indexbuf_add_point_vert(GPUIndexBufBuilder *, uint v);
+void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder *, uint v1, uint v2);
+void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder *, uint v1, uint v2, uint v3);
+void GPU_indexbuf_add_line_adj_verts(GPUIndexBufBuilder *, uint v1, uint v2, uint v3, uint v4);

-GPUIndexBuf* GPU_indexbuf_build(GPUIndexBufBuilder*);
-void GPU_indexbuf_build_in_place(GPUIndexBufBuilder*, GPUIndexBuf*);
+GPUIndexBuf *GPU_indexbuf_build(GPUIndexBufBuilder *);
+void GPU_indexbuf_build_in_place(GPUIndexBufBuilder *, GPUIndexBuf *);

-void GPU_indexbuf_discard(GPUIndexBuf*);
+void GPU_indexbuf_discard(GPUIndexBuf *);


 /* Macros */

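As a usage note for the builder API above, a sketch of building an index buffer for two triangles over four shared vertices (not part of the diff):

    GPUIndexBufBuilder elb;
    GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, 2, 4); /* prim_len = 2, vertex_len = 4 */
    GPU_indexbuf_add_tri_verts(&elb, 0, 1, 2);
    GPU_indexbuf_add_tri_verts(&elb, 2, 1, 3);
    GPUIndexBuf *ibo = GPU_indexbuf_build(&elb); /* free later with GPU_indexbuf_discard() */
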
@@ -179,7 +179,8 @@ void GPU_framebuffer_recursive_downsample(
  * - wrapper around framebuffer and texture for simple offscreen drawing
  */

-GPUOffScreen *GPU_offscreen_create(int width, int height, int samples,
+GPUOffScreen *GPU_offscreen_create(
+        int width, int height, int samples,
         bool depth, bool high_bitdepth, char err_out[256]);
 void GPU_offscreen_free(GPUOffScreen *ofs);
 void GPU_offscreen_bind(GPUOffScreen *ofs, bool save);

@@ -40,9 +40,9 @@
 #include "GPU_immediate_util.h"
 #include "GPU_shader.h"

-GPUVertFormat* immVertexFormat(void); /* returns a cleared vertex format, ready for add_attrib. */
+GPUVertFormat *immVertexFormat(void); /* returns a cleared vertex format, ready for add_attrib. */

-void immBindProgram(uint32_t program, const GPUShaderInterface*); /* every immBegin must have a program bound first. */
+void immBindProgram(uint32_t program, const GPUShaderInterface *); /* every immBegin must have a program bound first. */
 void immUnbindProgram(void); /* call after your last immEnd, or before binding another program. */

 void immBegin(GPUPrimType, uint vertex_len); /* must supply exactly vertex_len vertices. */
@@ -52,8 +52,8 @@ void immEnd(void); /* finishes and draws. */
 /* ImmBegin a batch, then use standard immFunctions as usual. */
 /* ImmEnd will finalize the batch instead of drawing. */
 /* Then you can draw it as many times as you like! Partially replaces the need for display lists. */
-GPUBatch* immBeginBatch(GPUPrimType, uint vertex_len);
-GPUBatch* immBeginBatchAtMost(GPUPrimType, uint vertex_len);
+GPUBatch *immBeginBatch(GPUPrimType, uint vertex_len);
+GPUBatch *immBeginBatchAtMost(GPUPrimType, uint vertex_len);

 /* Provide attribute values that can change per vertex. */
 /* First vertex after immBegin must have all its attributes specified. */

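As a usage note for the batch-capture comments above, a sketch (not part of the diff; `pos` is assumed to be an attrib id added to immVertexFormat(), and a program is assumed bound via immBindProgram()):

    GPUBatch *batch = immBeginBatch(GPU_PRIM_LINES, 2);
    immVertex2f(pos, 0.0f, 0.0f);
    immVertex2f(pos, 1.0f, 1.0f);
    immEnd(); /* finalizes the batch instead of drawing */
    /* the batch can now be drawn repeatedly through the GPU_batch API */
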
@@ -99,18 +99,18 @@ void immVertex3fv(uint attrib_id, const float data[3]);
 void immVertex2iv(uint attrib_id, const int data[2]);

 /* Provide uniform values that don't change for the entire draw call. */
-void immUniform1i(const char* name, int x);
-void immUniform4iv(const char* name, const int data[4]);
-void immUniform1f(const char* name, float x);
-void immUniform2f(const char* name, float x, float y);
-void immUniform2fv(const char* name, const float data[2]);
-void immUniform3f(const char* name, float x, float y, float z);
-void immUniform3fv(const char* name, const float data[3]);
-void immUniformArray3fv(const char* name, const float *data, int count);
-void immUniform4f(const char* name, float x, float y, float z, float w);
-void immUniform4fv(const char* name, const float data[4]);
-void immUniformArray4fv(const char* bare_name, const float *data, int count);
-void immUniformMatrix4fv(const char* name, const float data[4][4]);
+void immUniform1i(const char *name, int x);
+void immUniform4iv(const char *name, const int data[4]);
+void immUniform1f(const char *name, float x);
+void immUniform2f(const char *name, float x, float y);
+void immUniform2fv(const char *name, const float data[2]);
+void immUniform3f(const char *name, float x, float y, float z);
+void immUniform3fv(const char *name, const float data[3]);
+void immUniformArray3fv(const char *name, const float *data, int count);
+void immUniform4f(const char *name, float x, float y, float z, float w);
+void immUniform4fv(const char *name, const float data[4]);
+void immUniformArray4fv(const char *bare_name, const float *data, int count);
+void immUniformMatrix4fv(const char *name, const float data[4][4]);

 /* Convenience functions for setting "uniform vec4 color". */
 /* The rgb functions have implicit alpha = 1.0. */

@@ -104,7 +104,7 @@ typedef enum GPUBuiltin {
	GPU_AUTO_BUMPSCALE = (1 << 7),
	GPU_CAMERA_TEXCO_FACTORS = (1 << 8),
	GPU_PARTICLE_SCALAR_PROPS = (1 << 9),
-	GPU_PARTICLE_LOCATION = (1 << 10),
+	GPU_PARTICLE_LOCATION = (1 << 10),
	GPU_PARTICLE_VELOCITY = (1 << 11),
	GPU_PARTICLE_ANG_VELOCITY = (1 << 12),
	GPU_LOC_TO_VIEW_MATRIX = (1 << 13),
@@ -268,8 +268,9 @@ GPUMaterialStatus GPU_material_status(GPUMaterial *mat);
 struct GPUUniformBuffer *GPU_material_uniform_buffer_get(GPUMaterial *material);
 void GPU_material_uniform_buffer_create(GPUMaterial *material, ListBase *inputs);

-void GPU_material_vertex_attributes(GPUMaterial *material,
-                                    struct GPUVertexAttribs *attrib);
+void GPU_material_vertex_attributes(
+        GPUMaterial *material,
+        struct GPUVertexAttribs *attrib);

 bool GPU_material_do_color_management(GPUMaterial *mat);
 bool GPU_material_use_domain_surface(GPUMaterial *mat);

@@ -89,10 +89,12 @@ void *GPU_shader_get_interface(GPUShader *shader);
 int GPU_shader_get_uniform(GPUShader *shader, const char *name);
 int GPU_shader_get_builtin_uniform(GPUShader *shader, int builtin);
 int GPU_shader_get_uniform_block(GPUShader *shader, const char *name);
-void GPU_shader_uniform_vector(GPUShader *shader, int location, int length,
-                               int arraysize, const float *value);
-void GPU_shader_uniform_vector_int(GPUShader *shader, int location, int length,
-                                   int arraysize, const int *value);
+void GPU_shader_uniform_vector(
+        GPUShader *shader, int location, int length,
+        int arraysize, const float *value);
+void GPU_shader_uniform_vector_int(
+        GPUShader *shader, int location, int length,
+        int arraysize, const int *value);

 void GPU_shader_uniform_buffer(GPUShader *shader, int location, struct GPUUniformBuffer *ubo);
 void GPU_shader_uniform_texture(GPUShader *shader, int location, struct GPUTexture *tex);
@@ -375,7 +377,7 @@ typedef struct GPUVertexAttribs {
		int glinfoindoex;
		int gltexco;
		int attribid;
-		char name[64]; /* MAX_CUSTOMDATA_LAYER_NAME */
+		char name[64]; /* MAX_CUSTOMDATA_LAYER_NAME */
	} layer[GPU_MAX_ATTRIB];

	int totlayer;

@@ -65,7 +65,7 @@ typedef enum {
 } GPUUniformBuiltin;

 typedef struct GPUShaderInput {
-	struct GPUShaderInput* next;
+	struct GPUShaderInput *next;
	uint32_t name_offset;
	uint name_hash;
	GPUUniformBuiltin builtin_type; /* only for uniform inputs */
@@ -80,25 +80,25 @@ typedef struct GPUShaderInput {
 typedef struct GPUShaderInterface {
	int32_t program;
	uint32_t name_buffer_offset;
-	GPUShaderInput* attrib_buckets[GPU_NUM_SHADERINTERFACE_BUCKETS];
-	GPUShaderInput* uniform_buckets[GPU_NUM_SHADERINTERFACE_BUCKETS];
-	GPUShaderInput* ubo_buckets[GPU_NUM_SHADERINTERFACE_BUCKETS];
-	GPUShaderInput* builtin_uniforms[GPU_NUM_UNIFORMS];
-	char* name_buffer;
-	struct GPUBatch** batches; /* references to batches using this interface */
+	GPUShaderInput *attrib_buckets[GPU_NUM_SHADERINTERFACE_BUCKETS];
+	GPUShaderInput *uniform_buckets[GPU_NUM_SHADERINTERFACE_BUCKETS];
+	GPUShaderInput *ubo_buckets[GPU_NUM_SHADERINTERFACE_BUCKETS];
+	GPUShaderInput *builtin_uniforms[GPU_NUM_UNIFORMS];
+	char *name_buffer;
+	struct GPUBatch **batches; /* references to batches using this interface */
	uint batches_len;
 } GPUShaderInterface;

-GPUShaderInterface* GPU_shaderinterface_create(int32_t program_id);
-void GPU_shaderinterface_discard(GPUShaderInterface*);
+GPUShaderInterface *GPU_shaderinterface_create(int32_t program_id);
+void GPU_shaderinterface_discard(GPUShaderInterface *);

-const GPUShaderInput* GPU_shaderinterface_uniform(const GPUShaderInterface*, const char* name);
-const GPUShaderInput* GPU_shaderinterface_uniform_builtin(const GPUShaderInterface*, GPUUniformBuiltin);
-const GPUShaderInput* GPU_shaderinterface_ubo(const GPUShaderInterface*, const char* name);
-const GPUShaderInput* GPU_shaderinterface_attr(const GPUShaderInterface*, const char* name);
+const GPUShaderInput *GPU_shaderinterface_uniform(const GPUShaderInterface *, const char *name);
+const GPUShaderInput *GPU_shaderinterface_uniform_builtin(const GPUShaderInterface *, GPUUniformBuiltin);
+const GPUShaderInput *GPU_shaderinterface_ubo(const GPUShaderInterface *, const char *name);
+const GPUShaderInput *GPU_shaderinterface_attr(const GPUShaderInterface *, const char *name);

 /* keep track of batches using this interface */
-void GPU_shaderinterface_add_batch_ref(GPUShaderInterface*, struct GPUBatch*);
-void GPU_shaderinterface_remove_batch_ref(GPUShaderInterface*, struct GPUBatch*);
+void GPU_shaderinterface_add_batch_ref(GPUShaderInterface *, struct GPUBatch *);
+void GPU_shaderinterface_remove_batch_ref(GPUShaderInterface *, struct GPUBatch *);

 #endif /* __GPU_SHADER_INTERFACE_H__ */

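For orientation, a sketch of the typical lookup flow through this interface (not part of the diff; `program_id` is assumed to be a linked GL program, and GPUShaderInput carries a `location` member, as the GET_UNIFORM macro later in this commit relies on):

    GPUShaderInterface *shaderface = GPU_shaderinterface_create(program_id);
    const GPUShaderInput *input = GPU_shaderinterface_uniform(shaderface, "color");
    if (input != NULL) {
        glUniform4f(input->location, 1.0f, 1.0f, 1.0f, 1.0f);
    }
    GPU_shaderinterface_discard(shaderface);
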
@@ -27,7 +27,7 @@
 #ifndef __GPU_STATE_H__
 #define __GPU_STATE_H__

-/* These map directly to the GL_ blend functions, to minimize API add as needed*/
+/* These map directly to the GL_ blend functions, to minimize API add as needed*/
 typedef enum GPUBlendFunction {
	GPU_ONE,
	GPU_SRC_ALPHA,

@@ -46,7 +46,7 @@ extern "C" {

 GLuint GPU_vao_default(void);
 GLuint GPU_vao_alloc(void);
-void GPU_vao_free(GLuint vao_id, GPUContext*);
+void GPU_vao_free(GLuint vao_id, GPUContext *);

 #ifdef __cplusplus
 }

@@ -55,54 +55,54 @@ typedef struct GPUVertBuf {
	uint vertex_len; /* number of verts we want to draw */
	uint vertex_alloc; /* number of verts data */
	bool dirty;
-	unsigned char* data; /* NULL indicates data in VRAM (unmapped) */
+	unsigned char *data; /* NULL indicates data in VRAM (unmapped) */
	uint32_t vbo_id; /* 0 indicates not yet allocated */
	GPUUsageType usage; /* usage hint for GL optimisation */
 } GPUVertBuf;

-GPUVertBuf* GPU_vertbuf_create(GPUUsageType);
-GPUVertBuf* GPU_vertbuf_create_with_format_ex(const GPUVertFormat*, GPUUsageType);
+GPUVertBuf *GPU_vertbuf_create(GPUUsageType);
+GPUVertBuf *GPU_vertbuf_create_with_format_ex(const GPUVertFormat *, GPUUsageType);

 #define GPU_vertbuf_create_with_format(format) \
	GPU_vertbuf_create_with_format_ex(format, GPU_USAGE_STATIC)

-void GPU_vertbuf_discard(GPUVertBuf*);
+void GPU_vertbuf_discard(GPUVertBuf *);

-void GPU_vertbuf_init(GPUVertBuf*, GPUUsageType);
-void GPU_vertbuf_init_with_format_ex(GPUVertBuf*, const GPUVertFormat*, GPUUsageType);
+void GPU_vertbuf_init(GPUVertBuf *, GPUUsageType);
+void GPU_vertbuf_init_with_format_ex(GPUVertBuf *, const GPUVertFormat *, GPUUsageType);

 #define GPU_vertbuf_init_with_format(verts, format) \
	GPU_vertbuf_init_with_format_ex(verts, format, GPU_USAGE_STATIC)

-uint GPU_vertbuf_size_get(const GPUVertBuf*);
-void GPU_vertbuf_data_alloc(GPUVertBuf*, uint v_len);
-void GPU_vertbuf_data_resize(GPUVertBuf*, uint v_len);
-void GPU_vertbuf_vertex_count_set(GPUVertBuf*, uint v_len);
+uint GPU_vertbuf_size_get(const GPUVertBuf *);
+void GPU_vertbuf_data_alloc(GPUVertBuf *, uint v_len);
+void GPU_vertbuf_data_resize(GPUVertBuf *, uint v_len);
+void GPU_vertbuf_vertex_count_set(GPUVertBuf *, uint v_len);

 /* The most important set_attrib variant is the untyped one. Get it right first. */
 /* It takes a void* so the app developer is responsible for matching their app data types */
 /* to the vertex attribute's type and component count. They're in control of both, so this */
 /* should not be a problem. */

-void GPU_vertbuf_attr_set(GPUVertBuf*, uint a_idx, uint v_idx, const void* data);
-void GPU_vertbuf_attr_fill(GPUVertBuf*, uint a_idx, const void* data); /* tightly packed, non interleaved input data */
-void GPU_vertbuf_attr_fill_stride(GPUVertBuf*, uint a_idx, uint stride, const void* data);
+void GPU_vertbuf_attr_set(GPUVertBuf *, uint a_idx, uint v_idx, const void *data);
+void GPU_vertbuf_attr_fill(GPUVertBuf *, uint a_idx, const void *data); /* tightly packed, non interleaved input data */
+void GPU_vertbuf_attr_fill_stride(GPUVertBuf *, uint a_idx, uint stride, const void *data);

 /* For low level access only */
 typedef struct GPUVertBufRaw {
	uint size;
	uint stride;
-	unsigned char* data;
-	unsigned char* data_init;
+	unsigned char *data;
+	unsigned char *data_init;
 #if TRUST_NO_ONE
	/* Only for overflow check */
-	unsigned char* _data_end;
+	unsigned char *_data_end;
 #endif
 } GPUVertBufRaw;

 GPU_INLINE void *GPU_vertbuf_raw_step(GPUVertBufRaw *a)
 {
-	unsigned char* data = a->data;
+	unsigned char *data = a->data;
	a->data += a->stride;
 #if TRUST_NO_ONE
	assert(data < a->_data_end);
@@ -115,7 +115,7 @@ GPU_INLINE uint GPU_vertbuf_raw_used(GPUVertBufRaw *a)
	return ((a->data - a->data_init) / a->stride);
 }

-void GPU_vertbuf_attr_get_raw_data(GPUVertBuf*, uint a_idx, GPUVertBufRaw *access);
+void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *, uint a_idx, GPUVertBufRaw *access);

 /* TODO: decide whether to keep the functions below */
 /* doesn't immediate mode satisfy these needs? */
@@ -128,7 +128,7 @@ void GPU_vertbuf_attr_get_raw_data(GPUVertBuf*, uint a_idx, GPUVertBufRaw *acces
 /* void setAttrib3ub(unsigned a_idx, unsigned v_idx, unsigned char r, unsigned char g, unsigned char b); */
 /* void setAttrib4ub(unsigned a_idx, unsigned v_idx, unsigned char r, unsigned char g, unsigned char b, unsigned char a); */

-void GPU_vertbuf_use(GPUVertBuf*);
+void GPU_vertbuf_use(GPUVertBuf *);

 /* Metrics */
 uint GPU_vertbuf_get_memory_usage(void);

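As a usage note for the vertex-buffer API above, a sketch of the declare/alloc/fill flow (not part of the diff; GPU_COMP_F32 and GPU_FETCH_FLOAT are the component-type and fetch-mode enums from the vertex format header below):

    static GPUVertFormat format = {0};
    GPU_vertformat_clear(&format);
    uint pos = GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 3, GPU_FETCH_FLOAT);
    GPUVertBuf *verts = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(verts, 3); /* room for 3 vertices */
    const float co[3] = {0.0f, 0.0f, 0.0f};
    GPU_vertbuf_attr_set(verts, pos, 0, co); /* attribute `pos`, vertex 0 */
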
@@ -67,7 +67,7 @@ typedef struct GPUVertAttr {
	uint sz; /* size in bytes, 1 to 64 */
	uint offset; /* from beginning of vertex, in bytes */
	uint name_len; /* up to GPU_VERT_ATTR_MAX_NAMES */
-	const char* name[GPU_VERT_ATTR_MAX_NAMES];
+	const char *name[GPU_VERT_ATTR_MAX_NAMES];
 } GPUVertAttr;

 typedef struct GPUVertFormat {
@@ -80,11 +80,13 @@ typedef struct GPUVertFormat {
	GPUVertAttr attribs[GPU_VERT_ATTR_MAX_LEN]; /* TODO: variable-size attribs array */
 } GPUVertFormat;

-void GPU_vertformat_clear(GPUVertFormat*);
-void GPU_vertformat_copy(GPUVertFormat* dest, const GPUVertFormat* src);
+void GPU_vertformat_clear(GPUVertFormat *);
+void GPU_vertformat_copy(GPUVertFormat *dest, const GPUVertFormat *src);

-uint GPU_vertformat_attr_add(GPUVertFormat*, const char* name, GPUVertCompType, uint comp_len, GPUVertFetchMode);
-void GPU_vertformat_alias_add(GPUVertFormat*, const char* alias);
+uint GPU_vertformat_attr_add(
+        GPUVertFormat *, const char *name,
+        GPUVertCompType, uint comp_len, GPUVertFetchMode);
+void GPU_vertformat_alias_add(GPUVertFormat *, const char *alias);

 /* format conversion */

@@ -92,7 +94,7 @@ typedef struct GPUPackedNormal {
	int x : 10;
	int y : 10;
	int z : 10;
-	int w : 2; /* 0 by default, can manually set to { -2, -1, 0, 1 } */
+	int w : 2; /* 0 by default, can manually set to { -2, -1, 0, 1 } */
 } GPUPackedNormal;

 GPUPackedNormal GPU_normal_convert_i10_v3(const float data[3]);
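A brief usage note (not part of the diff): the packed form is meant to be written into a signed 10_10_10_2 vertex attribute, e.g. one declared with GPU_COMP_I10 (assumed enum name from the same header):

    const float nor[3] = {0.0f, 0.0f, 1.0f};
    GPUPackedNormal pnor = GPU_normal_convert_i10_v3(nor);
    /* then: GPU_vertbuf_attr_set(verts, nor_id, v, &pnor); */
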
@@ -38,13 +38,13 @@
 #error "attrib binding code assumes GPU_VERT_ATTR_MAX_LEN = 16"
 #endif

-void AttribBinding_clear(GPUAttrBinding* binding)
+void AttribBinding_clear(GPUAttrBinding *binding)
 {
	binding->loc_bits = 0;
	binding->enabled_bits = 0;
 }

-uint read_attrib_location(const GPUAttrBinding* binding, uint a_idx)
+uint read_attrib_location(const GPUAttrBinding *binding, uint a_idx)
 {
 #if TRUST_NO_ONE
	assert(a_idx < GPU_VERT_ATTR_MAX_LEN);
@@ -53,7 +53,7 @@ uint read_attrib_location(const GPUAttrBinding* binding, uint a_idx)
	return (binding->loc_bits >> (4 * a_idx)) & 0xF;
 }

-static void write_attrib_location(GPUAttrBinding* binding, uint a_idx, uint location)
+static void write_attrib_location(GPUAttrBinding *binding, uint a_idx, uint location)
 {
 #if TRUST_NO_ONE
	assert(a_idx < GPU_VERT_ATTR_MAX_LEN);
@@ -67,14 +67,14 @@ static void write_attrib_location(GPUAttrBinding* binding, uint a_idx, uint loca
	binding->enabled_bits |= 1 << a_idx;
 }

-void get_attrib_locations(const GPUVertFormat* format, GPUAttrBinding* binding, const GPUShaderInterface* shaderface)
+void get_attrib_locations(const GPUVertFormat *format, GPUAttrBinding *binding, const GPUShaderInterface *shaderface)
 {
	AttribBinding_clear(binding);

	for (uint a_idx = 0; a_idx < format->attr_len; ++a_idx) {
-		const GPUVertAttr* a = format->attribs + a_idx;
+		const GPUVertAttr *a = format->attribs + a_idx;
		for (uint n_idx = 0; n_idx < a->name_len; ++n_idx) {
-			const GPUShaderInput* input = GPU_shaderinterface_attr(shaderface, a->name[n_idx]);
+			const GPUShaderInput *input = GPU_shaderinterface_attr(shaderface, a->name[n_idx]);
 #if TRUST_NO_ONE
			assert(input != NULL);
			/* TODO: make this a recoverable runtime error? indicates mismatch between vertex format and program */

@@ -82,7 +82,8 @@ const GLubyte stipple_halftone[128] = {
	0xAA, 0xAA, 0xAA, 0xAA, 0x55, 0x55, 0x55, 0x55,
	0xAA, 0xAA, 0xAA, 0xAA, 0x55, 0x55, 0x55, 0x55,
	0xAA, 0xAA, 0xAA, 0xAA, 0x55, 0x55, 0x55, 0x55,
-	0xAA, 0xAA, 0xAA, 0xAA, 0x55, 0x55, 0x55, 0x55};
+	0xAA, 0xAA, 0xAA, 0xAA, 0x55, 0x55, 0x55, 0x55,
+};

 const GLubyte stipple_quarttone[128] = {
	136, 136, 136, 136, 0, 0, 0, 0, 34, 34, 34, 34, 0, 0, 0, 0,
@@ -92,7 +93,8 @@ const GLubyte stipple_quarttone[128] = {
	136, 136, 136, 136, 0, 0, 0, 0, 34, 34, 34, 34, 0, 0, 0, 0,
	136, 136, 136, 136, 0, 0, 0, 0, 34, 34, 34, 34, 0, 0, 0, 0,
	136, 136, 136, 136, 0, 0, 0, 0, 34, 34, 34, 34, 0, 0, 0, 0,
-	136, 136, 136, 136, 0, 0, 0, 0, 34, 34, 34, 34, 0, 0, 0, 0};
+	136, 136, 136, 136, 0, 0, 0, 0, 34, 34, 34, 34, 0, 0, 0, 0,
+};

 const GLubyte stipple_diag_stripes_pos[128] = {
	0x00, 0xff, 0x00, 0xff, 0x01, 0xfe, 0x01, 0xfe,
@@ -110,7 +112,8 @@ const GLubyte stipple_diag_stripes_pos[128] = {
	0xff, 0x00, 0xff, 0x00, 0xfe, 0x01, 0xfe, 0x01,
	0xfc, 0x03, 0xfc, 0x03, 0xf8, 0x07, 0xf8, 0x07,
	0xf0, 0x0f, 0xf0, 0x0f, 0xe0, 0x1f, 0xe0, 0x1f,
-	0xc0, 0x3f, 0xc0, 0x3f, 0x80, 0x7f, 0x80, 0x7f};
+	0xc0, 0x3f, 0xc0, 0x3f, 0x80, 0x7f, 0x80, 0x7f,
+};

 const GLubyte stipple_diag_stripes_neg[128] = {
	0xff, 0x00, 0xff, 0x00, 0xfe, 0x01, 0xfe, 0x01,
@@ -128,7 +131,8 @@ const GLubyte stipple_diag_stripes_neg[128] = {
	0x00, 0xff, 0x00, 0xff, 0x01, 0xfe, 0x01, 0xfe,
	0x03, 0xfc, 0x03, 0xfc, 0x07, 0xf8, 0x07, 0xf8,
	0x0f, 0xf0, 0x0f, 0xf0, 0x1f, 0xe0, 0x1f, 0xe0,
-	0x3f, 0xc0, 0x3f, 0xc0, 0x7f, 0x80, 0x7f, 0x80};
+	0x3f, 0xc0, 0x3f, 0xc0, 0x7f, 0x80, 0x7f, 0x80,
+};

 const GLubyte stipple_checker_8px[128] = {
	255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0,
@@ -138,7 +142,8 @@ const GLubyte stipple_checker_8px[128] = {
	255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0,
	255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0,
	0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255,
-	0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255};
+	0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255, 0, 255,
+};

 const GLubyte stipple_hexagon[128] = {
	0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
@@ -156,7 +161,8 @@ const GLubyte stipple_hexagon[128] = {
	0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
	0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
	0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88, 0x88,
-	0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22};
+	0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22, 0x22,
+};
 /* ********************************************* */

 /* Init / exit */

@@ -44,9 +44,9 @@
 #include <stdlib.h>
 #include <string.h>

-static void batch_update_program_bindings(GPUBatch* batch, uint v_first);
+static void batch_update_program_bindings(GPUBatch *batch, uint v_first);

-void GPU_batch_vao_cache_clear(GPUBatch* batch)
+void GPU_batch_vao_cache_clear(GPUBatch *batch)
 {
	if (batch->context == NULL) {
		return;
@@ -82,17 +82,17 @@ void GPU_batch_vao_cache_clear(GPUBatch* batch)
	batch->context = NULL;
 }

-GPUBatch* GPU_batch_create_ex(
-        GPUPrimType prim_type, GPUVertBuf* verts, GPUIndexBuf* elem,
+GPUBatch *GPU_batch_create_ex(
+        GPUPrimType prim_type, GPUVertBuf *verts, GPUIndexBuf *elem,
         uint owns_flag)
 {
-	GPUBatch* batch = calloc(1, sizeof(GPUBatch));
+	GPUBatch *batch = calloc(1, sizeof(GPUBatch));
	GPU_batch_init_ex(batch, prim_type, verts, elem, owns_flag);
	return batch;
 }

 void GPU_batch_init_ex(
-        GPUBatch* batch, GPUPrimType prim_type, GPUVertBuf* verts, GPUIndexBuf* elem,
+        GPUBatch *batch, GPUPrimType prim_type, GPUVertBuf *verts, GPUIndexBuf *elem,
         uint owns_flag)
 {
 #if TRUST_NO_ONE
@@ -113,9 +113,9 @@ void GPU_batch_init_ex(
 }

 /* This will share the VBOs with the new batch. */
-GPUBatch* GPU_batch_duplicate(GPUBatch* batch_src)
+GPUBatch *GPU_batch_duplicate(GPUBatch *batch_src)
 {
-	GPUBatch* batch = GPU_batch_create_ex(GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);
+	GPUBatch *batch = GPU_batch_create_ex(GPU_PRIM_POINTS, batch_src->verts[0], batch_src->elem, 0);

	batch->gl_prim_type = batch_src->gl_prim_type;
	for (int v = 1; v < GPU_BATCH_VBO_MAX_LEN; ++v) {
@@ -124,7 +124,7 @@ GPUBatch* GPU_batch_duplicate(GPUBatch* batch_src)
	return batch;
 }

-void GPU_batch_discard(GPUBatch* batch)
+void GPU_batch_discard(GPUBatch *batch)
 {
	if (batch->owns_flag & GPU_BATCH_OWNS_INDEX) {
		GPU_indexbuf_discard(batch->elem);
@@ -150,13 +150,13 @@ void GPU_batch_discard(GPUBatch* batch)
	free(batch);
 }

-void GPU_batch_callback_free_set(GPUBatch* batch, void (*callback)(GPUBatch*, void*), void* user_data)
+void GPU_batch_callback_free_set(GPUBatch *batch, void (*callback)(GPUBatch *, void *), void *user_data)
 {
	batch->free_callback = callback;
	batch->callback_data = user_data;
 }

-void GPU_batch_instbuf_set(GPUBatch* batch, GPUVertBuf* inst, bool own_vbo)
+void GPU_batch_instbuf_set(GPUBatch *batch, GPUVertBuf *inst, bool own_vbo)
 {
 #if TRUST_NO_ONE
	assert(inst != NULL);
@@ -179,7 +179,7 @@ void GPU_batch_instbuf_set(GPUBatch* batch, GPUVertBuf* inst, bool own_vbo)

 /* Returns the index of verts in the batch. */
 int GPU_batch_vertbuf_add_ex(
-        GPUBatch* batch, GPUVertBuf* verts,
+        GPUBatch *batch, GPUVertBuf *verts,
         bool own_vbo)
 {
	/* redo the bindings */
@@ -238,7 +238,7 @@ static GLuint batch_vao_get(GPUBatch *batch)
	GLuint new_vao = 0;
	if (!batch->is_dynamic_vao_count) {
		int i; /* find first unused slot */
-		for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
+		for (i = 0; i < GPU_BATCH_VAO_STATIC_LEN; ++i)
			if (batch->static_vaos.vao_ids[i] == 0)
				break;
@@ -251,12 +251,12 @@ static GLuint batch_vao_get(GPUBatch *batch)
			batch->is_dynamic_vao_count = true;
			/* Erase previous entries, they will be added back if drawn again. */
			for (int j = 0; j < GPU_BATCH_VAO_STATIC_LEN; ++j) {
-				GPU_shaderinterface_remove_batch_ref((GPUShaderInterface*)batch->static_vaos.interfaces[j], batch);
+				GPU_shaderinterface_remove_batch_ref((GPUShaderInterface *)batch->static_vaos.interfaces[j], batch);
				GPU_vao_free(batch->static_vaos.vao_ids[j], batch->context);
			}
			/* Init dynamic arrays and let the branch below set the values. */
			batch->dynamic_vaos.count = GPU_BATCH_VAO_DYN_ALLOC_COUNT;
-			batch->dynamic_vaos.interfaces = calloc(batch->dynamic_vaos.count, sizeof(GPUShaderInterface*));
+			batch->dynamic_vaos.interfaces = calloc(batch->dynamic_vaos.count, sizeof(GPUShaderInterface *));
			batch->dynamic_vaos.vao_ids = calloc(batch->dynamic_vaos.count, sizeof(GLuint));
		}
	}
@@ -271,16 +271,16 @@ static GLuint batch_vao_get(GPUBatch *batch)
			/* Not enough place, realloc the array. */
			i = batch->dynamic_vaos.count;
			batch->dynamic_vaos.count += GPU_BATCH_VAO_DYN_ALLOC_COUNT;
-			batch->dynamic_vaos.interfaces = realloc(batch->dynamic_vaos.interfaces, sizeof(GPUShaderInterface*) * batch->dynamic_vaos.count);
+			batch->dynamic_vaos.interfaces = realloc(batch->dynamic_vaos.interfaces, sizeof(GPUShaderInterface *) * batch->dynamic_vaos.count);
			batch->dynamic_vaos.vao_ids = realloc(batch->dynamic_vaos.vao_ids, sizeof(GLuint) * batch->dynamic_vaos.count);
-			memset(batch->dynamic_vaos.interfaces + i, 0, sizeof(GPUShaderInterface*) * GPU_BATCH_VAO_DYN_ALLOC_COUNT);
+			memset(batch->dynamic_vaos.interfaces + i, 0, sizeof(GPUShaderInterface *) * GPU_BATCH_VAO_DYN_ALLOC_COUNT);
			memset(batch->dynamic_vaos.vao_ids + i, 0, sizeof(GLuint) * GPU_BATCH_VAO_DYN_ALLOC_COUNT);
		}
		batch->dynamic_vaos.interfaces[i] = batch->interface;
		batch->dynamic_vaos.vao_ids[i] = new_vao = GPU_vao_alloc();
	}

-	GPU_shaderinterface_add_batch_ref((GPUShaderInterface*)batch->interface, batch);
+	GPU_shaderinterface_add_batch_ref((GPUShaderInterface *)batch->interface, batch);

 #if TRUST_NO_ONE
	assert(new_vao != 0);
@@ -294,7 +294,7 @@ static GLuint batch_vao_get(GPUBatch *batch)
	return new_vao;
 }

-void GPU_batch_program_set_no_use(GPUBatch* batch, uint32_t program, const GPUShaderInterface* shaderface)
+void GPU_batch_program_set_no_use(GPUBatch *batch, uint32_t program, const GPUShaderInterface *shaderface)
 {
 #if TRUST_NO_ONE
	assert(glIsProgram(shaderface->program));
@@ -305,13 +305,13 @@ void GPU_batch_program_set_no_use(GPUBatch* batch, uint32_t program, const GPUSh
	batch->vao_id = batch_vao_get(batch);
 }

-void GPU_batch_program_set(GPUBatch* batch, uint32_t program, const GPUShaderInterface* shaderface)
+void GPU_batch_program_set(GPUBatch *batch, uint32_t program, const GPUShaderInterface *shaderface)
 {
	GPU_batch_program_set_no_use(batch, program, shaderface);
	GPU_batch_program_use_begin(batch); /* hack! to make Batch_Uniform* simpler */
 }

-void gpu_batch_remove_interface_ref(GPUBatch* batch, const GPUShaderInterface* interface)
+void gpu_batch_remove_interface_ref(GPUBatch *batch, const GPUShaderInterface *interface)
 {
	if (batch->is_dynamic_vao_count) {
		for (int i = 0; i < batch->dynamic_vaos.count; ++i) {
@@ -337,10 +337,10 @@ void gpu_batch_remove_interface_ref(GPUBatch* batch, const GPUShaderInterface* i
 }

 static void create_bindings(
-        GPUVertBuf* verts, const GPUShaderInterface* interface,
-        uint v_first, const bool use_instancing)
+        GPUVertBuf *verts, const GPUShaderInterface *interface,
+        uint v_first, const bool use_instancing)
 {
-	const GPUVertFormat* format = &verts->format;
+	const GPUVertFormat *format = &verts->format;

	const uint attr_len = format->attr_len;
	const uint stride = format->stride;
@@ -348,11 +348,11 @@ static void create_bindings(
	GPU_vertbuf_use(verts);

	for (uint a_idx = 0; a_idx < attr_len; ++a_idx) {
-		const GPUVertAttr* a = format->attribs + a_idx;
-		const GLvoid* pointer = (const GLubyte*)0 + a->offset + v_first * stride;
+		const GPUVertAttr *a = format->attribs + a_idx;
+		const GLvoid *pointer = (const GLubyte *)0 + a->offset + v_first * stride;

		for (uint n_idx = 0; n_idx < a->name_len; ++n_idx) {
-			const GPUShaderInput* input = GPU_shaderinterface_attr(interface, a->name[n_idx]);
+			const GPUShaderInput *input = GPU_shaderinterface_attr(interface, a->name[n_idx]);

			if (input == NULL) continue;

@@ -365,11 +365,10 @@ static void create_bindings(
				glEnableVertexAttribArray(input->location + i);
				glVertexAttribDivisor(input->location + i, (use_instancing) ? 1 : 0);
				glVertexAttribPointer(input->location + i, 4, a->gl_comp_type, GL_FALSE, stride,
-				                      (const GLubyte*)pointer + i * 16);
+				                      (const GLubyte *)pointer + i * 16);
			}
		}
-		else
-		{
+		else {
			glEnableVertexAttribArray(input->location);
			glVertexAttribDivisor(input->location, (use_instancing) ? 1 : 0);
@@ -390,7 +389,7 @@ static void create_bindings(
	}
 }

-static void batch_update_program_bindings(GPUBatch* batch, uint v_first)
+static void batch_update_program_bindings(GPUBatch *batch, uint v_first)
 {
	for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN && batch->verts[v] != NULL; ++v) {
		create_bindings(batch->verts[v], batch->interface, (batch->inst) ? 0 : v_first, false);
@@ -403,7 +402,7 @@ static void batch_update_program_bindings(GPUBatch* batch, uint v_first)
	}
 }

-void GPU_batch_program_use_begin(GPUBatch* batch)
+void GPU_batch_program_use_begin(GPUBatch *batch)
 {
	/* NOTE: use_program & done_using_program are fragile, depend on staying in sync with
	 * the GL context's active program. use_program doesn't mark other programs as "not used". */
@@ -415,7 +414,7 @@ void GPU_batch_program_use_begin(GPUBatch* batch)
	}
 }

-void GPU_batch_program_use_end(GPUBatch* batch)
+void GPU_batch_program_use_end(GPUBatch *batch)
 {
	if (batch->program_in_use) {
 #if PROGRAM_NO_OPTI
@@ -426,84 +425,84 @@ void GPU_batch_program_use_end(GPUBatch* batch)
 }

 #if TRUST_NO_ONE
-#define GET_UNIFORM const GPUShaderInput* uniform = GPU_shaderinterface_uniform(batch->interface, name); assert(uniform);
+# define GET_UNIFORM const GPUShaderInput *uniform = GPU_shaderinterface_uniform(batch->interface, name); assert(uniform);
 #else
-#define GET_UNIFORM const GPUShaderInput* uniform = GPU_shaderinterface_uniform(batch->interface, name);
+# define GET_UNIFORM const GPUShaderInput *uniform = GPU_shaderinterface_uniform(batch->interface, name);
 #endif

-void GPU_batch_uniform_1ui(GPUBatch* batch, const char* name, int value)
+void GPU_batch_uniform_1ui(GPUBatch *batch, const char *name, int value)
 {
	GET_UNIFORM
	glUniform1ui(uniform->location, value);
 }

-void GPU_batch_uniform_1i(GPUBatch* batch, const char* name, int value)
+void GPU_batch_uniform_1i(GPUBatch *batch, const char *name, int value)
 {
	GET_UNIFORM
	glUniform1i(uniform->location, value);
 }

-void GPU_batch_uniform_1b(GPUBatch* batch, const char* name, bool value)
+void GPU_batch_uniform_1b(GPUBatch *batch, const char *name, bool value)
 {
	GET_UNIFORM
	glUniform1i(uniform->location, value ? GL_TRUE : GL_FALSE);
 }

-void GPU_batch_uniform_2f(GPUBatch* batch, const char* name, float x, float y)
+void GPU_batch_uniform_2f(GPUBatch *batch, const char *name, float x, float y)
 {
	GET_UNIFORM
	glUniform2f(uniform->location, x, y);
 }

-void GPU_batch_uniform_3f(GPUBatch* batch, const char* name, float x, float y, float z)
+void GPU_batch_uniform_3f(GPUBatch *batch, const char *name, float x, float y, float z)
 {
	GET_UNIFORM
	glUniform3f(uniform->location, x, y, z);
 }

-void GPU_batch_uniform_4f(GPUBatch* batch, const char* name, float x, float y, float z, float w)
+void GPU_batch_uniform_4f(GPUBatch *batch, const char *name, float x, float y, float z, float w)
 {
	GET_UNIFORM
	glUniform4f(uniform->location, x, y, z, w);
 }

-void GPU_batch_uniform_1f(GPUBatch* batch, const char* name, float x)
+void GPU_batch_uniform_1f(GPUBatch *batch, const char *name, float x)
 {
	GET_UNIFORM
	glUniform1f(uniform->location, x);
 }

-void GPU_batch_uniform_2fv(GPUBatch* batch, const char* name, const float data[2])
+void GPU_batch_uniform_2fv(GPUBatch *batch, const char *name, const float data[2])
 {
	GET_UNIFORM
	glUniform2fv(uniform->location, 1, data);
 }

-void GPU_batch_uniform_3fv(GPUBatch* batch, const char* name, const float data[3])
+void GPU_batch_uniform_3fv(GPUBatch *batch, const char *name, const float data[3])
 {
	GET_UNIFORM
	glUniform3fv(uniform->location, 1, data);
 }

-void GPU_batch_uniform_4fv(GPUBatch* batch, const char* name, const float data[4])
+void GPU_batch_uniform_4fv(GPUBatch *batch, const char *name, const float data[4])
 {
	GET_UNIFORM
	glUniform4fv(uniform->location, 1, data);
 }

-void GPU_batch_uniform_2fv_array(GPUBatch* batch, const char* name, const int len, const float *data)
+void GPU_batch_uniform_2fv_array(GPUBatch *batch, const char *name, const int len, const float *data)
 {
	GET_UNIFORM
	glUniform2fv(uniform->location, len, data);
 }

-void GPU_batch_uniform_4fv_array(GPUBatch* batch, const char* name, const int len, const float *data)
+void GPU_batch_uniform_4fv_array(GPUBatch *batch, const char *name, const int len, const float *data)
 {
	GET_UNIFORM
	glUniform4fv(uniform->location, len, data);
 }

-void GPU_batch_uniform_mat4(GPUBatch* batch, const char* name, const float data[4][4])
+void GPU_batch_uniform_mat4(GPUBatch *batch, const char *name, const float data[4][4])
 {
	GET_UNIFORM
	glUniformMatrix4fv(uniform->location, 1, GL_FALSE, (const float *)data);
@@ -530,7 +529,7 @@ static void primitive_restart_disable(void)
	glDisable(GL_PRIMITIVE_RESTART);
 }

-void GPU_batch_draw(GPUBatch* batch)
+void GPU_batch_draw(GPUBatch *batch)
 {
 #if TRUST_NO_ONE
	assert(batch->phase == GPU_BATCH_READY_TO_DRAW);
@@ -544,7 +543,7 @@ void GPU_batch_draw(GPUBatch* batch)
	GPU_batch_program_use_end(batch);
 }

-void GPU_batch_draw_range_ex(GPUBatch* batch, int v_first, int v_count, bool force_instance)
+void GPU_batch_draw_range_ex(GPUBatch *batch, int v_first, int v_count, bool force_instance)
 {
 #if TRUST_NO_ONE
	assert(!(force_instance && (batch->inst == NULL)) || v_count > 0); // we cannot infer length if force_instance
@@ -567,7 +566,7 @@ void GPU_batch_draw_range_ex(GPUBatch* batch, int v_first, int v_count, bool for
	}

	if (batch->elem) {
-		const GPUIndexBuf* el = batch->elem;
+		const GPUIndexBuf *el = batch->elem;

		if (el->use_prim_restart) {
			primitive_restart_enable(el);
@@ -597,7 +596,7 @@ void GPU_batch_draw_range_ex(GPUBatch* batch, int v_first, int v_count, bool for
	}

	if (batch->elem) {
-		const GPUIndexBuf* el = batch->elem;
+		const GPUIndexBuf *el = batch->elem;

		if (el->use_prim_restart) {
			primitive_restart_enable(el);
@@ -605,13 +604,14 @@ void GPU_batch_draw_range_ex(GPUBatch* batch, int v_first, int v_count, bool for

 #if GPU_TRACK_INDEX_RANGE
		if (el->base_index) {
-			glDrawRangeElementsBaseVertex(batch->gl_prim_type,
-			                              el->min_index,
-			                              el->max_index,
-			                              v_count,
-			                              el->gl_index_type,
-			                              0,
-			                              el->base_index);
+			glDrawRangeElementsBaseVertex(
+			        batch->gl_prim_type,
+			        el->min_index,
+			        el->max_index,
+			        v_count,
+			        el->gl_index_type,
+			        0,
+			        el->base_index);
		}
		else {
			glDrawRangeElements(batch->gl_prim_type, el->min_index, el->max_index, v_count, el->gl_index_type, 0);
@@ -635,7 +635,7 @@ void GPU_batch_draw_range_ex(GPUBatch* batch, int v_first, int v_count, bool for

 /* just draw some vertices and let shader place them where we want. */
 void GPU_draw_primitive(GPUPrimType prim_type, int v_count)
-{
+{
	/* we cannot draw without vao ... annoying ... */
	glBindVertexArray(GPU_vao_default());
@@ -645,7 +645,7 @@ void GPU_draw_primitive(GPUPrimType prim_type, int v_count)
	/* Performance hog if you are drawing with the same vao multiple time.
	 * Only activate for debugging.*/
	// glBindVertexArray(0);
-}
+}

 /* -------------------------------------------------------------------- */
@@ -674,4 +674,4 @@ void gpu_batch_exit(void)
	gpu_batch_presets_exit();
 }

-/** \} */
+/** \} */

@@ -522,42 +522,42 @@ void GPU_pbvh_grid_buffers_update(
 /* Build the element array buffer of grid indices using either
  * unsigned shorts or unsigned ints. */
 #define FILL_QUAD_BUFFER(max_vert_, tot_quad_, buffer_) \
-	{ \
-		int offset = 0; \
-		int i, j, k; \
+	{ \
+		int offset = 0; \
+		int i, j, k; \
 \
-		GPUIndexBufBuilder elb; \
-		GPU_indexbuf_init( \
-		        &elb, GPU_PRIM_TRIS, tot_quad_ * 2, max_vert_); \
+		GPUIndexBufBuilder elb; \
+		GPU_indexbuf_init( \
+		        &elb, GPU_PRIM_TRIS, tot_quad_ * 2, max_vert_); \
 \
-		/* Fill the buffer */ \
-		for (i = 0; i < totgrid; ++i) { \
-			BLI_bitmap *gh = NULL; \
-			if (grid_hidden) \
-				gh = grid_hidden[(grid_indices)[i]]; \
+		/* Fill the buffer */ \
+		for (i = 0; i < totgrid; ++i) { \
+			BLI_bitmap *gh = NULL; \
+			if (grid_hidden) \
+				gh = grid_hidden[(grid_indices)[i]]; \
 \
-			for (j = 0; j < gridsize - 1; ++j) { \
-				for (k = 0; k < gridsize - 1; ++k) { \
-					/* Skip hidden grid face */ \
-					if (gh && paint_is_grid_face_hidden( \
-					        gh, gridsize, k, j)) \
-					{ \
-						continue; \
-					} \
-					GPU_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k + 1); \
-					GPU_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k); \
-					GPU_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k); \
-\
-					GPU_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k + 1); \
-					GPU_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k + 1); \
-					GPU_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k); \
-				} \
-			} \
+			for (j = 0; j < gridsize - 1; ++j) { \
+				for (k = 0; k < gridsize - 1; ++k) { \
+					/* Skip hidden grid face */ \
+					if (gh && paint_is_grid_face_hidden( \
+					        gh, gridsize, k, j)) \
+					{ \
+						continue; \
+					} \
+					GPU_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k + 1); \
+					GPU_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k); \
+					GPU_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k); \
+\
-			offset += gridsize * gridsize; \
-		} \
-		buffer_ = GPU_indexbuf_build(&elb); \
-	} (void)0
+					GPU_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k + 1); \
+					GPU_indexbuf_add_generic_vert(&elb, offset + j * gridsize + k + 1); \
+					GPU_indexbuf_add_generic_vert(&elb, offset + (j + 1) * gridsize + k); \
+				} \
+			} \
+\
+			offset += gridsize * gridsize; \
+		} \
+		buffer_ = GPU_indexbuf_build(&elb); \
+	} (void)0
 /* end FILL_QUAD_BUFFER */

 static GPUIndexBuf *gpu_get_grid_buffer(
@@ -912,7 +912,7 @@ void GPU_pbvh_bmesh_buffers_update(

		BM_face_as_array_vert_tri(f, v);
		GPU_indexbuf_add_tri_verts(
-		        &elb, BM_elem_index_get(v[0]), BM_elem_index_get(v[1]), BM_elem_index_get(v[2]));
+		        &elb, BM_elem_index_get(v[0]), BM_elem_index_get(v[1]), BM_elem_index_get(v[2]));
	}
 }

@@ -615,10 +615,11 @@ static int codegen_process_uniforms_functions(GPUMaterial *material, DynStr *ds,
		if ((input->source == GPU_SOURCE_TEX) || (input->source == GPU_SOURCE_TEX_PIXEL)) {
			/* create exactly one sampler for each texture */
			if (codegen_input_has_texture(input) && input->bindtex) {
-				BLI_dynstr_appendf(ds, "uniform %s samp%d;\n",
-				                   (input->textype == GPU_TEX2D) ? "sampler2D" :
-				                   (input->textype == GPU_TEXCUBE) ? "samplerCube" : "sampler2DShadow",
-				                   input->texid);
+				BLI_dynstr_appendf(
+				        ds, "uniform %s samp%d;\n",
+				        (input->textype == GPU_TEX2D) ? "sampler2D" :
+				        (input->textype == GPU_TEXCUBE) ? "samplerCube" : "sampler2DShadow",
+				        input->texid);
			}
		}
		else if (input->source == GPU_SOURCE_BUILTIN) {
@@ -635,13 +636,15 @@ static int codegen_process_uniforms_functions(GPUMaterial *material, DynStr *ds,
			}
		}
		else if (gpu_str_prefix(name, "unf")) {
-			BLI_dynstr_appendf(ds, "uniform %s %s;\n",
-			                   GPU_DATATYPE_STR[input->type], name);
+			BLI_dynstr_appendf(
+			        ds, "uniform %s %s;\n",
+			        GPU_DATATYPE_STR[input->type], name);
		}
		else {
-			BLI_dynstr_appendf(ds, "%s %s %s;\n",
-			                   GLEW_VERSION_3_0 ? "in" : "varying",
-			                   GPU_DATATYPE_STR[input->type], name);
+			BLI_dynstr_appendf(
+			        ds, "%s %s %s;\n",
+			        GLEW_VERSION_3_0 ? "in" : "varying",
+			        GPU_DATATYPE_STR[input->type], name);
		}
	}
 }
@@ -658,12 +661,14 @@ static int codegen_process_uniforms_functions(GPUMaterial *material, DynStr *ds,
		}
		else if (input->dynamicvec) {
			/* only create uniforms for dynamic vectors */
-			BLI_dynstr_appendf(ds, "uniform %s unf%d;\n",
-			                   GPU_DATATYPE_STR[input->type], input->id);
+			BLI_dynstr_appendf(
+			        ds, "uniform %s unf%d;\n",
+			        GPU_DATATYPE_STR[input->type], input->id);
		}
		else {
-			BLI_dynstr_appendf(ds, "const %s cons%d = ",
-			                   GPU_DATATYPE_STR[input->type], input->id);
+			BLI_dynstr_appendf(
+			        ds, "const %s cons%d = ",
+			        GPU_DATATYPE_STR[input->type], input->id);
			codegen_print_datatype(ds, input->type, input->vec);
			BLI_dynstr_append(ds, ";\n");
		}
@@ -675,9 +680,10 @@ static int codegen_process_uniforms_functions(GPUMaterial *material, DynStr *ds,
				BLI_dynstr_appendf(ds, "#ifndef USE_OPENSUBDIV\n");
			}
 #endif
-			BLI_dynstr_appendf(ds, "%s %s var%d;\n",
-			                   GLEW_VERSION_3_0 ? "in" : "varying",
-			                   GPU_DATATYPE_STR[input->type], input->attribid);
+			BLI_dynstr_appendf(
+			        ds, "%s %s var%d;\n",
+			        GLEW_VERSION_3_0 ? "in" : "varying",
+			        GPU_DATATYPE_STR[input->type], input->attribid);
 #ifdef WITH_OPENSUBDIV
			if (skip_opensubdiv) {
				BLI_dynstr_appendf(ds, "#endif\n");
@@ -696,8 +702,9 @@ static int codegen_process_uniforms_functions(GPUMaterial *material, DynStr *ds,

		for (LinkData *link = ubo_inputs.first; link; link = link->next) {
			input = link->data;
-			BLI_dynstr_appendf(ds, "\t%s unf%d;\n",
-			                   GPU_DATATYPE_STR[input->type], input->id);
+			BLI_dynstr_appendf(
+			        ds, "\t%s unf%d;\n",
+			        GPU_DATATYPE_STR[input->type], input->id);
		}
		BLI_dynstr_append(ds, "};\n");
		BLI_freelistN(&ubo_inputs);
@@ -719,9 +726,11 @@ static void codegen_declare_tmps(DynStr *ds, ListBase *nodes)
for (input = node->inputs.first; input; input = input->next) {
if (input->source == GPU_SOURCE_TEX_PIXEL) {
if (codegen_input_has_texture(input) && input->definetex) {
BLI_dynstr_appendf(ds, "\tvec4 tex%d = texture2D(", input->texid);
BLI_dynstr_appendf(ds, "samp%d, gl_TexCoord[%d].st);\n",
input->texid, input->texid);
BLI_dynstr_appendf(
ds, "\tvec4 tex%d = texture2D(", input->texid);
BLI_dynstr_appendf(
ds, "samp%d, gl_TexCoord[%d].st);\n",
input->texid, input->texid);
}
}
}
@@ -729,11 +738,13 @@ static void codegen_declare_tmps(DynStr *ds, ListBase *nodes)
/* declare temporary variables for node output storage */
for (output = node->outputs.first; output; output = output->next) {
if (output->type == GPU_CLOSURE) {
BLI_dynstr_appendf(ds, "\tClosure tmp%d;\n", output->id);
BLI_dynstr_appendf(
ds, "\tClosure tmp%d;\n", output->id);
}
else {
BLI_dynstr_appendf(ds, "\t%s tmp%d;\n",
GPU_DATATYPE_STR[output->type], output->id);
BLI_dynstr_appendf(
ds, "\t%s tmp%d;\n",
GPU_DATATYPE_STR[output->type], output->id);
}
}
}
@@ -757,8 +768,9 @@ static void codegen_call_functions(DynStr *ds, ListBase *nodes, GPUOutput *final
BLI_dynstr_appendf(ds, ", gl_TexCoord[%d].st", input->texid);
}
else if (input->source == GPU_SOURCE_TEX_PIXEL) {
codegen_convert_datatype(ds, input->link->output->type, input->type,
"tmp", input->link->output->id);
codegen_convert_datatype(
ds, input->link->output->type, input->type,
"tmp", input->link->output->id);
}
else if (input->source == GPU_SOURCE_BUILTIN) {
if (input->builtin == GPU_INVERSE_VIEW_MATRIX)
@@ -862,10 +874,12 @@ static char *code_generate_fragment(GPUMaterial *material, ListBase *nodes, GPUO
for (input = node->inputs.first; input; input = input->next) {
if (input->source == GPU_SOURCE_ATTRIB && input->attribfirst) {
if (input->attribtype == CD_TANGENT) {
BLI_dynstr_appendf(ds, "#ifdef USE_OPENSUBDIV\n");
BLI_dynstr_appendf(ds, "\t%s var%d;\n",
GPU_DATATYPE_STR[input->type],
input->attribid);
BLI_dynstr_appendf(
ds, "#ifdef USE_OPENSUBDIV\n");
BLI_dynstr_appendf(
ds, "\t%s var%d;\n",
GPU_DATATYPE_STR[input->type],
input->attribid);
if (has_tangent == false) {
BLI_dynstr_appendf(ds, "\tvec3 Q1 = dFdx(inpt.v.position.xyz);\n");
BLI_dynstr_appendf(ds, "\tvec3 Q2 = dFdy(inpt.v.position.xyz);\n");
@@ -929,12 +943,13 @@ static char *code_generate_vertex(ListBase *nodes, const char *vert_code, bool u
char *code;

/* Hairs uv and col attribs are passed by bufferTextures. */
BLI_dynstr_append(ds,
"#ifdef HAIR_SHADER\n"
"#define DEFINE_ATTRIB(type, attr) uniform samplerBuffer attr\n"
"#else\n"
"#define DEFINE_ATTRIB(type, attr) in type attr\n"
"#endif\n"
BLI_dynstr_append(
ds,
"#ifdef HAIR_SHADER\n"
"#define DEFINE_ATTRIB(type, attr) uniform samplerBuffer attr\n"
"#else\n"
"#define DEFINE_ATTRIB(type, attr) in type attr\n"
"#endif\n"
);

for (node = nodes->first; node; node = node->next) {
@@ -952,10 +967,12 @@ static char *code_generate_vertex(ListBase *nodes, const char *vert_code, bool u
}
else {
unsigned int hash = BLI_ghashutil_strhash_p(input->attribname);
BLI_dynstr_appendf(ds, "DEFINE_ATTRIB(%s, %s%u);\n",
GPU_DATATYPE_STR[input->type], attrib_prefix_get(input->attribtype), hash);
BLI_dynstr_appendf(ds, "#define att%d %s%u\n",
input->attribid, attrib_prefix_get(input->attribtype), hash);
BLI_dynstr_appendf(
ds, "DEFINE_ATTRIB(%s, %s%u);\n",
GPU_DATATYPE_STR[input->type], attrib_prefix_get(input->attribtype), hash);
BLI_dynstr_appendf(
ds, "#define att%d %s%u\n",
input->attribid, attrib_prefix_get(input->attribtype), hash);
/* Auto attrib can be vertex color byte buffer.
 * We need to know and convert them to linear space in VS. */
if (!use_geom && input->attribtype == CD_AUTO_FROM_NAME) {
@@ -963,33 +980,36 @@ static char *code_generate_vertex(ListBase *nodes, const char *vert_code, bool u
BLI_dynstr_appendf(ds, "#define att%d_is_srgb ba%u\n", input->attribid, hash);
}
}
BLI_dynstr_appendf(ds, "out %s var%d%s;\n",
GPU_DATATYPE_STR[input->type], input->attribid, use_geom ? "g" : "");
BLI_dynstr_appendf(
ds, "out %s var%d%s;\n",
GPU_DATATYPE_STR[input->type], input->attribid, use_geom ? "g" : "");
}
}
}

BLI_dynstr_append(ds, "\n");

BLI_dynstr_append(ds,
"#define ATTRIB\n"
"uniform mat3 NormalMatrix;\n"
"uniform mat4 ModelMatrixInverse;\n"
"vec3 srgb_to_linear_attrib(vec3 c) {\n"
"\tc = max(c, vec3(0.0));\n"
"\tvec3 c1 = c * (1.0 / 12.92);\n"
"\tvec3 c2 = pow((c + 0.055) * (1.0 / 1.055), vec3(2.4));\n"
"\treturn mix(c1, c2, step(vec3(0.04045), c));\n"
"}\n\n"
BLI_dynstr_append(
ds,
"#define ATTRIB\n"
"uniform mat3 NormalMatrix;\n"
"uniform mat4 ModelMatrixInverse;\n"
"vec3 srgb_to_linear_attrib(vec3 c) {\n"
"\tc = max(c, vec3(0.0));\n"
"\tvec3 c1 = c * (1.0 / 12.92);\n"
"\tvec3 c2 = pow((c + 0.055) * (1.0 / 1.055), vec3(2.4));\n"
"\treturn mix(c1, c2, step(vec3(0.04045), c));\n"
"}\n\n"
);

/* Prototype because defined later. */
BLI_dynstr_append(ds,
"vec2 hair_get_customdata_vec2(const samplerBuffer);\n"
"vec3 hair_get_customdata_vec3(const samplerBuffer);\n"
"vec4 hair_get_customdata_vec4(const samplerBuffer);\n"
"vec3 hair_get_strand_pos(void);\n"
"\n"
BLI_dynstr_append(
ds,
"vec2 hair_get_customdata_vec2(const samplerBuffer);\n"
"vec3 hair_get_customdata_vec3(const samplerBuffer);\n"
"vec4 hair_get_customdata_vec4(const samplerBuffer);\n"
"vec3 hair_get_strand_pos(void);\n"
"\n"
);

BLI_dynstr_append(ds, "void pass_attrib(in vec3 position) {\n");
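
For reference, the piecewise sRGB decode that srgb_to_linear_attrib performs in the generated GLSL above can be mirrored by a scalar C helper. This is a minimal sketch for clarity, not code from the commit (needs <math.h>):

    static float srgb_to_linear_1(float c)
    {
        if (c < 0.0f) {
            c = 0.0f;
        }
        /* same branch selection as mix(c1, c2, step(0.04045, c)) in the GLSL */
        return (c < 0.04045f) ? c * (1.0f / 12.92f)
                              : powf((c + 0.055f) * (1.0f / 1.055f), 2.4f);
    }
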
@@ -1001,16 +1021,19 @@ static char *code_generate_vertex(ListBase *nodes, const char *vert_code, bool u
if (input->source == GPU_SOURCE_ATTRIB && input->attribfirst) {
if (input->attribtype == CD_TANGENT) {
/* Not supported by hairs */
BLI_dynstr_appendf(ds, "\tvar%d%s = vec4(0.0);\n",
input->attribid, use_geom ? "g" : "");
BLI_dynstr_appendf(
ds, "\tvar%d%s = vec4(0.0);\n",
input->attribid, use_geom ? "g" : "");
}
else if (input->attribtype == CD_ORCO) {
BLI_dynstr_appendf(ds, "\tvar%d%s = OrcoTexCoFactors[0] + (ModelMatrixInverse * vec4(hair_get_strand_pos(), 1.0)).xyz * OrcoTexCoFactors[1];\n",
input->attribid, use_geom ? "g" : "");
BLI_dynstr_appendf(
ds, "\tvar%d%s = OrcoTexCoFactors[0] + (ModelMatrixInverse * vec4(hair_get_strand_pos(), 1.0)).xyz * OrcoTexCoFactors[1];\n",
input->attribid, use_geom ? "g" : "");
}
else {
BLI_dynstr_appendf(ds, "\tvar%d%s = hair_get_customdata_%s(att%d);\n",
input->attribid, use_geom ? "g" : "", GPU_DATATYPE_STR[input->type], input->attribid);
BLI_dynstr_appendf(
ds, "\tvar%d%s = hair_get_customdata_%s(att%d);\n",
input->attribid, use_geom ? "g" : "", GPU_DATATYPE_STR[input->type], input->attribid);
}
}
}
@@ -1030,21 +1053,25 @@ static char *code_generate_vertex(ListBase *nodes, const char *vert_code, bool u
input->attribid, use_geom ? "g" : "", input->attribid);
}
else if (input->attribtype == CD_ORCO) {
BLI_dynstr_appendf(ds, "\tvar%d%s = OrcoTexCoFactors[0] + position * OrcoTexCoFactors[1];\n",
input->attribid, use_geom ? "g" : "");
BLI_dynstr_appendf(
ds, "\tvar%d%s = OrcoTexCoFactors[0] + position * OrcoTexCoFactors[1];\n",
input->attribid, use_geom ? "g" : "");
}
else if (input->attribtype == CD_MCOL) {
BLI_dynstr_appendf(ds, "\tvar%d%s = srgb_to_linear_attrib(att%d);\n",
input->attribid, use_geom ? "g" : "", input->attribid);
BLI_dynstr_appendf(
ds, "\tvar%d%s = srgb_to_linear_attrib(att%d);\n",
input->attribid, use_geom ? "g" : "", input->attribid);
}
else if (input->attribtype == CD_AUTO_FROM_NAME) {
BLI_dynstr_appendf(ds, "\tvar%d%s = (att%d_is_srgb) ? srgb_to_linear_attrib(att%d) : att%d;\n",
input->attribid, use_geom ? "g" : "",
input->attribid, input->attribid, input->attribid);
BLI_dynstr_appendf(
ds, "\tvar%d%s = (att%d_is_srgb) ? srgb_to_linear_attrib(att%d) : att%d;\n",
input->attribid, use_geom ? "g" : "",
input->attribid, input->attribid, input->attribid);
}
else {
BLI_dynstr_appendf(ds, "\tvar%d%s = att%d;\n",
input->attribid, use_geom ? "g" : "", input->attribid);
BLI_dynstr_appendf(
ds, "\tvar%d%s = att%d;\n",
input->attribid, use_geom ? "g" : "", input->attribid);
}
}
}
@@ -1083,12 +1110,14 @@ static char *code_generate_geometry(ListBase *nodes, const char *geom_code)
for (node = nodes->first; node; node = node->next) {
for (input = node->inputs.first; input; input = input->next) {
if (input->source == GPU_SOURCE_ATTRIB && input->attribfirst) {
BLI_dynstr_appendf(ds, "in %s var%dg[];\n",
GPU_DATATYPE_STR[input->type],
input->attribid);
BLI_dynstr_appendf(ds, "out %s var%d;\n",
GPU_DATATYPE_STR[input->type],
input->attribid);
BLI_dynstr_appendf(
ds, "in %s var%dg[];\n",
GPU_DATATYPE_STR[input->type],
input->attribid);
BLI_dynstr_appendf(
ds, "out %s var%d;\n",
GPU_DATATYPE_STR[input->type],
input->attribid);
}
}
}
@@ -1382,8 +1411,8 @@ static const char *gpu_uniform_set_function_from_type(eNodeSocketDatatype type)
case SOCK_RGBA:
return "set_rgba";
default:
BLI_assert(!"No gpu function for non-supported eNodeSocketDatatype");
return NULL;
BLI_assert(!"No gpu function for non-supported eNodeSocketDatatype");
return NULL;
}
}

@@ -1553,8 +1582,9 @@ void GPU_nodes_get_vertex_attributes(ListBase *nodes, GPUVertexAttribs *attribs)

attribs->layer[a].type = input->attribtype;
attribs->layer[a].attribid = input->attribid;
BLI_strncpy(attribs->layer[a].name, input->attribname,
sizeof(attribs->layer[a].name));
BLI_strncpy(
attribs->layer[a].name, input->attribname,
sizeof(attribs->layer[a].name));
}
else {
input->attribid = attribs->layer[a].attribid;
@@ -1966,11 +1996,12 @@ GPUPass *GPU_generate_pass_new(
void GPU_pass_compile(GPUPass *pass)
{
if (!pass->compiled) {
pass->shader = GPU_shader_create(pass->vertexcode,
pass->fragmentcode,
pass->geometrycode,
NULL,
pass->defines);
pass->shader = GPU_shader_create(
pass->vertexcode,
pass->fragmentcode,
pass->geometrycode,
NULL,
pass->defines);
pass->compiled = true;
}
}

@@ -107,7 +107,7 @@ static bool is_over_resolution_limit(GLenum textarget, int w, int h)
int size = (textarget == GL_TEXTURE_2D) ?
GPU_max_texture_size() : GPU_max_cube_map_size();
int reslimit = (U.glreslimit != 0) ?
min_ii(U.glreslimit, size) : size;
min_ii(U.glreslimit, size) : size;

return (w > reslimit || h > reslimit);
}
@@ -239,42 +239,48 @@ typedef struct VerifyThreadData {
float *srgb_frect;
} VerifyThreadData;

static void gpu_verify_high_bit_srgb_buffer_slice(float *srgb_frect,
ImBuf *ibuf,
const int start_line,
const int height)
static void gpu_verify_high_bit_srgb_buffer_slice(
float *srgb_frect,
ImBuf *ibuf,
const int start_line,
const int height)
{
size_t offset = ibuf->channels * start_line * ibuf->x;
float *current_srgb_frect = srgb_frect + offset;
float *current_rect_float = ibuf->rect_float + offset;
IMB_buffer_float_from_float(current_srgb_frect,
current_rect_float,
ibuf->channels,
IB_PROFILE_SRGB,
IB_PROFILE_LINEAR_RGB, true,
ibuf->x, height,
ibuf->x, ibuf->x);
IMB_buffer_float_from_float(
current_srgb_frect,
current_rect_float,
ibuf->channels,
IB_PROFILE_SRGB,
IB_PROFILE_LINEAR_RGB, true,
ibuf->x, height,
ibuf->x, ibuf->x);
IMB_buffer_float_unpremultiply(current_srgb_frect, ibuf->x, height);
}

static void verify_thread_do(void *data_v,
int start_scanline,
int num_scanlines)
static void verify_thread_do(
void *data_v,
int start_scanline,
int num_scanlines)
{
VerifyThreadData *data = (VerifyThreadData *)data_v;
gpu_verify_high_bit_srgb_buffer_slice(data->srgb_frect,
data->ibuf,
start_scanline,
num_scanlines);
gpu_verify_high_bit_srgb_buffer_slice(
data->srgb_frect,
data->ibuf,
start_scanline,
num_scanlines);
}

static void gpu_verify_high_bit_srgb_buffer(float *srgb_frect,
ImBuf *ibuf)
static void gpu_verify_high_bit_srgb_buffer(
float *srgb_frect,
ImBuf *ibuf)
{
if (ibuf->y < 64) {
gpu_verify_high_bit_srgb_buffer_slice(srgb_frect,
ibuf,
0, ibuf->y);
gpu_verify_high_bit_srgb_buffer_slice(
srgb_frect,
ibuf,
0, ibuf->y);
}
else {
VerifyThreadData data;
@@ -284,11 +290,12 @@ static void gpu_verify_high_bit_srgb_buffer(float *srgb_frect,
}
}

GPUTexture *GPU_texture_from_blender(Image *ima,
ImageUser *iuser,
int textarget,
bool is_data,
double UNUSED(time))
GPUTexture *GPU_texture_from_blender(
Image *ima,
ImageUser *iuser,
int textarget,
bool is_data,
double UNUSED(time))
{
if (ima == NULL) {
return NULL;
@@ -363,11 +370,14 @@ GPUTexture *GPU_texture_from_blender(Image *ima,
const bool mipmap = GPU_get_mipmap();

#ifdef WITH_DDS
if (ibuf->ftype == IMB_FTYPE_DDS)
if (ibuf->ftype == IMB_FTYPE_DDS) {
GPU_create_gl_tex_compressed(&bindcode, rect, rectw, recth, textarget, mipmap, ima, ibuf);
}
else
#endif
{
GPU_create_gl_tex(&bindcode, rect, frect, rectw, recth, textarget, mipmap, use_high_bit_depth, ima);
}

/* mark as non-color data texture */
if (bindcode) {
@@ -556,8 +566,9 @@ void GPU_create_gl_tex(

if (mip_cube_map) {
for (int j = 0; j < 6; j++) {
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + j, i,
informat, mipw, miph, 0, GL_RGBA, type, mip_cube_map[j]);
glTexImage2D(
GL_TEXTURE_CUBE_MAP_POSITIVE_X + j, i,
informat, mipw, miph, 0, GL_RGBA, type, mip_cube_map[j]);
}
}
gpu_del_cube_map(mip_cube_map);
@@ -639,8 +650,9 @@ bool GPU_upload_dxt_texture(ImBuf *ibuf)

size = ((width + 3) / 4) * ((height + 3) / 4) * blocksize;

glCompressedTexImage2D(GL_TEXTURE_2D, i, format, width, height,
0, size, ibuf->dds_data.data + offset);
glCompressedTexImage2D(
GL_TEXTURE_2D, i, format, width, height,
0, size, ibuf->dds_data.data + offset);

offset += size;
width >>= 1;
@@ -755,8 +767,9 @@ static bool gpu_check_scaled_image(ImBuf *ibuf, Image *ima, float *frect, int x,
ImBuf *ibuf_scale = IMB_allocFromBuffer(NULL, frect, w, h);
IMB_scaleImBuf(ibuf_scale, rectw, recth);

glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, rectw, recth, GL_RGBA,
GL_FLOAT, ibuf_scale->rect_float);
glTexSubImage2D(
GL_TEXTURE_2D, 0, x, y, rectw, recth, GL_RGBA,
GL_FLOAT, ibuf_scale->rect_float);

IMB_freeImBuf(ibuf_scale);
}
@@ -775,8 +788,9 @@ static bool gpu_check_scaled_image(ImBuf *ibuf, Image *ima, float *frect, int x,
}
}

glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, rectw, recth, GL_RGBA,
GL_UNSIGNED_BYTE, scalerect);
glTexSubImage2D(
GL_TEXTURE_2D, 0, x, y, rectw, recth, GL_RGBA,
GL_UNSIGNED_BYTE, scalerect);

MEM_freeN(scalerect);
}
@@ -860,8 +874,9 @@ void GPU_paint_update_image(Image *ima, ImageUser *iuser, int x, int y, int w, i
glPixelStorei(GL_UNPACK_SKIP_PIXELS, x);
glPixelStorei(GL_UNPACK_SKIP_ROWS, y);

glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_RGBA,
GL_UNSIGNED_BYTE, ibuf->rect);
glTexSubImage2D(
GL_TEXTURE_2D, 0, x, y, w, h, GL_RGBA,
GL_UNSIGNED_BYTE, ibuf->rect);

glPixelStorei(GL_UNPACK_ROW_LENGTH, row_length);
glPixelStorei(GL_UNPACK_SKIP_PIXELS, skip_pixels);
@@ -913,8 +928,9 @@ void GPU_create_smoke(SmokeModifierData *smd, int highres)
}
/* density only */
else {
sds->tex = GPU_texture_create_3D(sds->res[0], sds->res[1], sds->res[2],
GPU_R8, smoke_get_density(sds->fluid), NULL);
sds->tex = GPU_texture_create_3D(
sds->res[0], sds->res[1], sds->res[2],
GPU_R8, smoke_get_density(sds->fluid), NULL);

/* Swizzle the RGBA components to read the Red channel so
 * that the shader stay the same for colored and non color
@@ -926,10 +942,12 @@ void GPU_create_smoke(SmokeModifierData *smd, int highres)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_SWIZZLE_A, GL_RED);
GPU_texture_unbind(sds->tex);
}
sds->tex_flame = (smoke_has_fuel(sds->fluid)) ?
GPU_texture_create_3D(sds->res[0], sds->res[1], sds->res[2],
GPU_R8, smoke_get_flame(sds->fluid), NULL) :
NULL;
sds->tex_flame = (
smoke_has_fuel(sds->fluid) ?
GPU_texture_create_3D(
sds->res[0], sds->res[1], sds->res[2],
GPU_R8, smoke_get_flame(sds->fluid), NULL) :
NULL);
}
else if (!sds->tex && highres) {
/* rgba texture for color + density */
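
The swizzle comment above means all four channels are remapped to red, so one shader samples colored and single-channel smoke the same way. The hunk shows only the alpha call; the full set looks like this (sketch):

    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_SWIZZLE_R, GL_RED);
    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_SWIZZLE_G, GL_RED);
    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_SWIZZLE_B, GL_RED);
    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_SWIZZLE_A, GL_RED);
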
@@ -941,8 +959,9 @@ void GPU_create_smoke(SmokeModifierData *smd, int highres)
}
/* density only */
else {
sds->tex = GPU_texture_create_3D(sds->res_wt[0], sds->res_wt[1], sds->res_wt[2],
GPU_R8, smoke_turbulence_get_density(sds->wt), NULL);
sds->tex = GPU_texture_create_3D(
sds->res_wt[0], sds->res_wt[1], sds->res_wt[2],
GPU_R8, smoke_turbulence_get_density(sds->wt), NULL);

/* Swizzle the RGBA components to read the Red channel so
 * that the shader stay the same for colored and non color
@@ -954,14 +973,17 @@ void GPU_create_smoke(SmokeModifierData *smd, int highres)
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_SWIZZLE_A, GL_RED);
GPU_texture_unbind(sds->tex);
}
sds->tex_flame = (smoke_turbulence_has_fuel(sds->wt)) ?
GPU_texture_create_3D(sds->res_wt[0], sds->res_wt[1], sds->res_wt[2],
GPU_R8, smoke_turbulence_get_flame(sds->wt), NULL) :
NULL;
sds->tex_flame = (
smoke_turbulence_has_fuel(sds->wt) ?
GPU_texture_create_3D(
sds->res_wt[0], sds->res_wt[1], sds->res_wt[2],
GPU_R8, smoke_turbulence_get_flame(sds->wt), NULL) :
NULL);
}

sds->tex_shadow = GPU_texture_create_3D(sds->res[0], sds->res[1], sds->res[2],
GPU_R8, sds->shadow, NULL);
sds->tex_shadow = GPU_texture_create_3D(
sds->res[0], sds->res[1], sds->res[2],
GPU_R8, sds->shadow, NULL);
}
#else // WITH_SMOKE
(void)highres;
@@ -1301,10 +1323,10 @@ void GPU_select_to_index_array(unsigned int *col, const unsigned int size)
{
#define INDEX_BUF_ARRAY(INDEX_FROM_BUF_BITS) \
for (i = size; i--; col++) { \
if ((c = *col)) { \
*col = INDEX_FROM_BUF_BITS(c); \
} \
} ((void)0)
if ((c = *col)) { \
*col = INDEX_FROM_BUF_BITS(c); \
} \
} ((void)0)

if (size > 0) {
unsigned int i, c;

@@ -46,7 +46,7 @@ static GLenum convert_index_type_to_gl(GPUIndexBufType type)
return table[type];
}

uint GPU_indexbuf_size_get(const GPUIndexBuf* elem)
uint GPU_indexbuf_size_get(const GPUIndexBuf *elem)
{
#if GPU_TRACK_INDEX_RANGE
static const uint table[] = {
@@ -61,7 +61,7 @@ uint GPU_indexbuf_size_get(const GPUIndexBuf* elem)
}

void GPU_indexbuf_init_ex(
GPUIndexBufBuilder* builder, GPUPrimType prim_type,
GPUIndexBufBuilder *builder, GPUPrimType prim_type,
uint index_len, uint vertex_len, bool use_prim_restart)
{
builder->use_prim_restart = use_prim_restart;
@@ -72,7 +72,7 @@ void GPU_indexbuf_init_ex(
builder->data = calloc(builder->max_index_len, sizeof(uint));
}

void GPU_indexbuf_init(GPUIndexBufBuilder* builder, GPUPrimType prim_type, uint prim_len, uint vertex_len)
void GPU_indexbuf_init(GPUIndexBufBuilder *builder, GPUPrimType prim_type, uint prim_len, uint vertex_len)
{
uint verts_per_prim = 0;
switch (prim_type) {
@@ -98,7 +98,7 @@ void GPU_indexbuf_init(GPUIndexBufBuilder* builder, GPUPrimType prim_type, uint
GPU_indexbuf_init_ex(builder, prim_type, prim_len * verts_per_prim, vertex_len, false);
}

void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder* builder, uint v)
void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder *builder, uint v)
{
#if TRUST_NO_ONE
assert(builder->data != NULL);
@@ -108,7 +108,7 @@ void GPU_indexbuf_add_generic_vert(GPUIndexBufBuilder* builder, uint v)
builder->data[builder->index_len++] = v;
}

void GPU_indexbuf_add_primitive_restart(GPUIndexBufBuilder* builder)
void GPU_indexbuf_add_primitive_restart(GPUIndexBufBuilder *builder)
{
#if TRUST_NO_ONE
assert(builder->data != NULL);
@@ -118,7 +118,7 @@ void GPU_indexbuf_add_primitive_restart(GPUIndexBufBuilder* builder)
builder->data[builder->index_len++] = GPU_PRIM_RESTART;
}

void GPU_indexbuf_add_point_vert(GPUIndexBufBuilder* builder, uint v)
void GPU_indexbuf_add_point_vert(GPUIndexBufBuilder *builder, uint v)
{
#if TRUST_NO_ONE
assert(builder->prim_type == GPU_PRIM_POINTS);
@@ -126,7 +126,7 @@ void GPU_indexbuf_add_point_vert(GPUIndexBufBuilder* builder, uint v)
GPU_indexbuf_add_generic_vert(builder, v);
}

void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder* builder, uint v1, uint v2)
void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder *builder, uint v1, uint v2)
{
#if TRUST_NO_ONE
assert(builder->prim_type == GPU_PRIM_LINES);
@@ -136,7 +136,7 @@ void GPU_indexbuf_add_line_verts(GPUIndexBufBuilder* builder, uint v1, uint v2)
GPU_indexbuf_add_generic_vert(builder, v2);
}

void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder* builder, uint v1, uint v2, uint v3)
void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder *builder, uint v1, uint v2, uint v3)
{
#if TRUST_NO_ONE
assert(builder->prim_type == GPU_PRIM_TRIS);
@@ -147,7 +147,7 @@ void GPU_indexbuf_add_tri_verts(GPUIndexBufBuilder* builder, uint v1, uint v2, u
GPU_indexbuf_add_generic_vert(builder, v3);
}

void GPU_indexbuf_add_line_adj_verts(GPUIndexBufBuilder* builder, uint v1, uint v2, uint v3, uint v4)
void GPU_indexbuf_add_line_adj_verts(GPUIndexBufBuilder *builder, uint v1, uint v2, uint v3, uint v4)
{
#if TRUST_NO_ONE
assert(builder->prim_type == GPU_PRIM_LINES_ADJ);
@@ -163,7 +163,7 @@ void GPU_indexbuf_add_line_adj_verts(GPUIndexBufBuilder* builder, uint v1, uint
/* Everything remains 32 bit while building to keep things simple.
 * Find min/max after, then convert to smallest index type possible. */

static uint index_range(const uint values[], uint value_len, uint* min_out, uint* max_out)
static uint index_range(const uint values[], uint value_len, uint *min_out, uint *max_out)
{
if (value_len == 0) {
*min_out = 0;
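
The comment above describes the squeeze step: build with 32-bit indices, compute the min/max range, then pick the smallest index type that can represent it. A hedged sketch of that decision (helper name is hypothetical):

    static GLenum smallest_index_type(uint range)
    {
        if (range <= 0xFF) {
            return GL_UNSIGNED_BYTE;
        }
        if (range <= 0xFFFF) {
            return GL_UNSIGNED_SHORT;
        }
        return GL_UNSIGNED_INT;
    }
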
@@ -186,7 +186,7 @@ static uint index_range(const uint values[], uint value_len, uint* min_out, uint
return max_value - min_value;
}

static void squeeze_indices_byte(GPUIndexBufBuilder *builder, GPUIndexBuf* elem)
static void squeeze_indices_byte(GPUIndexBufBuilder *builder, GPUIndexBuf *elem)
{
const uint *values = builder->data;
const uint index_len = elem->index_len;
@@ -212,7 +212,7 @@ static void squeeze_indices_byte(GPUIndexBufBuilder *builder, GPUIndexBuf* elem)
}
}

static void squeeze_indices_short(GPUIndexBufBuilder *builder, GPUIndexBuf* elem)
static void squeeze_indices_short(GPUIndexBufBuilder *builder, GPUIndexBuf *elem)
{
const uint *values = builder->data;
const uint index_len = elem->index_len;
@@ -240,14 +240,14 @@ static void squeeze_indices_short(GPUIndexBufBuilder *builder, GPUIndexBuf* elem

#endif /* GPU_TRACK_INDEX_RANGE */

GPUIndexBuf* GPU_indexbuf_build(GPUIndexBufBuilder* builder)
GPUIndexBuf *GPU_indexbuf_build(GPUIndexBufBuilder *builder)
{
GPUIndexBuf* elem = calloc(1, sizeof(GPUIndexBuf));
GPUIndexBuf *elem = calloc(1, sizeof(GPUIndexBuf));
GPU_indexbuf_build_in_place(builder, elem);
return elem;
}

void GPU_indexbuf_build_in_place(GPUIndexBufBuilder* builder, GPUIndexBuf* elem)
void GPU_indexbuf_build_in_place(GPUIndexBufBuilder *builder, GPUIndexBuf *elem)
{
#if TRUST_NO_ONE
assert(builder->data != NULL);
@@ -294,12 +294,12 @@ void GPU_indexbuf_build_in_place(GPUIndexBufBuilder* builder, GPUIndexBuf* elem)
/* other fields are safe to leave */
}

void GPU_indexbuf_use(GPUIndexBuf* elem)
void GPU_indexbuf_use(GPUIndexBuf *elem)
{
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elem->vbo_id);
}

void GPU_indexbuf_discard(GPUIndexBuf* elem)
void GPU_indexbuf_discard(GPUIndexBuf *elem)
{
if (elem->vbo_id) {
GPU_buf_id_free(elem->vbo_id);

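
Usage sketch for the builder API in the hunks above, indexing a quad as two triangles (values illustrative, not code from the commit):

    GPUIndexBufBuilder builder;
    GPU_indexbuf_init(&builder, GPU_PRIM_TRIS, 2, 4); /* 2 prims, 4 verts */
    GPU_indexbuf_add_tri_verts(&builder, 0, 1, 2);
    GPU_indexbuf_add_tri_verts(&builder, 2, 1, 3);
    GPUIndexBuf *elem = GPU_indexbuf_build(&builder); /* squeezed when possible */
    GPU_indexbuf_discard(elem); /* when done */
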
@@ -121,7 +121,7 @@ static GPUTexture *framebuffer_get_depth_tex(GPUFrameBuffer *fb)
if (fb->attachments[GPU_FB_DEPTH_ATTACHMENT].tex)
return fb->attachments[GPU_FB_DEPTH_ATTACHMENT].tex;
else
return fb->attachments[GPU_FB_DEPTH_STENCIL_ATTACHMENT].tex;;
return fb->attachments[GPU_FB_DEPTH_STENCIL_ATTACHMENT].tex;
}

static GPUTexture *framebuffer_get_color_tex(GPUFrameBuffer *fb, int slot)
@@ -167,12 +167,12 @@ static void gpu_print_framebuffer_error(GLenum status, char err_out[256])

void gpu_framebuffer_module_init(void)
{
BLI_thread_local_create(g_currentfb);
BLI_thread_local_create(g_currentfb);
}

void gpu_framebuffer_module_exit(void)
{
BLI_thread_local_delete(g_currentfb);
BLI_thread_local_delete(g_currentfb);
}

static uint gpu_framebuffer_current_get(void)
@@ -340,8 +340,9 @@ static void gpu_framebuffer_attachment_attach(GPUAttachment *attach, GPUAttachme

if (attach->layer > -1) {
if (GPU_texture_cube(attach->tex)) {
glFramebufferTexture2D(GL_FRAMEBUFFER, gl_attachment, GL_TEXTURE_CUBE_MAP_POSITIVE_X + attach->layer,
tex_bind, attach->mip);
glFramebufferTexture2D(
GL_FRAMEBUFFER, gl_attachment, GL_TEXTURE_CUBE_MAP_POSITIVE_X + attach->layer,
tex_bind, attach->mip);
}
else {
glFramebufferTextureLayer(GL_FRAMEBUFFER, gl_attachment, tex_bind, attach->mip, attach->layer);
@@ -518,7 +519,7 @@ void GPU_framebuffer_read_color(
case 1: type = GL_RED; break;
case 2: type = GL_RG; break;
case 3: type = GL_RGB; break;
case 4: type = GL_RGBA; break;
case 4: type = GL_RGBA; break;
default:
BLI_assert(false && "wrong number of read channels");
return;
@@ -549,12 +550,14 @@ void GPU_framebuffer_blit(
const bool do_depth = (blit_buffers & GPU_DEPTH_BIT);
const bool do_stencil = (blit_buffers & GPU_STENCIL_BIT);

GPUTexture *read_tex = (do_depth || do_stencil)
? framebuffer_get_depth_tex(fb_read)
: framebuffer_get_color_tex(fb_read, read_slot);
GPUTexture *write_tex = (do_depth || do_stencil)
? framebuffer_get_depth_tex(fb_write)
: framebuffer_get_color_tex(fb_write, read_slot);
GPUTexture *read_tex = (
(do_depth || do_stencil) ?
framebuffer_get_depth_tex(fb_read) :
framebuffer_get_color_tex(fb_read, read_slot));
GPUTexture *write_tex = (
(do_depth || do_stencil) ?
framebuffer_get_depth_tex(fb_write) :
framebuffer_get_color_tex(fb_write, read_slot));

if (do_depth) {
BLI_assert(GPU_texture_depth(read_tex) && GPU_texture_depth(write_tex));
@@ -585,9 +588,10 @@ void GPU_framebuffer_blit(

GLbitfield mask = convert_buffer_bits_to_gl(blit_buffers);

glBlitFramebuffer(0, 0, fb_read->width, fb_read->height,
0, 0, fb_write->width, fb_write->height,
mask, GL_NEAREST);
glBlitFramebuffer(
0, 0, fb_read->width, fb_read->height,
0, 0, fb_write->width, fb_write->height,
mask, GL_NEAREST);

/* Restore previous framebuffer */
if (fb_write->object == prev_fb) {
@@ -679,7 +683,8 @@ GPUOffScreen *GPU_offscreen_create(int width, int height, int samples, bool dept

ofs = MEM_callocN(sizeof(GPUOffScreen), "GPUOffScreen");

ofs->color = GPU_texture_create_2D_multisample(width, height,
ofs->color = GPU_texture_create_2D_multisample(
width, height,
(high_bitdepth) ? GPU_RGBA16F : GPU_RGBA8, NULL, samples, err_out);

if (depth) {
@@ -776,14 +781,16 @@ void GPU_offscreen_read_pixels(GPUOffScreen *ofs, int type, void *pixels)
/* create texture for new 'fbo_blit' */
glGenTextures(1, &tex_blit);
glBindTexture(GL_TEXTURE_2D, tex_blit);
glTexImage2D(GL_TEXTURE_2D, 0, (type == GL_FLOAT) ? GL_RGBA16F : GL_RGBA8,
w, h, 0, GL_RGBA, type, 0);
glTexImage2D(
GL_TEXTURE_2D, 0, (type == GL_FLOAT) ? GL_RGBA16F : GL_RGBA8,
w, h, 0, GL_RGBA, type, 0);

/* write into new single-sample buffer */
glGenFramebuffers(1, &fbo_blit);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo_blit);
glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, tex_blit, 0);
glFramebufferTexture2D(
GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, tex_blit, 0);

GLenum status = glCheckFramebufferStatus(GL_DRAW_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE) {

@@ -45,17 +45,17 @@
#include <stdlib.h>

/* necessary functions from matrix API */
extern void GPU_matrix_bind(const GPUShaderInterface*);
extern void GPU_matrix_bind(const GPUShaderInterface *);
extern bool GPU_matrix_dirty_get(void);

typedef struct {
/* TODO: organize this struct by frequency of change (run-time) */

GPUBatch* batch;
GPUContext* context;
GPUBatch *batch;
GPUContext *context;

/* current draw call */
GLubyte* buffer_data;
GLubyte *buffer_data;
uint buffer_offset;
uint buffer_bytes_mapped;
uint vertex_len;
@@ -66,14 +66,14 @@ typedef struct {

/* current vertex */
uint vertex_idx;
GLubyte* vertex_data;
GLubyte *vertex_data;
uint16_t unassigned_attrib_bits; /* which attributes of current vertex have not been given values? */

GLuint vbo_id;
GLuint vao_id;

GLuint bound_program;
const GPUShaderInterface* shader_interface;
const GPUShaderInterface *shader_interface;
GPUAttrBinding attrib_binding;
uint16_t prev_enabled_attrib_bits; /* <-- only affects this VAO, so we're ok */
} Immediate;
@@ -131,13 +131,13 @@ void immDestroy(void)
initialized = false;
}

GPUVertFormat* immVertexFormat(void)
GPUVertFormat *immVertexFormat(void)
{
GPU_vertformat_clear(&imm.vertex_format);
return &imm.vertex_format;
}

void immBindProgram(GLuint program, const GPUShaderInterface* shaderface)
void immBindProgram(GLuint program, const GPUShaderInterface *shaderface)
{
#if TRUST_NO_ONE
assert(imm.bound_program == 0);
@@ -261,7 +261,7 @@ void immBeginAtMost(GPUPrimType prim_type, uint vertex_len)
}


GPUBatch* immBeginBatch(GPUPrimType prim_type, uint vertex_len)
GPUBatch *immBeginBatch(GPUPrimType prim_type, uint vertex_len)
{
#if TRUST_NO_ONE
assert(initialized);
@@ -273,7 +273,7 @@ GPUBatch* immBeginBatch(GPUPrimType prim_type, uint vertex_len)
imm.vertex_idx = 0;
imm.unassigned_attrib_bits = imm.attrib_binding.enabled_bits;

GPUVertBuf* verts = GPU_vertbuf_create_with_format(&imm.vertex_format);
GPUVertBuf *verts = GPU_vertbuf_create_with_format(&imm.vertex_format);
GPU_vertbuf_data_alloc(verts, vertex_len);

imm.buffer_bytes_mapped = GPU_vertbuf_size_get(verts);
@@ -285,7 +285,7 @@ GPUBatch* immBeginBatch(GPUPrimType prim_type, uint vertex_len)
return imm.batch;
}

GPUBatch* immBeginBatchAtMost(GPUPrimType prim_type, uint vertex_len)
GPUBatch *immBeginBatchAtMost(GPUPrimType prim_type, uint vertex_len)
{
imm.strict_vertex_len = false;
return immBeginBatch(prim_type, vertex_len);
@@ -316,10 +316,10 @@ static void immDrawSetup(void)
const uint stride = imm.vertex_format.stride;

for (uint a_idx = 0; a_idx < imm.vertex_format.attr_len; ++a_idx) {
const GPUVertAttr* a = imm.vertex_format.attribs + a_idx;
const GPUVertAttr *a = imm.vertex_format.attribs + a_idx;

const uint offset = imm.buffer_offset + a->offset;
const GLvoid* pointer = (const GLubyte*)0 + offset;
const GLvoid *pointer = (const GLubyte *)0 + offset;

const uint loc = read_attrib_location(&imm.attrib_binding, a_idx);

@@ -413,7 +413,7 @@ static void setAttribValueBit(uint attrib_id)

void immAttrib1f(uint attrib_id, float x)
{
GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
GPUVertAttr *attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
assert(attrib->comp_type == GPU_COMP_F32);
@@ -423,7 +423,7 @@ void immAttrib1f(uint attrib_id, float x)
#endif
setAttribValueBit(attrib_id);

float* data = (float*)(imm.vertex_data + attrib->offset);
float *data = (float *)(imm.vertex_data + attrib->offset);
/* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm.buffer_data, data); */

data[0] = x;
@@ -431,7 +431,7 @@ void immAttrib1f(uint attrib_id, float x)

void immAttrib2f(uint attrib_id, float x, float y)
{
GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
GPUVertAttr *attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
assert(attrib->comp_type == GPU_COMP_F32);
@@ -441,7 +441,7 @@ void immAttrib2f(uint attrib_id, float x, float y)
#endif
setAttribValueBit(attrib_id);

float* data = (float*)(imm.vertex_data + attrib->offset);
float *data = (float *)(imm.vertex_data + attrib->offset);
/* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm.buffer_data, data); */

data[0] = x;
@@ -450,7 +450,7 @@ void immAttrib2f(uint attrib_id, float x, float y)

void immAttrib3f(uint attrib_id, float x, float y, float z)
{
GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
GPUVertAttr *attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
assert(attrib->comp_type == GPU_COMP_F32);
@@ -460,7 +460,7 @@ void immAttrib3f(uint attrib_id, float x, float y, float z)
#endif
setAttribValueBit(attrib_id);

float* data = (float*)(imm.vertex_data + attrib->offset);
float *data = (float *)(imm.vertex_data + attrib->offset);
/* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm.buffer_data, data); */

data[0] = x;
@@ -470,7 +470,7 @@ void immAttrib3f(uint attrib_id, float x, float y, float z)

void immAttrib4f(uint attrib_id, float x, float y, float z, float w)
{
GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
GPUVertAttr *attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
assert(attrib->comp_type == GPU_COMP_F32);
@@ -480,7 +480,7 @@ void immAttrib4f(uint attrib_id, float x, float y, float z, float w)
#endif
setAttribValueBit(attrib_id);

float* data = (float*)(imm.vertex_data + attrib->offset);
float *data = (float *)(imm.vertex_data + attrib->offset);
/* printf("%s %td %p\n", __FUNCTION__, (GLubyte*)data - imm.buffer_data, data); */

data[0] = x;
@@ -491,7 +491,7 @@ void immAttrib4f(uint attrib_id, float x, float y, float z, float w)

void immAttrib1u(uint attrib_id, uint x)
{
GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
GPUVertAttr *attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
assert(attrib->comp_type == GPU_COMP_U32);
@@ -501,14 +501,14 @@ void immAttrib1u(uint attrib_id, uint x)
#endif
setAttribValueBit(attrib_id);

uint* data = (uint*)(imm.vertex_data + attrib->offset);
uint *data = (uint *)(imm.vertex_data + attrib->offset);

data[0] = x;
}

void immAttrib2i(uint attrib_id, int x, int y)
{
GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
GPUVertAttr *attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
assert(attrib->comp_type == GPU_COMP_I32);
@@ -518,7 +518,7 @@ void immAttrib2i(uint attrib_id, int x, int y)
#endif
setAttribValueBit(attrib_id);

int* data = (int*)(imm.vertex_data + attrib->offset);
int *data = (int *)(imm.vertex_data + attrib->offset);

data[0] = x;
data[1] = y;
@@ -526,7 +526,7 @@ void immAttrib2i(uint attrib_id, int x, int y)

void immAttrib2s(uint attrib_id, short x, short y)
{
GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
GPUVertAttr *attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
assert(attrib->comp_type == GPU_COMP_I16);
@@ -536,7 +536,7 @@ void immAttrib2s(uint attrib_id, short x, short y)
#endif
setAttribValueBit(attrib_id);

short* data = (short*)(imm.vertex_data + attrib->offset);
short *data = (short *)(imm.vertex_data + attrib->offset);

data[0] = x;
data[1] = y;
@@ -559,7 +559,7 @@ void immAttrib4fv(uint attrib_id, const float data[4])

void immAttrib3ub(uint attrib_id, unsigned char r, unsigned char g, unsigned char b)
{
GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
GPUVertAttr *attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
assert(attrib->comp_type == GPU_COMP_U8);
@@ -569,7 +569,7 @@ void immAttrib3ub(uint attrib_id, unsigned char r, unsigned char g, unsigned cha
#endif
setAttribValueBit(attrib_id);

GLubyte* data = imm.vertex_data + attrib->offset;
GLubyte *data = imm.vertex_data + attrib->offset;
/* printf("%s %td %p\n", __FUNCTION__, data - imm.buffer_data, data); */

data[0] = r;
@@ -579,7 +579,7 @@ void immAttrib3ub(uint attrib_id, unsigned char r, unsigned char g, unsigned cha

void immAttrib4ub(uint attrib_id, unsigned char r, unsigned char g, unsigned char b, unsigned char a)
{
GPUVertAttr* attrib = imm.vertex_format.attribs + attrib_id;
GPUVertAttr *attrib = imm.vertex_format.attribs + attrib_id;
#if TRUST_NO_ONE
assert(attrib_id < imm.vertex_format.attr_len);
assert(attrib->comp_type == GPU_COMP_U8);
@@ -589,7 +589,7 @@ void immAttrib4ub(uint attrib_id, unsigned char r, unsigned char g, unsigned cha
#endif
setAttribValueBit(attrib_id);

GLubyte* data = imm.vertex_data + attrib->offset;
GLubyte *data = imm.vertex_data + attrib->offset;
/* printf("%s %td %p\n", __FUNCTION__, data - imm.buffer_data, data); */

data[0] = r;
@@ -633,11 +633,11 @@ static void immEndVertex(void) /* and move on to the next vertex */
#endif
for (uint a_idx = 0; a_idx < imm.vertex_format.attr_len; ++a_idx) {
if ((imm.unassigned_attrib_bits >> a_idx) & 1) {
const GPUVertAttr* a = imm.vertex_format.attribs + a_idx;
const GPUVertAttr *a = imm.vertex_format.attribs + a_idx;

/* printf("copying %s from vertex %u to %u\n", a->name, imm.vertex_idx - 1, imm.vertex_idx); */

GLubyte* data = imm.vertex_data + a->offset;
GLubyte *data = imm.vertex_data + a->offset;
memcpy(data, data - imm.vertex_format.stride, a->sz);
/* TODO: consolidate copy of adjacent attributes */
}
@@ -714,31 +714,31 @@ void immVertex2iv(uint attrib_id, const int data[2])
#define GET_UNIFORM const GPUShaderInput* uniform = GPU_shaderinterface_uniform(imm.shader_interface, name); if (uniform == NULL) return;
#endif

void immUniform1f(const char* name, float x)
void immUniform1f(const char *name, float x)
{
GET_UNIFORM
glUniform1f(uniform->location, x);
}

void immUniform2f(const char* name, float x, float y)
void immUniform2f(const char *name, float x, float y)
{
GET_UNIFORM
glUniform2f(uniform->location, x, y);
}

void immUniform2fv(const char* name, const float data[2])
void immUniform2fv(const char *name, const float data[2])
{
GET_UNIFORM
glUniform2fv(uniform->location, 1, data);
}

void immUniform3f(const char* name, float x, float y, float z)
void immUniform3f(const char *name, float x, float y, float z)
{
GET_UNIFORM
glUniform3f(uniform->location, x, y, z);
}

void immUniform3fv(const char* name, const float data[3])
void immUniform3fv(const char *name, const float data[3])
{
GET_UNIFORM
glUniform3fv(uniform->location, 1, data);
@@ -747,7 +747,7 @@ void immUniform3fv(const char* name, const float data[3])
/* can increase this limit or move to another file */
#define MAX_UNIFORM_NAME_LEN 60

void immUniformArray3fv(const char* bare_name, const float *data, int count)
void immUniformArray3fv(const char *bare_name, const float *data, int count)
{
/* look up "name[0]" when given "name" */
const size_t len = strlen(bare_name);
@@ -765,19 +765,19 @@ void immUniformArray3fv(const char* bare_name, const float *data, int count)
glUniform3fv(uniform->location, count, data);
}

void immUniform4f(const char* name, float x, float y, float z, float w)
void immUniform4f(const char *name, float x, float y, float z, float w)
{
GET_UNIFORM
glUniform4f(uniform->location, x, y, z, w);
}

void immUniform4fv(const char* name, const float data[4])
void immUniform4fv(const char *name, const float data[4])
{
GET_UNIFORM
glUniform4fv(uniform->location, 1, data);
}

void immUniformArray4fv(const char* bare_name, const float *data, int count)
void immUniformArray4fv(const char *bare_name, const float *data, int count)
{
/* look up "name[0]" when given "name" */
const size_t len = strlen(bare_name);
@@ -795,19 +795,19 @@ void immUniformArray4fv(const char* bare_name, const float *data, int count)
glUniform4fv(uniform->location, count, data);
}

void immUniformMatrix4fv(const char* name, const float data[4][4])
void immUniformMatrix4fv(const char *name, const float data[4][4])
{
GET_UNIFORM
glUniformMatrix4fv(uniform->location, 1, GL_FALSE, (float *)data);
}

void immUniform1i(const char* name, int x)
void immUniform1i(const char *name, int x)
{
GET_UNIFORM
glUniform1i(uniform->location, x);
}

void immUniform4iv(const char* name, const int data[4])
void immUniform4iv(const char *name, const int data[4])
{
GET_UNIFORM
glUniform4iv(uniform->location, 1, data);
@@ -817,7 +817,7 @@ void immUniform4iv(const char* name, const int data[4])

void immUniformColor4f(float r, float g, float b, float a)
{
const GPUShaderInput* uniform = GPU_shaderinterface_uniform_builtin(imm.shader_interface, GPU_UNIFORM_COLOR);
const GPUShaderInput *uniform = GPU_shaderinterface_uniform_builtin(imm.shader_interface, GPU_UNIFORM_COLOR);
#if TRUST_NO_ONE
assert(uniform != NULL);
#endif

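
A minimal immediate-mode sequence built from the helpers above (sketch: assumes a color shader bound via immBindProgram and 'pos' obtained from the current vertex format):

    immUniformColor4f(1.0f, 0.5f, 0.0f, 1.0f);
    immBegin(GPU_PRIM_LINES, 2);
    immVertex2f(pos, 0.0f, 0.0f);
    immVertex2f(pos, 1.0f, 1.0f);
    immEnd();
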
@@ -269,14 +269,14 @@ void imm_draw_circle_fill_3d(unsigned pos, float x, float y, float rad, int nseg
}

/**
 * Draw a lined box.
 *
 * \param pos The vertex attribute number for position.
 * \param x1 left.
 * \param y1 bottom.
 * \param x2 right.
 * \param y2 top.
 */
 * Draw a lined box.
 *
 * \param pos The vertex attribute number for position.
 * \param x1 left.
 * \param y1 bottom.
 * \param x2 right.
 * \param y2 top.
 */
void imm_draw_box_wire_2d(unsigned pos, float x1, float y1, float x2, float y2)
{
immBegin(GPU_PRIM_LINE_LOOP, 4);
@@ -353,17 +353,17 @@ void imm_draw_cube_wire_3d(uint pos, const float co[3], const float aspect[3])
}

/**
 * Draw a cylinder. Replacement for gluCylinder.
 * _warning_ : Slow, better use it only if you no other choices.
 *
 * \param pos The vertex attribute number for position.
 * \param nor The vertex attribute number for normal.
 * \param base Specifies the radius of the cylinder at z = 0.
 * \param top Specifies the radius of the cylinder at z = height.
 * \param height Specifies the height of the cylinder.
 * \param slices Specifies the number of subdivisions around the z axis.
 * \param stacks Specifies the number of subdivisions along the z axis.
 */
 * Draw a cylinder. Replacement for gluCylinder.
 * _warning_ : Slow, better use it only if you no other choices.
 *
 * \param pos The vertex attribute number for position.
 * \param nor The vertex attribute number for normal.
 * \param base Specifies the radius of the cylinder at z = 0.
 * \param top Specifies the radius of the cylinder at z = height.
 * \param height Specifies the height of the cylinder.
 * \param slices Specifies the number of subdivisions around the z axis.
 * \param stacks Specifies the number of subdivisions along the z axis.
 */
void imm_draw_cylinder_fill_normal_3d(
unsigned int pos, unsigned int nor, float base, float top, float height, int slices, int stacks)
{
@@ -384,10 +384,10 @@ void imm_draw_cylinder_fill_normal_3d(
float h1 = height * ((float)j / (float)stacks);
float h2 = height * ((float)(j + 1) / (float)stacks);

float v1[3] = {r1 *cos2, r1 * sin2, h1};
float v2[3] = {r2 *cos2, r2 * sin2, h2};
float v3[3] = {r2 *cos1, r2 * sin1, h2};
float v4[3] = {r1 *cos1, r1 * sin1, h1};
float v1[3] = {r1 * cos2, r1 * sin2, h1};
float v2[3] = {r2 * cos2, r2 * sin2, h2};
float v3[3] = {r2 * cos1, r2 * sin1, h2};
float v4[3] = {r1 * cos1, r1 * sin1, h1};
float n1[3], n2[3];

/* calc normals */

@@ -659,14 +659,15 @@ GPUMaterial *GPU_material_from_nodetree(
GPU_nodes_prune(&mat->nodes, mat->outlink);
GPU_nodes_get_vertex_attributes(&mat->nodes, &mat->attribs);
/* Create source code and search pass cache for an already compiled version. */
mat->pass = GPU_generate_pass_new(mat,
mat->outlink,
&mat->attribs,
&mat->nodes,
vert_code,
geom_code,
frag_lib,
defines);
mat->pass = GPU_generate_pass_new(
mat,
mat->outlink,
&mat->attribs,
&mat->nodes,
vert_code,
geom_code,
frag_lib,
defines);

if (mat->pass == NULL) {
/* We had a cache hit and the shader has already failed to compile. */

@@ -110,7 +110,7 @@ static void checkmat(cosnt float *m)
}
}

#define CHECKMAT(m) checkmat((const float*)m)
#define CHECKMAT(m) checkmat((const float *)m)

#else


@@ -242,7 +242,7 @@ bool GPU_select_is_cached(void)
const uint *GPU_select_buffer_near(const uint *buffer, int hits)
{
const uint *buffer_near = NULL;
uint depth_min = (uint)-1;
uint depth_min = (uint) - 1;
for (int i = 0; i < hits; i++) {
if (buffer[1] < depth_min) {
BLI_assert(buffer[3] != -1);

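Side note on the hunk above: "(uint)-1" is the all-ones value, i.e. UINT_MAX, used as a depth sentinel so the first real hit always compares smaller. The reformatted "(uint) - 1" parses identically (a cast applied to unary minus one).
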
@@ -214,8 +214,9 @@ static void gpu_shader_standard_extensions(char defines[MAX_EXT_DEFINE_LENGTH])
}
}

static void gpu_shader_standard_defines(char defines[MAX_DEFINE_LENGTH],
bool use_opensubdiv)
static void gpu_shader_standard_defines(
char defines[MAX_DEFINE_LENGTH],
bool use_opensubdiv)
{
/* some useful defines to detect GPU type */
if (GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_ANY, GPU_DRIVER_ANY)) {
@@ -244,11 +245,12 @@ static void gpu_shader_standard_defines(char defines[MAX_DEFINE_LENGTH],
 * a global typedef which we don't have better place to define
 * in yet.
 */
strcat(defines, "struct VertexData {\n"
" vec4 position;\n"
" vec3 normal;\n"
" vec2 uv;"
"};\n");
strcat(defines,
"struct VertexData {\n"
" vec4 position;\n"
" vec3 normal;\n"
" vec2 uv;"
"};\n");
}
#else
UNUSED_VARS(use_opensubdiv);
@@ -257,21 +259,23 @@ static void gpu_shader_standard_defines(char defines[MAX_DEFINE_LENGTH],
return;
}

GPUShader *GPU_shader_create(const char *vertexcode,
const char *fragcode,
const char *geocode,
const char *libcode,
const char *defines)
GPUShader *GPU_shader_create(
const char *vertexcode,
const char *fragcode,
const char *geocode,
const char *libcode,
const char *defines)
{
return GPU_shader_create_ex(vertexcode,
fragcode,
geocode,
libcode,
defines,
GPU_SHADER_FLAGS_NONE,
GPU_SHADER_TFB_NONE,
NULL,
0);
return GPU_shader_create_ex(
vertexcode,
fragcode,
geocode,
libcode,
defines,
GPU_SHADER_FLAGS_NONE,
GPU_SHADER_TFB_NONE,
NULL,
0);
}

#define DEBUG_SHADER_NONE ""
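
The wrapper above simply forwards to GPU_shader_create_ex with default flags and no transform feedback. Call-shape sketch (the source strings are placeholders, not real shader code):

    GPUShader *sh = GPU_shader_create(
            vert_glsl, frag_glsl, NULL, NULL, NULL);
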
@@ -321,15 +325,16 @@ static void gpu_dump_shaders(const char **code, const int num_shaders, const cha
	printf("Shader file written to disk: %s\n", shader_path);
}

GPUShader *GPU_shader_create_ex(const char *vertexcode,
                                const char *fragcode,
                                const char *geocode,
                                const char *libcode,
                                const char *defines,
                                const int flags,
                                const GPUShaderTFBType tf_type,
                                const char **tf_names,
                                const int tf_count)
GPUShader *GPU_shader_create_ex(
        const char *vertexcode,
        const char *fragcode,
        const char *geocode,
        const char *libcode,
        const char *defines,
        const int flags,
        const GPUShaderTFBType tf_type,
        const char **tf_names,
        const int tf_count)
{
#ifdef WITH_OPENSUBDIV
	bool use_opensubdiv = (flags & GPU_SHADER_FLAGS_SPECIAL_OPENSUBDIV) != 0;
@@ -366,8 +371,9 @@ GPUShader *GPU_shader_create_ex(const char *vertexcode,
		return NULL;
	}

	gpu_shader_standard_defines(standard_defines,
	                            use_opensubdiv);
	gpu_shader_standard_defines(
	        standard_defines,
	        use_opensubdiv);
	gpu_shader_standard_extensions(standard_extensions);

	if (vertexcode) {
@@ -410,12 +416,13 @@ GPUShader *GPU_shader_create_ex(const char *vertexcode,
#ifdef WITH_OPENSUBDIV
		/* TODO(sergey): Move to fragment shader source code generation. */
		if (use_opensubdiv) {
			source[num_source++] =
			source[num_source++] = (
			        "#ifdef USE_OPENSUBDIV\n"
			        "in block {\n"
			        "	VertexData v;\n"
			        "} inpt;\n"
			        "#endif\n";
			        "#endif\n"
			);
		}
#endif

@@ -502,13 +509,15 @@ GPUShader *GPU_shader_create_ex(const char *vertexcode,
		/* TODO(sergey): Find a better place for this. */
		if (use_opensubdiv) {
			if (GLEW_VERSION_4_1) {
				glProgramUniform1i(shader->program,
				                   GPU_shaderinterface_uniform(shader->interface, "FVarDataOffsetBuffer")->location,
				                   30); /* GL_TEXTURE30 */
				glProgramUniform1i(
				        shader->program,
				        GPU_shaderinterface_uniform(shader->interface, "FVarDataOffsetBuffer")->location,
				        30); /* GL_TEXTURE30 */

				glProgramUniform1i(shader->program,
				                   GPU_shaderinterface_uniform(shader->interface, "FVarDataBuffer")->location,
				                   31); /* GL_TEXTURE31 */
				glProgramUniform1i(
				        shader->program,
				        GPU_shaderinterface_uniform(shader->interface, "FVarDataBuffer")->location,
				        31); /* GL_TEXTURE31 */
			}
			else {
				glUseProgram(shader->program);
@@ -903,17 +912,20 @@ GPUShader *GPU_shader_get_builtin_shader(GPUBuiltinShader shader)

	if (shader == GPU_SHADER_EDGES_FRONT_BACK_PERSP && !GLEW_VERSION_3_2) {
		/* TODO: remove after switch to core profile (maybe) */
		static const GPUShaderStages legacy_fancy_edges =
		        { datatoc_gpu_shader_edges_front_back_persp_legacy_vert_glsl,
		          datatoc_gpu_shader_flat_color_alpha_test_0_frag_glsl };
		static const GPUShaderStages legacy_fancy_edges = {
			datatoc_gpu_shader_edges_front_back_persp_legacy_vert_glsl,
			datatoc_gpu_shader_flat_color_alpha_test_0_frag_glsl,
		};
		stages = &legacy_fancy_edges;
	}

	if (shader == GPU_SHADER_3D_LINE_DASHED_UNIFORM_COLOR && !GLEW_VERSION_3_2) {
		/* Dashed lines need a geometry shader, which legacy OpenGL does not support; fall back to solid lines. */
		/* TODO: remove after switch to core profile (maybe) */
		static const GPUShaderStages legacy_dashed_lines = { datatoc_gpu_shader_3D_line_dashed_uniform_color_legacy_vert_glsl,
		                                                     datatoc_gpu_shader_2D_line_dashed_frag_glsl };
		static const GPUShaderStages legacy_dashed_lines = {
			datatoc_gpu_shader_3D_line_dashed_uniform_color_legacy_vert_glsl,
			datatoc_gpu_shader_2D_line_dashed_frag_glsl,
		};
		stages = &legacy_dashed_lines;
	}
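The two hunks above reformat the same fallback pattern: when the running context lacks GL 3.2, `stages` is pointed at a static legacy variant of the shader's stage sources. A minimal sketch of that selection, with a reduced two-field `GPUShaderStages` and hypothetical source strings (the real struct and the `datatoc_*` symbols live elsewhere in the module):

typedef struct GPUShaderStages {
	const char *vert;
	const char *frag;
} GPUShaderStages;

static const GPUShaderStages *pick_stages(const GPUShaderStages *modern, bool has_gl_3_2)
{
	/* Hypothetical legacy sources; in the real code these are embedded GLSL strings. */
	static const GPUShaderStages legacy = {
		"/* legacy vertex GLSL */",
		"/* legacy fragment GLSL */",
	};
	return has_gl_3_2 ? modern : &legacy;
}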
@@ -42,9 +42,9 @@
#include <stdio.h>
#endif

static const char* BuiltinUniform_name(GPUUniformBuiltin u)
static const char *BuiltinUniform_name(GPUUniformBuiltin u)
{
	static const char* names[] = {
	static const char *names[] = {
		[GPU_UNIFORM_NONE] = NULL,

		[GPU_UNIFORM_MODEL] = "ModelMatrix",
@@ -76,7 +76,7 @@ static const char* BuiltinUniform_name(GPUUniformBuiltin u)
	return names[u];
}

GPU_INLINE bool match(const char* a, const char* b)
GPU_INLINE bool match(const char *a, const char *b)
{
	return strcmp(a, b) == 0;
}
@@ -90,28 +90,31 @@ GPU_INLINE uint hash_string(const char *str)
	return i;
}

GPU_INLINE void set_input_name(GPUShaderInterface* shaderface, GPUShaderInput* input,
                               const char* name, uint32_t name_len)
GPU_INLINE void set_input_name(
        GPUShaderInterface *shaderface, GPUShaderInput *input,
        const char *name, uint32_t name_len)
{
	input->name_offset = shaderface->name_buffer_offset;
	input->name_hash = hash_string(name);
	shaderface->name_buffer_offset += name_len + 1; /* include NULL terminator */
}

GPU_INLINE void shader_input_to_bucket(GPUShaderInput* input,
                                       GPUShaderInput* buckets[GPU_NUM_SHADERINTERFACE_BUCKETS])
GPU_INLINE void shader_input_to_bucket(
        GPUShaderInput *input,
        GPUShaderInput *buckets[GPU_NUM_SHADERINTERFACE_BUCKETS])
{
	const uint bucket_index = input->name_hash % GPU_NUM_SHADERINTERFACE_BUCKETS;
	input->next = buckets[bucket_index];
	buckets[bucket_index] = input;
}

GPU_INLINE const GPUShaderInput* buckets_lookup(GPUShaderInput* const buckets[GPU_NUM_SHADERINTERFACE_BUCKETS],
                                                const char *name_buffer, const char *name)
GPU_INLINE const GPUShaderInput *buckets_lookup(
        GPUShaderInput *const buckets[GPU_NUM_SHADERINTERFACE_BUCKETS],
        const char *name_buffer, const char *name)
{
	const uint name_hash = hash_string(name);
	const uint bucket_index = name_hash % GPU_NUM_SHADERINTERFACE_BUCKETS;
	const GPUShaderInput* input = buckets[bucket_index];
	const GPUShaderInput *input = buckets[bucket_index];
	if (input == NULL) {
		/* Requested uniform is not found at all. */
		return NULL;
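For reference, the bucket scheme these functions implement: each `GPUShaderInput` is chained into one of `GPU_NUM_SHADERINTERFACE_BUCKETS` singly linked lists through its intrusive `next` pointer, and a lookup hashes the name once and walks a single chain. A self-contained sketch under simplified names (the hash below is a stand-in for `hash_string()`, not a copy of it):

#include <stddef.h>
#include <string.h>

#define NUM_BUCKETS 257

typedef struct Input {
	struct Input *next;  /* intrusive chain link */
	unsigned name_hash;
	const char *name;
} Input;

static unsigned hash_str(const char *s)
{
	unsigned h = 5381;
	while (*s) { h = h * 33 + (unsigned char)*s++; }
	return h;
}

static void bucket_insert(Input *buckets[NUM_BUCKETS], Input *input)
{
	const unsigned i = input->name_hash % NUM_BUCKETS;
	input->next = buckets[i];  /* push-front, as in shader_input_to_bucket() */
	buckets[i] = input;
}

static const Input *bucket_lookup(Input *const buckets[NUM_BUCKETS], const char *name)
{
	const unsigned h = hash_str(name);
	for (const Input *input = buckets[h % NUM_BUCKETS]; input; input = input->next) {
		if (input->name_hash == h && strcmp(input->name, name) == 0) {
			return input;  /* hash matched and the names compare equal */
		}
	}
	return NULL;  /* not found */
}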
@@ -129,7 +132,7 @@ GPU_INLINE const GPUShaderInput* buckets_lookup(GPUShaderInput* const buckets[GP
		return NULL;
	}
	/* Work through possible collisions. */
	const GPUShaderInput* next = input;
	const GPUShaderInput *next = input;
	while (next != NULL) {
		input = next;
		next = input->next;
@@ -143,7 +146,7 @@ GPU_INLINE const GPUShaderInput* buckets_lookup(GPUShaderInput* const buckets[GP
	return NULL; /* not found */
}

GPU_INLINE void buckets_free(GPUShaderInput* buckets[GPU_NUM_SHADERINTERFACE_BUCKETS])
GPU_INLINE void buckets_free(GPUShaderInput *buckets[GPU_NUM_SHADERINTERFACE_BUCKETS])
{
	for (uint bucket_index = 0; bucket_index < GPU_NUM_SHADERINTERFACE_BUCKETS; ++bucket_index) {
		GPUShaderInput *input = buckets[bucket_index];
@@ -155,13 +158,13 @@ GPU_INLINE void buckets_free(GPUShaderInput* buckets[GPU_NUM_SHADERINTERFACE_BUC
	}
}

static bool setup_builtin_uniform(GPUShaderInput* input, const char* name)
static bool setup_builtin_uniform(GPUShaderInput *input, const char *name)
{
	/* TODO: reject DOUBLE, IMAGE, ATOMIC_COUNTER gl_types */

	/* detect built-in uniforms (name must match) */
	for (GPUUniformBuiltin u = GPU_UNIFORM_NONE + 1; u < GPU_UNIFORM_CUSTOM; ++u) {
		const char* builtin_name = BuiltinUniform_name(u);
		const char *builtin_name = BuiltinUniform_name(u);
		if (match(name, builtin_name)) {
			input->builtin_type = u;
			return true;
@@ -171,15 +174,15 @@ static bool setup_builtin_uniform(GPUShaderInput* input, const char* name)
	return false;
}

static const GPUShaderInput* add_uniform(GPUShaderInterface* shaderface, const char* name)
static const GPUShaderInput *add_uniform(GPUShaderInterface *shaderface, const char *name)
{
	GPUShaderInput* input = malloc(sizeof(GPUShaderInput));
	GPUShaderInput *input = malloc(sizeof(GPUShaderInput));

	input->location = glGetUniformLocation(shaderface->program, name);

	uint name_len = strlen(name);
	shaderface->name_buffer = realloc(shaderface->name_buffer, shaderface->name_buffer_offset + name_len + 1); /* include NULL terminator */
	char* name_buffer = shaderface->name_buffer + shaderface->name_buffer_offset;
	char *name_buffer = shaderface->name_buffer + shaderface->name_buffer_offset;
	strcpy(name_buffer, name);

	set_input_name(shaderface, input, name, name_len);
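`add_uniform()` and `set_input_name()` together maintain one shared, realloc-grown name buffer: each input stores only an offset (plus a hash), never a pointer that a later realloc could invalidate. A condensed sketch of that bookkeeping, with hypothetical names:

#include <stdlib.h>
#include <string.h>

typedef struct NameBuffer {
	char *data;       /* all names, back to back, NULL-terminated */
	unsigned offset;  /* next free byte */
} NameBuffer;

static unsigned name_buffer_append(NameBuffer *buf, const char *name)
{
	const size_t name_len = strlen(name);
	buf->data = realloc(buf->data, buf->offset + name_len + 1); /* + NULL terminator */
	memcpy(buf->data + buf->offset, name, name_len + 1);
	const unsigned name_offset = buf->offset;
	buf->offset += (unsigned)(name_len + 1);
	return name_offset;  /* stable handle: survives future reallocs */
}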
@@ -192,17 +195,18 @@ static const GPUShaderInput* add_uniform(GPUShaderInterface* shaderface, const c
		shaderface->builtin_uniforms[input->builtin_type] = input;
	}
#if DEBUG_SHADER_INTERFACE
	printf("GPUShaderInterface %p, program %d, uniform[] '%s' at location %d\n", shaderface,
	       shaderface->program,
	       name,
	       input->location);
	printf("GPUShaderInterface %p, program %d, uniform[] '%s' at location %d\n",
	       shaderface,
	       shaderface->program,
	       name,
	       input->location);
#endif
	return input;
}

GPUShaderInterface* GPU_shaderinterface_create(int32_t program)
GPUShaderInterface *GPU_shaderinterface_create(int32_t program)
{
	GPUShaderInterface* shaderface = calloc(1, sizeof(GPUShaderInterface));
	GPUShaderInterface *shaderface = calloc(1, sizeof(GPUShaderInterface));
	shaderface->program = program;

#if DEBUG_SHADER_INTERFACE
@@ -223,16 +227,16 @@ GPUShaderInterface* GPU_shaderinterface_create(int32_t program)

	/* Attributes */
	for (uint32_t i = 0; i < attr_len; ++i) {
		GPUShaderInput* input = malloc(sizeof(GPUShaderInput));
		GPUShaderInput *input = malloc(sizeof(GPUShaderInput));
		GLsizei remaining_buffer = name_buffer_len - shaderface->name_buffer_offset;
		char* name = shaderface->name_buffer + shaderface->name_buffer_offset;
		char *name = shaderface->name_buffer + shaderface->name_buffer_offset;
		GLsizei name_len = 0;

		glGetActiveAttrib(program, i, remaining_buffer, &name_len, &input->size, &input->gl_type, name);

		/* remove "[0]" from array name */
		if (name[name_len-1] == ']') {
			name[name_len-3] = '\0';
		if (name[name_len - 1] == ']') {
			name[name_len - 3] = '\0';
			name_len -= 3;
		}

@@ -250,9 +254,9 @@ GPUShaderInterface* GPU_shaderinterface_create(int32_t program)
	}
	/* Uniform Blocks */
	for (uint32_t i = 0; i < ubo_len; ++i) {
		GPUShaderInput* input = malloc(sizeof(GPUShaderInput));
		GPUShaderInput *input = malloc(sizeof(GPUShaderInput));
		GLsizei remaining_buffer = name_buffer_len - shaderface->name_buffer_offset;
		char* name = shaderface->name_buffer + shaderface->name_buffer_offset;
		char *name = shaderface->name_buffer + shaderface->name_buffer_offset;
		GLsizei name_len = 0;

		glGetActiveUniformBlockName(program, i, remaining_buffer, &name_len, name);
@@ -269,19 +273,19 @@ GPUShaderInterface* GPU_shaderinterface_create(int32_t program)
	}
	/* Builtin Uniforms */
	for (GPUUniformBuiltin u = GPU_UNIFORM_NONE + 1; u < GPU_UNIFORM_CUSTOM; ++u) {
		const char* builtin_name = BuiltinUniform_name(u);
		const char *builtin_name = BuiltinUniform_name(u);
		if (glGetUniformLocation(program, builtin_name) != -1) {
			add_uniform((GPUShaderInterface*)shaderface, builtin_name);
			add_uniform((GPUShaderInterface *)shaderface, builtin_name);
		}
	}
	/* Batches ref buffer */
	shaderface->batches_len = GPU_SHADERINTERFACE_REF_ALLOC_COUNT;
	shaderface->batches = calloc(shaderface->batches_len, sizeof(GPUBatch*));
	shaderface->batches = calloc(shaderface->batches_len, sizeof(GPUBatch *));

	return shaderface;
}

void GPU_shaderinterface_discard(GPUShaderInterface* shaderface)
void GPU_shaderinterface_discard(GPUShaderInterface *shaderface)
{
	/* Free memory used by buckets and hash entries. */
	buckets_free(shaderface->uniform_buckets);
@@ -300,19 +304,19 @@ void GPU_shaderinterface_discard(GPUShaderInterface* shaderface)
	free(shaderface);
}

const GPUShaderInput* GPU_shaderinterface_uniform(const GPUShaderInterface* shaderface, const char* name)
const GPUShaderInput *GPU_shaderinterface_uniform(const GPUShaderInterface *shaderface, const char *name)
{
	/* TODO: Warn if we find a matching builtin, since these can be looked up much quicker. */
	const GPUShaderInput* input = buckets_lookup(shaderface->uniform_buckets, shaderface->name_buffer, name);
	const GPUShaderInput *input = buckets_lookup(shaderface->uniform_buckets, shaderface->name_buffer, name);
	/* If the input is not found, add it so it's found next time. */
	if (input == NULL) {
		input = add_uniform((GPUShaderInterface*)shaderface, name);
		input = add_uniform((GPUShaderInterface *)shaderface, name);
	}
	return (input->location != -1) ? input : NULL;
}

const GPUShaderInput* GPU_shaderinterface_uniform_builtin(
        const GPUShaderInterface* shaderface, GPUUniformBuiltin builtin)
const GPUShaderInput *GPU_shaderinterface_uniform_builtin(
        const GPUShaderInterface *shaderface, GPUUniformBuiltin builtin)
{
#if TRUST_NO_ONE
	assert(builtin != GPU_UNIFORM_NONE);
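Read together, the lookup above is a lazy cache: `GPU_shaderinterface_uniform()` first consults the buckets and, on a miss, calls `add_uniform()`, which pays for one `glGetUniformLocation()` and stores the result, so even a name absent from the program (location -1) is only ever queried once. Roughly, assembled from the hunks above rather than copied verbatim:

const GPUShaderInput *uniform_lookup_cached(GPUShaderInterface *shaderface, const char *name)
{
	const GPUShaderInput *input =
	        buckets_lookup(shaderface->uniform_buckets, shaderface->name_buffer, name);
	if (input == NULL) {
		/* Miss: query GL once and cache, including "not found" entries. */
		input = add_uniform(shaderface, name);
	}
	return (input->location != -1) ? input : NULL;
}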
@@ -322,17 +326,17 @@ const GPUShaderInput* GPU_shaderinterface_uniform_builtin(
	return shaderface->builtin_uniforms[builtin];
}

const GPUShaderInput* GPU_shaderinterface_ubo(const GPUShaderInterface* shaderface, const char* name)
const GPUShaderInput *GPU_shaderinterface_ubo(const GPUShaderInterface *shaderface, const char *name)
{
	return buckets_lookup(shaderface->ubo_buckets, shaderface->name_buffer, name);
}

const GPUShaderInput* GPU_shaderinterface_attr(const GPUShaderInterface* shaderface, const char* name)
const GPUShaderInput *GPU_shaderinterface_attr(const GPUShaderInterface *shaderface, const char *name)
{
	return buckets_lookup(shaderface->attrib_buckets, shaderface->name_buffer, name);
}

void GPU_shaderinterface_add_batch_ref(GPUShaderInterface* shaderface, GPUBatch* batch)
void GPU_shaderinterface_add_batch_ref(GPUShaderInterface *shaderface, GPUBatch *batch)
{
	int i; /* find first unused slot */
	for (i = 0; i < shaderface->batches_len; ++i) {
@@ -344,13 +348,13 @@ void GPU_shaderinterface_add_batch_ref(GPUShaderInterface* shaderface, GPUBatch*
		/* Not enough space, realloc the array. */
		i = shaderface->batches_len;
		shaderface->batches_len += GPU_SHADERINTERFACE_REF_ALLOC_COUNT;
		shaderface->batches = realloc(shaderface->batches, sizeof(GPUBatch*) * shaderface->batches_len);
		memset(shaderface->batches + i, 0, sizeof(GPUBatch*) * GPU_SHADERINTERFACE_REF_ALLOC_COUNT);
		shaderface->batches = realloc(shaderface->batches, sizeof(GPUBatch *) * shaderface->batches_len);
		memset(shaderface->batches + i, 0, sizeof(GPUBatch *) * GPU_SHADERINTERFACE_REF_ALLOC_COUNT);
	}
	shaderface->batches[i] = batch;
}

void GPU_shaderinterface_remove_batch_ref(GPUShaderInterface* shaderface, GPUBatch* batch)
void GPU_shaderinterface_remove_batch_ref(GPUShaderInterface *shaderface, GPUBatch *batch)
{
	for (int i = 0; i < shaderface->batches_len; ++i) {
		if (shaderface->batches[i] == batch) {
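`GPU_shaderinterface_add_batch_ref()` grows the batch pointer array in fixed chunks of `GPU_SHADERINTERFACE_REF_ALLOC_COUNT`, zeroing the new tail so empty slots read as NULL on the next scan. The pattern in isolation, with simplified names and an assumed chunk size:

#include <stdlib.h>
#include <string.h>

#define REF_ALLOC_COUNT 16  /* assumed chunk size; the real constant is module-defined */

static int ref_array_add(void ***refs, int *len, void *item)
{
	int i;
	for (i = 0; i < *len; ++i) {
		if ((*refs)[i] == NULL) break;  /* first unused slot */
	}
	if (i == *len) {
		/* Not enough space: grow by one chunk and zero the new tail. */
		*len += REF_ALLOC_COUNT;
		*refs = realloc(*refs, sizeof(void *) * (size_t)*len);
		memset(*refs + i, 0, sizeof(void *) * REF_ALLOC_COUNT);
	}
	(*refs)[i] = item;
	return i;
}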
@@ -66,10 +66,11 @@ void GPU_blend_set_func_separate(
        GPUBlendFunction src_rgb, GPUBlendFunction dst_rgb,
        GPUBlendFunction src_alpha, GPUBlendFunction dst_alpha)
{
	glBlendFuncSeparate(gpu_get_gl_blendfunction(src_rgb),
	                    gpu_get_gl_blendfunction(dst_rgb),
	                    gpu_get_gl_blendfunction(src_alpha),
	                    gpu_get_gl_blendfunction(dst_alpha));
	glBlendFuncSeparate(
	        gpu_get_gl_blendfunction(src_rgb),
	        gpu_get_gl_blendfunction(dst_rgb),
	        gpu_get_gl_blendfunction(src_alpha),
	        gpu_get_gl_blendfunction(dst_alpha));
}

void GPU_depth_test(bool enable)

@@ -160,9 +160,10 @@ static int gpu_get_component_count(GPUTextureFormat format)
/* Definitely not complete, edit according to the gl specification. */
static void gpu_validate_data_format(GPUTextureFormat tex_format, GPUDataFormat data_format)
{
	if (ELEM(tex_format, GPU_DEPTH_COMPONENT24,
	         GPU_DEPTH_COMPONENT16,
	         GPU_DEPTH_COMPONENT32F))
	if (ELEM(tex_format,
	         GPU_DEPTH_COMPONENT24,
	         GPU_DEPTH_COMPONENT16,
	         GPU_DEPTH_COMPONENT32F))
	{
		BLI_assert(data_format == GPU_DATA_FLOAT);
	}
@@ -196,9 +197,10 @@ static void gpu_validate_data_format(GPUTextureFormat tex_format, GPUDataFormat

static GPUDataFormat gpu_get_data_format_from_tex_format(GPUTextureFormat tex_format)
{
	if (ELEM(tex_format, GPU_DEPTH_COMPONENT24,
	         GPU_DEPTH_COMPONENT16,
	         GPU_DEPTH_COMPONENT32F))
	if (ELEM(tex_format,
	         GPU_DEPTH_COMPONENT24,
	         GPU_DEPTH_COMPONENT16,
	         GPU_DEPTH_COMPONENT32F))
	{
		return GPU_DATA_FLOAT;
	}
@@ -232,9 +234,10 @@ static GPUDataFormat gpu_get_data_format_from_tex_fo
/* Definitely not complete, edit according to the gl specification. */
static GLenum gpu_get_gl_dataformat(GPUTextureFormat data_type, GPUTextureFormatFlag *format_flag)
{
	if (ELEM(data_type, GPU_DEPTH_COMPONENT24,
	         GPU_DEPTH_COMPONENT16,
	         GPU_DEPTH_COMPONENT32F))
	if (ELEM(data_type,
	         GPU_DEPTH_COMPONENT24,
	         GPU_DEPTH_COMPONENT16,
	         GPU_DEPTH_COMPONENT32F))
	{
		*format_flag |= GPU_FORMAT_DEPTH;
		return GL_DEPTH_COMPONENT;
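`ELEM()` in these hunks is Blender's variadic membership macro; the reformat only moves the first candidate onto its own line so all candidates align. A reduced fixed-arity equivalent, for illustration only:

#define ELEM3(v, a, b, c) ((v) == (a) || (v) == (b) || (v) == (c))

/* The depth-format tests above then read as: */
/* if (ELEM3(tex_format, GPU_DEPTH_COMPONENT24, GPU_DEPTH_COMPONENT16, GPU_DEPTH_COMPONENT32F)) */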
@@ -1161,8 +1164,9 @@ void GPU_texture_bind(GPUTexture *tex, int number)
	if ((G.debug & G_DEBUG)) {
		for (int i = 0; i < GPU_TEX_MAX_FBO_ATTACHED; ++i) {
			if (tex->fb[i] && GPU_framebuffer_bound(tex->fb[i])) {
				fprintf(stderr, "Feedback loop warning!: Attempting to bind "
				                "texture attached to current framebuffer!\n");
				fprintf(stderr,
				        "Feedback loop warning!: Attempting to bind "
				        "texture attached to current framebuffer!\n");
				BLI_assert(0); /* Should never happen! */
				break;
			}

@@ -49,16 +49,16 @@ static GLenum convert_usage_type_to_gl(GPUUsageType type)
	return table[type];
}

GPUVertBuf* GPU_vertbuf_create(GPUUsageType usage)
GPUVertBuf *GPU_vertbuf_create(GPUUsageType usage)
{
	GPUVertBuf* verts = malloc(sizeof(GPUVertBuf));
	GPUVertBuf *verts = malloc(sizeof(GPUVertBuf));
	GPU_vertbuf_init(verts, usage);
	return verts;
}

GPUVertBuf* GPU_vertbuf_create_with_format_ex(const GPUVertFormat* format, GPUUsageType usage)
GPUVertBuf *GPU_vertbuf_create_with_format_ex(const GPUVertFormat *format, GPUUsageType usage)
{
	GPUVertBuf* verts = GPU_vertbuf_create(usage);
	GPUVertBuf *verts = GPU_vertbuf_create(usage);
	GPU_vertformat_copy(&verts->format, format);
	if (!format->packed) {
		VertexFormat_pack(&verts->format);
@@ -69,14 +69,14 @@ GPUVertBuf* GPU_vertbuf_create_with_format_ex(const GPUVertFormat* format, GPUUs
	/* TODO: implement those memory savings */
}

void GPU_vertbuf_init(GPUVertBuf* verts, GPUUsageType usage)
void GPU_vertbuf_init(GPUVertBuf *verts, GPUUsageType usage)
{
	memset(verts, 0, sizeof(GPUVertBuf));
	verts->usage = usage;
	verts->dirty = true;
}

void GPU_vertbuf_init_with_format_ex(GPUVertBuf* verts, const GPUVertFormat* format, GPUUsageType usage)
void GPU_vertbuf_init_with_format_ex(GPUVertBuf *verts, const GPUVertFormat *format, GPUUsageType usage)
{
	GPU_vertbuf_init(verts, usage);
	GPU_vertformat_copy(&verts->format, format);
@@ -85,7 +85,7 @@ void GPU_vertbuf_init_with_format_ex(GPUVertBuf* verts, const GPUVertFormat* for
	}
}

void GPU_vertbuf_discard(GPUVertBuf* verts)
void GPU_vertbuf_discard(GPUVertBuf *verts)
{
	if (verts->vbo_id) {
		GPU_buf_id_free(verts->vbo_id);
@@ -99,15 +99,15 @@ void GPU_vertbuf_discard(GPUVertBuf* verts)
	free(verts);
}

uint GPU_vertbuf_size_get(const GPUVertBuf* verts)
uint GPU_vertbuf_size_get(const GPUVertBuf *verts)
{
	return vertex_buffer_size(&verts->format, verts->vertex_len);
}

/* create a new allocation, discarding any existing data */
void GPU_vertbuf_data_alloc(GPUVertBuf* verts, uint v_len)
void GPU_vertbuf_data_alloc(GPUVertBuf *verts, uint v_len)
{
	GPUVertFormat* format = &verts->format;
	GPUVertFormat *format = &verts->format;
	if (!format->packed) {
		VertexFormat_pack(format);
	}
@@ -133,7 +133,7 @@ void GPU_vertbuf_data_alloc(GPUVertBuf* verts, uint v_len)
}

/* resize buffer keeping existing data */
void GPU_vertbuf_data_resize(GPUVertBuf* verts, uint v_len)
void GPU_vertbuf_data_resize(GPUVertBuf *verts, uint v_len)
{
#if TRUST_NO_ONE
	assert(verts->data != NULL);
@@ -152,7 +152,7 @@ void GPU_vertbuf_data_resize(GPUVertBuf* verts, uint v_len)
/* Sets the vertex count but does not change the allocation.
 * Only this many verts will be uploaded to the GPU and rendered.
 * This is useful for streaming data. */
void GPU_vertbuf_vertex_count_set(GPUVertBuf* verts, uint v_len)
void GPU_vertbuf_vertex_count_set(GPUVertBuf *verts, uint v_len)
{
#if TRUST_NO_ONE
	assert(verts->data != NULL); /* only for dynamic data */
@@ -166,10 +166,10 @@ void GPU_vertbuf_vertex_count_set(GPUVertBuf* verts, uint v_len)
	verts->vertex_len = v_len;
}

void GPU_vertbuf_attr_set(GPUVertBuf* verts, uint a_idx, uint v_idx, const void* data)
void GPU_vertbuf_attr_set(GPUVertBuf *verts, uint a_idx, uint v_idx, const void *data)
{
	const GPUVertFormat* format = &verts->format;
	const GPUVertAttr* a = format->attribs + a_idx;
	const GPUVertFormat *format = &verts->format;
	const GPUVertAttr *a = format->attribs + a_idx;

#if TRUST_NO_ONE
	assert(a_idx < format->attr_len);
@@ -177,13 +177,13 @@ void GPU_vertbuf_attr_set(GPUVertBuf* verts, uint a_idx, uint v_idx, const void*
	assert(verts->data != NULL);
#endif
	verts->dirty = true;
	memcpy((GLubyte*)verts->data + a->offset + v_idx * format->stride, data, a->sz);
	memcpy((GLubyte *)verts->data + a->offset + v_idx * format->stride, data, a->sz);
}

void GPU_vertbuf_attr_fill(GPUVertBuf* verts, uint a_idx, const void* data)
void GPU_vertbuf_attr_fill(GPUVertBuf *verts, uint a_idx, const void *data)
{
	const GPUVertFormat* format = &verts->format;
	const GPUVertAttr* a = format->attribs + a_idx;
	const GPUVertFormat *format = &verts->format;
	const GPUVertAttr *a = format->attribs + a_idx;

#if TRUST_NO_ONE
	assert(a_idx < format->attr_len);
@@ -193,10 +193,10 @@ void GPU_vertbuf_attr_fill(GPUVertBuf* verts, uint a_idx, const void* data)
	GPU_vertbuf_attr_fill_stride(verts, a_idx, stride, data);
}

void GPU_vertbuf_attr_fill_stride(GPUVertBuf* verts, uint a_idx, uint stride, const void* data)
void GPU_vertbuf_attr_fill_stride(GPUVertBuf *verts, uint a_idx, uint stride, const void *data)
{
	const GPUVertFormat* format = &verts->format;
	const GPUVertAttr* a = format->attribs + a_idx;
	const GPUVertFormat *format = &verts->format;
	const GPUVertAttr *a = format->attribs + a_idx;

#if TRUST_NO_ONE
	assert(a_idx < format->attr_len);
@@ -212,15 +212,15 @@ void GPU_vertbuf_attr_fill_stride(GPUVertBuf* verts, uint a_idx, uint stride, co
	else {
		/* we must copy it per vertex */
		for (uint v = 0; v < vertex_len; ++v) {
			memcpy((GLubyte*)verts->data + a->offset + v * format->stride, (const GLubyte*)data + v * stride, a->sz);
			memcpy((GLubyte *)verts->data + a->offset + v * format->stride, (const GLubyte *)data + v * stride, a->sz);
		}
	}
}

void GPU_vertbuf_attr_get_raw_data(GPUVertBuf* verts, uint a_idx, GPUVertBufRaw *access)
void GPU_vertbuf_attr_get_raw_data(GPUVertBuf *verts, uint a_idx, GPUVertBufRaw *access)
{
	const GPUVertFormat* format = &verts->format;
	const GPUVertAttr* a = format->attribs + a_idx;
	const GPUVertFormat *format = &verts->format;
	const GPUVertAttr *a = format->attribs + a_idx;

#if TRUST_NO_ONE
	assert(a_idx < format->attr_len);
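The loop reformatted in `GPU_vertbuf_attr_fill_stride()` scatters strided source data into an interleaved vertex buffer, one attribute-sized `memcpy` per vertex. A standalone sketch of that copy under assumed parameters, without the module's `TRUST_NO_ONE` checks or its single-`memcpy` fast path:

#include <string.h>

static void scatter_attr(unsigned char *vbo_data, unsigned vbo_stride,
                         unsigned attr_offset, unsigned attr_sz,
                         const unsigned char *src, unsigned src_stride,
                         unsigned vertex_len)
{
	for (unsigned v = 0; v < vertex_len; ++v) {
		/* destination: this vertex's slot for the attribute; source: v-th element */
		memcpy(vbo_data + attr_offset + v * vbo_stride, src + v * src_stride, attr_sz);
	}
}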
@@ -231,14 +231,14 @@ void GPU_vertbuf_attr_get_raw_data(GPUVertBuf* verts, uint a_idx, GPUVertBufRaw

	access->size = a->sz;
	access->stride = format->stride;
	access->data = (GLubyte*)verts->data + a->offset;
	access->data = (GLubyte *)verts->data + a->offset;
	access->data_init = access->data;
#if TRUST_NO_ONE
	access->_data_end = access->data_init + (size_t)(verts->vertex_alloc * format->stride);
#endif
}

static void VertBuffer_upload_data(GPUVertBuf* verts)
static void VertBuffer_upload_data(GPUVertBuf *verts)
{
	uint buffer_sz = GPU_vertbuf_size_get(verts);

@@ -254,7 +254,7 @@ static void VertBuffer_upload_data(GPUVertBuf* verts)
	verts->dirty = false;
}

void GPU_vertbuf_use(GPUVertBuf* verts)
void GPU_vertbuf_use(GPUVertBuf *verts)
{
	glBindBuffer(GL_ARRAY_BUFFER, verts->vbo_id);
	if (verts->dirty) {

@@ -40,7 +40,7 @@
#include <stdio.h>
#endif

void GPU_vertformat_clear(GPUVertFormat* format)
void GPU_vertformat_clear(GPUVertFormat *format)
{
#if TRUST_NO_ONE
	memset(format, 0, sizeof(GPUVertFormat));
@@ -56,7 +56,7 @@ void GPU_vertformat_clear(GPUVertFormat* format)
#endif
}

void GPU_vertformat_copy(GPUVertFormat* dest, const GPUVertFormat* src)
void GPU_vertformat_copy(GPUVertFormat *dest, const GPUVertFormat *src)
{
	/* copy regular struct fields */
	memcpy(dest, src, sizeof(GPUVertFormat));
@@ -90,7 +90,7 @@ static unsigned comp_sz(GPUVertCompType type)
#if TRUST_NO_ONE
	assert(type <= GPU_COMP_F32); /* other types have irregular sizes (not bytes) */
#endif
	const GLubyte sizes[] = {1,1,2,2,4,4,4};
	const GLubyte sizes[] = {1, 1, 2, 2, 4, 4, 4};
	return sizes[type];
}

@@ -116,7 +116,7 @@ static unsigned attrib_align(const GPUVertAttr *a)
	}
}

unsigned vertex_buffer_size(const GPUVertFormat* format, unsigned vertex_len)
unsigned vertex_buffer_size(const GPUVertFormat *format, unsigned vertex_len)
{
#if TRUST_NO_ONE
	assert(format->packed && format->stride > 0);
@@ -124,10 +124,10 @@ unsigned vertex_buffer_size(const GPUVertFormat* format, unsigned vertex_len)
	return format->stride * vertex_len;
}

static const char* copy_attrib_name(GPUVertFormat* format, const char* name)
static const char *copy_attrib_name(GPUVertFormat *format, const char *name)
{
	/* strncpy does 110% of what we need; let's do exactly 100% */
	char* name_copy = format->names + format->name_offset;
	char *name_copy = format->names + format->name_offset;
	unsigned available = GPU_VERT_ATTR_NAMES_BUF_LEN - format->name_offset;
	bool terminated = false;

@@ -149,7 +149,9 @@ static const char* copy_attrib_name(GPUVertFormat* format, const char* name)
	return name_copy;
}

unsigned GPU_vertformat_attr_add(GPUVertFormat* format, const char* name, GPUVertCompType comp_type, unsigned comp_len, GPUVertFetchMode fetch_mode)
unsigned GPU_vertformat_attr_add(
        GPUVertFormat *format, const char *name,
        GPUVertCompType comp_type, unsigned comp_len, GPUVertFetchMode fetch_mode)
{
#if TRUST_NO_ONE
	assert(format->name_len < GPU_VERT_ATTR_MAX_LEN); /* there's room for more */
@@ -178,7 +180,7 @@ unsigned GPU_vertformat_attr_add(GPUVertFormat* format, const char* name, GPUVer
	format->name_len++; /* multiname support */

	const unsigned attrib_id = format->attr_len++;
	GPUVertAttr* attrib = format->attribs + attrib_id;
	GPUVertAttr *attrib = format->attribs + attrib_id;

	attrib->name[attrib->name_len++] = copy_attrib_name(format, name);
	attrib->comp_type = comp_type;
@@ -191,9 +193,9 @@ unsigned GPU_vertformat_attr_add(GPUVertFormat* format, const char* name, GPUVer
	return attrib_id;
}

void GPU_vertformat_alias_add(GPUVertFormat* format, const char* alias)
void GPU_vertformat_alias_add(GPUVertFormat *format, const char *alias)
{
	GPUVertAttr* attrib = format->attribs + (format->attr_len - 1);
	GPUVertAttr *attrib = format->attribs + (format->attr_len - 1);
#if TRUST_NO_ONE
	assert(format->name_len < GPU_VERT_ATTR_MAX_LEN); /* there's room for more */
	assert(attrib->name_len < GPU_VERT_ATTR_MAX_NAMES);
@@ -221,7 +223,7 @@ static void show_pack(unsigned a_idx, unsigned sz, unsigned pad)
	}
#endif

void VertexFormat_pack(GPUVertFormat* format)
void VertexFormat_pack(GPUVertFormat *format)
{
	/* For now, attributes are packed in the order they were added,
	 * making sure each attrib is naturally aligned (add padding where necessary)
@@ -231,7 +233,7 @@ void VertexFormat_pack(GPUVertFormat* format)
	/* TODO: realloc just enough to hold the final combo string. And just enough to
	 * hold used attribs, not all 16. */

	GPUVertAttr* a0 = format->attribs + 0;
	GPUVertAttr *a0 = format->attribs + 0;
	a0->offset = 0;
	unsigned offset = a0->sz;

@@ -240,7 +242,7 @@ void VertexFormat_pack(GPUVertFormat* format)
#endif

	for (unsigned a_idx = 1; a_idx < format->attr_len; ++a_idx) {
		GPUVertAttr* a = format->attribs + a_idx;
		GPUVertAttr *a = format->attribs + a_idx;
		unsigned mid_padding = padding(offset, attrib_align(a));
		offset += mid_padding;
		a->offset = offset;
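`VertexFormat_pack()` keeps each attribute naturally aligned by advancing the running offset with `padding(offset, attrib_align(a))` before assigning `a->offset`. The helper is presumably along these lines (a sketch, not the module's definition):

static unsigned padding(unsigned offset, unsigned alignment)
{
	const unsigned mod = offset % alignment;
	return (mod == 0) ? 0 : (alignment - mod);
}

/* e.g. offset 6 with a 4-byte-aligned attribute: padding is 2, so it lands at 8. */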