
Compare commits

...

25 Commits

Author SHA1 Message Date
784da45955 GLDebug: Trim NVidia debug output
This detailed buffer info is not needed unless you are going deep into
performance profiling, in which case you can still disable this compile option.

This makes user report logs much more readable.
2020-09-05 16:47:31 +02:00
263ee86fb4 GLTexture: Fix texture creation when using direct state access
There were two errors:
- A texture object must have been bound to target_ once before it can be used with DSA functions.
- We need to use glTextureSubImage for DSA updates.
2020-09-05 16:28:07 +02:00
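For context, a minimal sketch of the DSA pattern this fix relies on (plain OpenGL 4.5, not Blender's wrapper code; pixels is a placeholder): a name from glGenTextures only becomes a texture object once it is bound to its target, and updates then go through the glTextureSubImage* entry points.

/* Illustrative only. */
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);               /* Creates the object for target_. */
glTextureStorage2D(tex, 1, GL_RGBA8, 256, 256);  /* DSA: allocate storage. */
glTextureSubImage2D(tex, 0, 0, 0, 256, 256,      /* DSA: upload via glTextureSubImage. */
                    GL_RGBA, GL_UNSIGNED_BYTE, pixels);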
Clément
d1479c437b GLTexture: Add Feedback loop check
The check is better than before, as it takes the attached
mip level into consideration.
2020-09-05 14:48:44 +02:00
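Purely illustrative sketch of such a check (the struct and field names below are invented for illustration, not the actual GLTexture code): a feedback loop exists when the mip level being sampled is also attached to the framebuffer currently being rendered to.

/* Hypothetical attachment record, invented for illustration. */
typedef struct Attachment {
  const void *tex;
  int mip;
} Attachment;

static bool texture_has_feedback_loop(const Attachment *attachments,
                                      int attachment_len,
                                      const void *sampled_tex,
                                      int sampled_mip)
{
  for (int i = 0; i < attachment_len; i++) {
    if (attachments[i].tex == sampled_tex && attachments[i].mip == sampled_mip) {
      return true; /* Reading and writing the same mip level. */
    }
  }
  return false;
}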
Clément
0038b598cf Cleanup: GPUFramebuffer: Remove unused variable 2020-09-05 14:08:53 +02:00
Clément
efd00731f5 GLTexture: Add direct state access support 2020-09-05 14:08:02 +02:00
Clément
219e263b59 GLState: Add texture multibind and remove redundant binds 2020-09-05 14:06:50 +02:00
Clément
5265ed7be2 Math Utils: Add bitscan 64bit version 2020-09-05 14:04:00 +02:00
5b314f884d GPUTexture: OCIO: Workaround new missing bind error checking
We cannot change the texture bind points since the interface expects
4/5 textures to be bound. Changing the uniform to avoid one bind
makes the system think one texture is missing.

Avoid this by creating a dummy texture and binding it to the empty
slot.
2020-09-05 02:55:30 +02:00
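The pattern, as it appears in the ocio_impl_glsl diff further down (copied here for readability):

if (!use_overlay) {
  /* Avoid a missing bind: the dummy texture fills the unused overlay slot. */
  GPU_texture_bind(shader_lut->texture_dummy, 1);
}
GPU_texture_bind(shader_lut->texture, 2);
GPU_texture_bind(shader_lut->texture_display, 3);
GPU_texture_bind(shader_curvemap->texture, 4);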
af095ab71f GPUDebug: Only enable resource checking in debug mode 2020-09-05 02:55:30 +02:00
6f998edf2a Cleanup: GPUTexture: Move debug messages outside of creation function
# Conflicts:
#	source/blender/gpu/intern/gpu_framebuffer.cc
2020-09-05 02:55:22 +02:00
bd081711d6 GPUTexture: Change texture creation API
This is to modernize the API:
- Add meaningful name to all textures (except DRW textures).
- Remove the mostly unused err_out argument (it was only used for offscreen Python).
- Add mipmap count to creation functions for future changes.
- Clarify the data usage in creation functions.

This is a cleanup commit; there is no functional change.
2020-09-05 01:58:07 +02:00
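Based on the declarations changed in GPU_texture.h below, a typical call site now reads roughly like this (hedged example; tex_name, w, h and data are placeholders):

/* name, width, height, mip count, format, optional float data */
GPUTexture *tex = GPU_texture_create_2d(tex_name, w, h, 1, GPU_RGBA16F, data);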
42e5de3f4d Cleanup: GPUTexture: Remove use of GPU_texture_create_nD
Use the creation + update functions instead.
2020-09-04 23:13:12 +02:00
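The replacement pattern used throughout this diff: create the texture without content, then upload with the matching data format, for example (taken from the SMAA texture setup below):

txl->smaa_search_tx = GPU_texture_create_2d(
    "smaa_search", SEARCHTEX_WIDTH, SEARCHTEX_HEIGHT, 1, GPU_R8, NULL);
GPU_texture_update(txl->smaa_search_tx, GPU_DATA_UNSIGNED_BYTE, searchTexBytes);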
fd4f766d58 GLTexture: Add validation for empty slots before drawing
This is to have better error detection in debug builds.
This is not a replacement for a full check like in RenderDoc, but it
might catch some issues early on.
2020-09-04 22:19:36 +02:00
922abf37b2 OCIO: Use GPUTexture instead of OpenGL calls and remove gl dependency
Simple cleanup. Part of the Vulkan Port T68990.
2020-09-04 22:18:10 +02:00
cfba534403 GPUTexture: Implement back 3D texture resize
But this time it is implemented outside the texture module.
This makes more sense, as only the volume textures need this feature.
2020-09-04 21:10:54 +02:00
1394c0b852 BLI: Math: Add equals_v3v3_int 2020-09-04 21:07:26 +02:00
Clément
84cb571c1f Cleanup: GPUTexture: Clean framebuffer attachment 2020-09-04 16:35:02 +02:00
Clément
34921b47f7 Cleanup: GPUFrameBuffer: Use Texture class instead of gl calls 2020-09-04 13:51:25 +02:00
5f7cc8cf51 GLTexture: Add back texture proxy check
Clean up the feature itself:
- Check 3D texture sizes against the correct limit.
- Add a check for compressed textures.
2020-09-04 02:45:55 +02:00
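For reference, a sketch of the classic GL proxy-texture query this kind of check builds on (illustrative, not the exact Blender code; w, h, d are placeholders):

/* Ask the driver whether a 3D texture of this size/format would be accepted. */
glTexImage3D(GL_PROXY_TEXTURE_3D, 0, GL_RGBA16F, w, h, d, 0, GL_RGBA, GL_FLOAT, NULL);
GLint probed_w = 0;
glGetTexLevelParameteriv(GL_PROXY_TEXTURE_3D, 0, GL_TEXTURE_WIDTH, &probed_w);
if (probed_w == 0) {
  /* The implementation cannot create a texture with these dimensions. */
}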
ffa351d533 GPUTexture: Add support for samplers
This just adds back the support.
This commit also includes a bit of cleanup.
2020-09-04 02:45:55 +02:00
e58408cbaf GPUTexture: GL Backend Isolation
This is a massive cleanup needed for Vulkan support T68990. It provides:

- More meaningful enums with conversion functions.
- Less hacky support for arrays and cubemaps (all considered layered).
- More in line with the stateless design of Vulkan and modern GL.
- Method fallbacks use framebuffer functions that are wrapped
  instead of being implemented inside the texture module.

What is not in there:
- API changes.
- Sampler support (breaks a few effects).
2020-09-04 02:45:53 +02:00
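An example of the kind of enum conversion helper this refers to (hypothetical sketch, not the actual gl_texture code):

/* Hypothetical: map the backend-agnostic format enum to the GL internal format. */
static GLenum to_gl_internal_format(eGPUTextureFormat format)
{
  switch (format) {
    case GPU_RGBA8:
      return GL_RGBA8;
    case GPU_RGBA16F:
      return GL_RGBA16F;
    case GPU_R8:
      return GL_R8;
    default:
      BLI_assert(0);
      return GL_RGBA8;
  }
}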
19d056cb85 GPUFramebuffer: Encapsulate single attachment clear
This is in preparation for using it to clear a single texture.

Also includes minor cleanups: not using the texture target in an
assert, and adding enum operators.
2020-09-04 02:44:33 +02:00
da306fb2fc Cleanup: EEVEE: Use correct array size and use equals_v2v2_int 2020-09-04 02:41:57 +02:00
63b6e87d98 GPUTexture: Add skeleton of the new GLTexture class 2020-09-04 02:41:53 +02:00
f7e1c007ab GPUTexture: Remove bind to edit calls
This is going to be unnecessary after the GPU OpenGL texture backend refactor.

For now add a save/restore mechanism to leave the state untouched.

Also remove some calls where the caller would bind to a particular binding
point and set the shader uniform.
2020-09-04 02:39:05 +02:00
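A sketch of the save/restore idea (illustrative GL, not the exact helper; tex_id is a placeholder): query the current binding, edit, then rebind so the global state is left untouched.

GLint previous = 0;
glGetIntegerv(GL_TEXTURE_BINDING_2D, &previous);
glBindTexture(GL_TEXTURE_2D, tex_id);
/* ... modify the texture ... */
glBindTexture(GL_TEXTURE_2D, (GLuint)previous); /* Restore previous binding. */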
62 changed files with 2526 additions and 2474 deletions

View File

@@ -34,8 +34,6 @@
#include <sstream>
#include <string.h>
#include "glew-mx.h"
#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable : 4251 4275)
@@ -99,7 +97,6 @@ struct OCIO_GLSLShader {
/** Uniform locations. */
GLint dither_loc;
GLint overlay_loc;
GLint overlay_tex_loc;
GLint predivide_loc;
GLint curve_mapping_loc;
GLint ubo_bind;
@@ -110,9 +107,10 @@ struct OCIO_GLSLShader {
struct OCIO_GLSLLut3d {
/** Cache IDs */
std::string cacheId;
/** OpenGL Texture handles. 0 if not allocated. */
GLuint texture;
GLuint texture_display;
/** OpenGL Texture handles. NULL if not allocated. */
GPUTexture *texture;
GPUTexture *texture_display;
GPUTexture *texture_dummy;
/** Error checking. */
bool valid;
};
@@ -123,7 +121,7 @@ struct OCIO_GLSLCurveMappping {
/** GPU Uniform Buffer handle. 0 if not allocated. */
GPUUniformBuf *buffer;
/** OpenGL Texture handles. 0 if not allocated. */
GLuint texture;
GPUTexture *texture;
/** Error checking. */
bool valid;
};
@@ -186,11 +184,10 @@ static void updateGLSLShader(OCIO_GLSLShader *shader,
NULL,
NULL,
NULL,
__func__);
"OCIOShader");
if (shader->shader) {
shader->dither_loc = GPU_shader_get_uniform(shader->shader, "dither");
shader->overlay_tex_loc = GPU_shader_get_uniform(shader->shader, "overlay_texture");
shader->overlay_loc = GPU_shader_get_uniform(shader->shader, "overlay");
shader->predivide_loc = GPU_shader_get_uniform(shader->shader, "predivide");
shader->curve_mapping_loc = GPU_shader_get_uniform(shader->shader, "curve_mapping");
@@ -202,6 +199,7 @@ static void updateGLSLShader(OCIO_GLSLShader *shader,
/* Set texture bind point uniform once. This is saved by the shader. */
GPUShader *sh = shader->shader;
GPU_shader_uniform_int(sh, GPU_shader_get_uniform(sh, "image_texture"), 0);
GPU_shader_uniform_int(sh, GPU_shader_get_uniform(sh, "overlay_texture"), 1);
GPU_shader_uniform_int(sh, GPU_shader_get_uniform(sh, "lut3d_texture"), 2);
GPU_shader_uniform_int(sh, GPU_shader_get_uniform(sh, "lut3d_display_texture"), 3);
GPU_shader_uniform_int(sh, GPU_shader_get_uniform(sh, "curve_mapping_texture"), 4);
@@ -258,24 +256,13 @@ static void updateGLSLLut3d(OCIO_GLSLLut3d *lut3d,
for (int i = 0; i < 2; i++) {
ConstProcessorRcPtr *processor = ocio_processors[i];
GLuint texture = (&lut3d->texture)[i];
GPUTexture *texture = (&lut3d->texture)[i];
(*processor)->getGpuLut3D(lut_data, *shader_desc);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_3D, texture);
glTexSubImage3D(GL_TEXTURE_3D,
0,
0,
0,
0,
LUT3D_EDGE_SIZE,
LUT3D_EDGE_SIZE,
LUT3D_EDGE_SIZE,
GL_RGB,
GL_FLOAT,
lut_data);
int offset[3] = {0, 0, 0};
int extent[3] = {LUT3D_EDGE_SIZE, LUT3D_EDGE_SIZE, LUT3D_EDGE_SIZE};
GPU_texture_update_sub(texture, GPU_DATA_FLOAT, lut_data, UNPACK3(offset), UNPACK3(extent));
}
MEM_freeN(lut_data);
@@ -295,41 +282,31 @@ static void ensureGLSLLut3d(OCIO_GLSLLut3d **lut3d_ptr,
OCIO_GLSLLut3d *lut3d = OBJECT_GUARDED_NEW(OCIO_GLSLLut3d);
glGenTextures(3, &lut3d->texture);
int extent[3] = {LUT3D_EDGE_SIZE, LUT3D_EDGE_SIZE, LUT3D_EDGE_SIZE};
for (int i = 0; i < 2; i++) {
GLuint texture = (&lut3d->texture)[i];
lut3d->texture = GPU_texture_create_3d("OCIOLut", UNPACK3(extent), 1, GPU_RGB16F, NULL);
GPU_texture_filter_mode(lut3d->texture, true);
GPU_texture_wrap_mode(lut3d->texture, false, true);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_3D, texture);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
lut3d->texture_display = GPU_texture_create_3d(
"OCIOLutDisplay", UNPACK3(extent), 1, GPU_RGB16F, NULL);
GPU_texture_filter_mode(lut3d->texture_display, true);
GPU_texture_wrap_mode(lut3d->texture_display, false, true);
glTexImage3D(GL_TEXTURE_3D,
0,
GL_RGB16F,
LUT3D_EDGE_SIZE,
LUT3D_EDGE_SIZE,
LUT3D_EDGE_SIZE,
0,
GL_RGB,
GL_FLOAT,
NULL);
}
lut3d->texture_dummy = GPU_texture_create_error(2, false);
updateGLSLLut3d(lut3d, processor_scene_to_ui, processpr_ui_to_display, shaderDesc, cache_id);
lut3d->valid = (lut3d->texture != 0);
lut3d->valid = (lut3d->texture && lut3d->texture_display);
*lut3d_ptr = lut3d;
}
static void freeGLSLLut3d(OCIO_GLSLLut3d *lut3d)
{
glDeleteTextures(1, &lut3d->texture);
GPU_texture_free(lut3d->texture);
GPU_texture_free(lut3d->texture_display);
GPU_texture_free(lut3d->texture_dummy);
OBJECT_GUARDED_DELETE(lut3d, OCIO_GLSLLut3d);
}
@@ -342,19 +319,11 @@ static void freeGLSLLut3d(OCIO_GLSLLut3d *lut3d)
static void allocateCurveMappingTexture(OCIO_GLSLCurveMappping *curvemap,
OCIO_CurveMappingSettings *curve_mapping_settings)
{
glGenTextures(1, &curvemap->texture);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_1D, curvemap->texture);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
/* Do not initialize. Only if used. */
int lut_size = curve_mapping_settings ? curve_mapping_settings->lut_size : 1;
glTexImage1D(GL_TEXTURE_1D, 0, GL_RGBA16F, lut_size, 0, GL_RGBA, GL_FLOAT, NULL);
/* Do not initialize. Only if used. */
curvemap->texture = GPU_texture_create_1d("OCIOCurveMap", lut_size, 1, GPU_RGBA16F, NULL);
GPU_texture_filter_mode(curvemap->texture, false);
GPU_texture_wrap_mode(curvemap->texture, false, true);
}
/* curve_mapping_settings can be null. In this case we alloc a dummy curvemap. */
@@ -381,7 +350,7 @@ static void ensureGLSLCurveMapping(OCIO_GLSLCurveMappping **curvemap_ptr,
static void freeGLSLCurveMapping(OCIO_GLSLCurveMappping *curvemap)
{
glDeleteTextures(1, &curvemap->texture);
GPU_texture_free(curvemap->texture);
GPU_uniformbuf_free(curvemap->buffer);
OBJECT_GUARDED_DELETE(curvemap, OCIO_GLSLCurveMappping);
@@ -400,20 +369,16 @@ static void updateGLSLCurveMapping(OCIO_GLSLCurveMappping *curvemap,
if (curvemap->cacheId == 0) {
/* This cache was previously used as dummy. Recreate the texture. */
glDeleteTextures(1, &curvemap->texture);
GPU_texture_free(curvemap->texture);
allocateCurveMappingTexture(curvemap, curve_mapping_settings);
}
/* Update texture. */
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_1D, curvemap->texture);
glTexSubImage1D(GL_TEXTURE_1D,
0,
0,
curve_mapping_settings->lut_size,
GL_RGBA,
GL_FLOAT,
curve_mapping_settings->lut);
int offset[3] = {0, 0, 0};
int extent[3] = {curve_mapping_settings->lut_size, 0, 0};
const float *pixels = curve_mapping_settings->lut;
GPU_texture_update_sub(
curvemap->texture, GPU_DATA_FLOAT, pixels, UNPACK3(offset), UNPACK3(extent));
/* Update uniforms. */
OCIO_GLSLCurveMappingParameters data;
@@ -581,16 +546,13 @@ bool OCIOImpl::setupGLSLDraw(OCIO_GLSLDrawState **state_r,
/* Bind textures to sampler units. Texture 0 is set by caller.
* Uniforms have already been set for texture bind points.*/
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_3D, shader_lut->texture);
glActiveTexture(GL_TEXTURE3);
glBindTexture(GL_TEXTURE_3D, shader_lut->texture_display);
glActiveTexture(GL_TEXTURE4);
glBindTexture(GL_TEXTURE_1D, shader_curvemap->texture);
glActiveTexture(GL_TEXTURE0);
if (!use_overlay) {
/* Avoid missing binds. */
GPU_texture_bind(shader_lut->texture_dummy, 1);
}
GPU_texture_bind(shader_lut->texture, 2);
GPU_texture_bind(shader_lut->texture_display, 3);
GPU_texture_bind(shader_curvemap->texture, 4);
/* Bind UBO. */
GPU_uniformbuf_bind(shader_curvemap->buffer, shader->ubo_bind);
@@ -599,12 +561,11 @@ bool OCIOImpl::setupGLSLDraw(OCIO_GLSLDrawState **state_r,
immBindShader(shader->shader);
/* Bind Shader and set uniforms. */
// glUseProgram(shader->program);
glUniform1f(shader->dither_loc, dither);
glUniform1i(shader->overlay_tex_loc, use_overlay ? 1 : 0);
glUniform1i(shader->overlay_loc, use_overlay);
glUniform1i(shader->predivide_loc, use_predivide);
glUniform1i(shader->curve_mapping_loc, use_curve_mapping);
// GPU_shader_bind(shader->shader);
GPU_shader_uniform_float(shader->shader, shader->dither_loc, dither);
GPU_shader_uniform_int(shader->shader, shader->overlay_loc, use_overlay);
GPU_shader_uniform_int(shader->shader, shader->predivide_loc, use_predivide);
GPU_shader_uniform_int(shader->shader, shader->curve_mapping_loc, use_curve_mapping);
return true;
}

View File

@@ -187,8 +187,6 @@ static GPUTexture *blf_batch_cache_texture_load(void)
int offset_x = bitmap_len_landed % tex_width;
int offset_y = bitmap_len_landed / tex_width;
GPU_texture_bind(gc->texture, 0);
/* TODO(germano): Update more than one row in a single call. */
while (remain) {
int remain_row = tex_width - offset_x;
@@ -229,16 +227,17 @@ void blf_batch_draw(void)
#endif
GPUTexture *texture = blf_batch_cache_texture_load();
GPU_texture_bind(texture, 0);
GPU_vertbuf_data_len_set(g_batch.verts, g_batch.glyph_len);
GPU_vertbuf_use(g_batch.verts); /* send data */
GPU_batch_program_set_builtin(g_batch.batch, GPU_SHADER_TEXT);
GPU_batch_uniform_1i(g_batch.batch, "glyph", 0);
GPU_batch_texture_bind(g_batch.batch, "glyph", texture);
GPU_batch_draw(g_batch.batch);
GPU_blend(GPU_BLEND_NONE);
GPU_texture_unbind(texture);
/* restart to 1st vertex data pointers */
GPU_vertbuf_attr_get_raw_data(g_batch.verts, g_batch.pos_loc, &g_batch.pos_step);
GPU_vertbuf_attr_get_raw_data(g_batch.verts, g_batch.col_loc, &g_batch.col_step);

View File

@@ -508,8 +508,7 @@ void blf_glyph_render(FontBLF *font, GlyphCacheBLF *gc, GlyphBLF *g, float x, fl
if (gc->texture) {
GPU_texture_free(gc->texture);
}
gc->texture = GPU_texture_create_nD(
w, h, 0, 1, NULL, GPU_R8, GPU_DATA_UNSIGNED_BYTE, 0, false, NULL);
gc->texture = GPU_texture_create_1d_array(__func__, w, h, 1, GPU_R8, NULL);
gc->bitmap_len_landed = 0;
}

View File

@@ -95,7 +95,7 @@ static GPUTexture *gpu_texture_create_tile_mapping(Image *ima, const int multivi
tile_info[3] = tile->runtime.tilearray_size[1] / array_h;
}
GPUTexture *tex = GPU_texture_create_1d_array(width, 2, GPU_RGBA32F, data, NULL);
GPUTexture *tex = GPU_texture_create_1d_array(ima->id.name + 2, width, 2, 1, GPU_RGBA32F, data);
GPU_texture_mipmap_mode(tex, false, false);
MEM_freeN(data);
@@ -180,9 +180,7 @@ static GPUTexture *gpu_texture_create_tile_array(Image *ima, ImBuf *main_ibuf)
const bool use_high_bitdepth = (ima->flag & IMA_HIGH_BITDEPTH);
/* Create Texture without content. */
GPUTexture *tex = IMB_touch_gpu_texture(
main_ibuf, arraywidth, arrayheight, arraylayers, use_high_bitdepth);
GPU_texture_bind(tex, 0);
ima->id.name + 2, main_ibuf, arraywidth, arrayheight, arraylayers, use_high_bitdepth);
/* Upload each tile one by one. */
LISTBASE_FOREACH (ImageTile *, tile, &ima->tiles) {
@@ -225,8 +223,6 @@ static GPUTexture *gpu_texture_create_tile_array(Image *ima, ImBuf *main_ibuf)
GPU_texture_mipmap_mode(tex, false, true);
}
GPU_texture_unbind(tex);
return tex;
}
@@ -251,6 +247,7 @@ static GPUTexture **get_image_gpu_texture_ptr(Image *ima,
static GPUTexture *image_gpu_texture_error_create(eGPUTextureTarget textarget)
{
fprintf(stderr, "GPUTexture: Blender Texture Not Loaded!\n");
switch (textarget) {
case TEXTARGET_2D_ARRAY:
return GPU_texture_create_error(2, true);
@@ -320,12 +317,11 @@ static GPUTexture *image_get_gpu_texture(Image *ima,
(ima ? (ima->alpha_mode != IMA_ALPHA_STRAIGHT) : false) :
(ima ? (ima->alpha_mode == IMA_ALPHA_PREMUL) : true);
*tex = IMB_create_gpu_texture(ibuf_intern, use_high_bitdepth, store_premultiplied);
*tex = IMB_create_gpu_texture(
ima->id.name + 2, ibuf_intern, use_high_bitdepth, store_premultiplied);
if (GPU_mipmap_enabled()) {
GPU_texture_bind(*tex, 0);
GPU_texture_generate_mipmap(*tex);
GPU_texture_unbind(*tex);
if (ima) {
ima->gpuflag |= IMA_GPU_MIPMAP_COMPLETE;
}
@@ -666,8 +662,6 @@ static void gpu_texture_update_from_ibuf(
}
}
GPU_texture_bind(tex, 0);
if (scaled) {
/* Slower update where we first have to scale the input pixels. */
if (tile != NULL) {

View File

@@ -1917,6 +1917,7 @@ GPUTexture *BKE_movieclip_get_gpu_texture(MovieClip *clip, MovieClipUser *cuser)
/* check if we have a valid image buffer */
ImBuf *ibuf = BKE_movieclip_get_ibuf(clip, cuser);
if (ibuf == NULL) {
fprintf(stderr, "GPUTexture: Blender Texture Not Loaded!\n");
*tex = GPU_texture_create_error(2, false);
return *tex;
}
@@ -1924,7 +1925,7 @@ GPUTexture *BKE_movieclip_get_gpu_texture(MovieClip *clip, MovieClipUser *cuser)
/* This only means RGBA16F instead of RGBA32F. */
const bool high_bitdepth = false;
const bool store_premultiplied = ibuf->rect_float ? false : true;
*tex = IMB_create_gpu_texture(ibuf, high_bitdepth, store_premultiplied);
*tex = IMB_create_gpu_texture(clip->id.name + 2, ibuf, high_bitdepth, store_premultiplied);
/* Do not generate mips for movieclips... too slow. */
GPU_texture_mipmap_mode(*tex, false, true);

View File

@@ -500,7 +500,7 @@ static void studiolight_create_equirect_radiance_gputexture(StudioLight *sl)
ImBuf *ibuf = sl->equirect_radiance_buffer;
sl->equirect_radiance_gputexture = GPU_texture_create_2d(
ibuf->x, ibuf->y, GPU_RGBA16F, ibuf->rect_float, NULL);
"studiolight_radiance", ibuf->x, ibuf->y, 1, GPU_RGBA16F, ibuf->rect_float);
GPUTexture *tex = sl->equirect_radiance_gputexture;
GPU_texture_filter_mode(tex, true);
GPU_texture_wrap_mode(tex, true, true);
@@ -520,16 +520,9 @@ static void studiolight_create_matcap_gputexture(StudioLightImage *sli)
copy_v3_v3(*offset3, *offset4);
}
sli->gputexture = GPU_texture_create_nD(ibuf->x,
ibuf->y,
0,
2,
gpu_matcap_3components,
GPU_R11F_G11F_B10F,
GPU_DATA_FLOAT,
0,
false,
NULL);
sli->gputexture = GPU_texture_create_2d("matcap", ibuf->x, ibuf->y, 1, GPU_R11F_G11F_B10F, NULL);
GPU_texture_update(sli->gputexture, GPU_DATA_FLOAT, gpu_matcap_3components);
MEM_SAFE_FREE(gpu_matcap_3components);
}
@@ -562,7 +555,7 @@ static void studiolight_create_equirect_irradiance_gputexture(StudioLight *sl)
BKE_studiolight_ensure_flag(sl, STUDIOLIGHT_EQUIRECT_IRRADIANCE_IMAGE_CALCULATED);
ImBuf *ibuf = sl->equirect_irradiance_buffer;
sl->equirect_irradiance_gputexture = GPU_texture_create_2d(
ibuf->x, ibuf->y, GPU_RGBA16F, ibuf->rect_float, NULL);
"studiolight_irradiance", ibuf->x, ibuf->y, 1, GPU_RGBA16F, ibuf->rect_float);
GPUTexture *tex = sl->equirect_irradiance_gputexture;
GPU_texture_filter_mode(tex, true);
GPU_texture_wrap_mode(tex, true, true);

View File

@@ -31,6 +31,7 @@ extern "C" {
/* Search the value from LSB to MSB for a set bit. Returns index of this bit. */
MINLINE int bitscan_forward_i(int a);
MINLINE unsigned int bitscan_forward_uint(unsigned int a);
MINLINE unsigned int bitscan_forward_uint64(unsigned long long a);
/* Similar to above, but also clears the bit. */
MINLINE int bitscan_forward_clear_i(int *a);
@@ -39,6 +40,7 @@ MINLINE unsigned int bitscan_forward_clear_uint(unsigned int *a);
/* Search the value from MSB to LSB for a set bit. Returns index of this bit. */
MINLINE int bitscan_reverse_i(int a);
MINLINE unsigned int bitscan_reverse_uint(unsigned int a);
MINLINE unsigned int bitscan_reverse_uint64(unsigned long long a);
/* Similar to above, but also clears the bit. */
MINLINE int bitscan_reverse_clear_i(int *a);

View File

@@ -338,6 +338,7 @@ MINLINE bool equals_v3v3(const float a[3], const float b[3]) ATTR_WARN_UNUSED_RE
MINLINE bool equals_v4v4(const float a[4], const float b[4]) ATTR_WARN_UNUSED_RESULT;
MINLINE bool equals_v2v2_int(const int v1[2], const int v2[2]) ATTR_WARN_UNUSED_RESULT;
MINLINE bool equals_v3v3_int(const int v1[3], const int v2[3]) ATTR_WARN_UNUSED_RESULT;
MINLINE bool equals_v4v4_int(const int v1[4], const int v2[4]) ATTR_WARN_UNUSED_RESULT;
MINLINE bool compare_v2v2(const float a[2],

View File

@@ -40,6 +40,18 @@ MINLINE unsigned int bitscan_forward_uint(unsigned int a)
#endif
}
MINLINE unsigned int bitscan_forward_uint64(unsigned long long a)
{
BLI_assert(a != 0);
#ifdef _MSC_VER
unsigned long ctz;
_BitScanForward64(&ctz, a);
return ctz;
#else
return (unsigned int)__builtin_ctzll(a);
#endif
}
MINLINE int bitscan_forward_i(int a)
{
return (int)bitscan_forward_uint((unsigned int)a);
@@ -69,6 +81,18 @@ MINLINE unsigned int bitscan_reverse_uint(unsigned int a)
#endif
}
MINLINE unsigned int bitscan_reverse_uint64(unsigned long long a)
{
BLI_assert(a != 0);
#ifdef _MSC_VER
unsigned long clz;
_BitScanReverse64(&clz, a);
return 63 - clz;
#else
return (unsigned int)__builtin_clzll(a);
#endif
}
MINLINE int bitscan_reverse_i(int a)
{
return (int)bitscan_reverse_uint((unsigned int)a);

View File

@@ -1313,6 +1313,11 @@ MINLINE bool equals_v2v2_int(const int v1[2], const int v2[2])
return ((v1[0] == v2[0]) && (v1[1] == v2[1]));
}
MINLINE bool equals_v3v3_int(const int v1[3], const int v2[3])
{
return ((v1[0] == v2[0]) && (v1[1] == v2[1]) && (v1[2] == v2[2]));
}
MINLINE bool equals_v4v4_int(const int v1[4], const int v2[4])
{
return ((v1[0] == v2[0]) && (v1[1] == v2[1]) && (v1[2] == v2[2]) && (v1[3] == v2[3]));

View File

@@ -202,7 +202,7 @@ void EEVEE_effects_init(EEVEE_ViewLayerData *sldata,
* Compute Mipmap texel alignment.
*/
for (int i = 0; i < 10; i++) {
int mip_size[2];
int mip_size[3];
GPU_texture_get_mipmap_size(txl->color, i, mip_size);
common_data->mip_ratio[i][0] = viewport_size[0] / (mip_size[0] * powf(2.0f, i));
common_data->mip_ratio[i][1] = viewport_size[1] / (mip_size[1] * powf(2.0f, i));
@@ -432,7 +432,7 @@ void EEVEE_create_minmax_buffer(EEVEE_Data *vedata, GPUTexture *depth_src, int l
int minmax_size[3], depth_size[3];
GPU_texture_get_mipmap_size(depth_src, 0, depth_size);
GPU_texture_get_mipmap_size(txl->maxzbuffer, 0, minmax_size);
bool is_full_res_minmaxz = (minmax_size[0] == depth_size[0] && minmax_size[1] == depth_size[1]);
bool is_full_res_minmaxz = equals_v2v2_int(minmax_size, depth_size);
DRW_stats_group_start("Max buffer");
/* Copy depth buffer to max texture top level */

View File

@@ -401,16 +401,9 @@ static bool eevee_lightcache_static_load(LightCache *lcache)
}
if (lcache->grid_tx.tex == NULL) {
lcache->grid_tx.tex = GPU_texture_create_nD(lcache->grid_tx.tex_size[0],
lcache->grid_tx.tex_size[1],
lcache->grid_tx.tex_size[2],
2,
lcache->grid_tx.data,
IRRADIANCE_FORMAT,
GPU_DATA_UNSIGNED_BYTE,
0,
false,
NULL);
lcache->grid_tx.tex = GPU_texture_create_2d_array(
"lightcache_irradiance", UNPACK3(lcache->grid_tx.tex_size), 1, IRRADIANCE_FORMAT, NULL);
GPU_texture_update(lcache->grid_tx.tex, GPU_DATA_UNSIGNED_BYTE, lcache->grid_tx.data);
if (lcache->grid_tx.tex == NULL) {
lcache->flag |= LIGHTCACHE_NOT_USABLE;
@@ -422,24 +415,19 @@ static bool eevee_lightcache_static_load(LightCache *lcache)
if (lcache->cube_tx.tex == NULL) {
if (GPU_arb_texture_cube_map_array_is_supported()) {
lcache->cube_tx.tex = GPU_texture_cube_create(lcache->cube_tx.tex_size[0],
lcache->cube_tx.tex_size[2] / 6,
lcache->cube_tx.data,
GPU_R11F_G11F_B10F,
GPU_DATA_10_11_11_REV,
NULL);
lcache->cube_tx.tex = GPU_texture_create_cube_array("lightcache_cubemaps",
lcache->cube_tx.tex_size[0],
lcache->cube_tx.tex_size[2] / 6,
lcache->mips_len + 1,
GPU_R11F_G11F_B10F,
NULL);
}
else {
lcache->cube_tx.tex = GPU_texture_create_nD(lcache->cube_tx.tex_size[0],
lcache->cube_tx.tex_size[1],
lcache->cube_tx.tex_size[2],
2,
lcache->cube_tx.data,
GPU_R11F_G11F_B10F,
GPU_DATA_10_11_11_REV,
0,
false,
NULL);
lcache->cube_tx.tex = GPU_texture_create_2d_array("lightcache_cubemaps_fallback",
UNPACK3(lcache->cube_tx.tex_size),
lcache->mips_len + 1,
GPU_R11F_G11F_B10F,
NULL);
}
if (lcache->cube_tx.tex == NULL) {
@@ -447,9 +435,9 @@ static bool eevee_lightcache_static_load(LightCache *lcache)
return false;
}
for (int mip = 0; mip < lcache->mips_len; mip++) {
GPU_texture_add_mipmap(
lcache->cube_tx.tex, GPU_DATA_10_11_11_REV, mip + 1, lcache->cube_mips[mip].data);
for (int mip = 0; mip <= lcache->mips_len; mip++) {
const void *data = (mip == 0) ? lcache->cube_tx.data : lcache->cube_mips[mip - 1].data;
GPU_texture_update_mipmap(lcache->cube_tx.tex, mip, GPU_DATA_10_11_11_REV, data);
}
GPU_texture_mipmap_mode(lcache->cube_tx.tex, true, true);
}

View File

@@ -468,7 +468,7 @@ void EEVEE_motion_blur_cache_finish(EEVEE_Data *vedata)
GPU_vertbuf_use(mb_hair->psys[i].hair_pos[mb_step]);
mb_hair->psys[i].hair_pos_tx[mb_step] = GPU_texture_create_from_vertbuf(
mb_hair->psys[i].hair_pos[mb_step]);
"hair_pos_motion_blur", mb_hair->psys[i].hair_pos[mb_step]);
}
}
break;

View File

@@ -56,27 +56,13 @@ void GPENCIL_antialiasing_init(struct GPENCIL_Data *vedata)
}
if (txl->smaa_search_tx == NULL) {
txl->smaa_search_tx = GPU_texture_create_nD(SEARCHTEX_WIDTH,
SEARCHTEX_HEIGHT,
0,
2,
searchTexBytes,
GPU_R8,
GPU_DATA_UNSIGNED_BYTE,
0,
false,
NULL);
txl->smaa_search_tx = GPU_texture_create_2d(
"smaa_search", SEARCHTEX_WIDTH, SEARCHTEX_HEIGHT, 1, GPU_R8, NULL);
GPU_texture_update(txl->smaa_search_tx, GPU_DATA_UNSIGNED_BYTE, searchTexBytes);
txl->smaa_area_tx = GPU_texture_create_nD(AREATEX_WIDTH,
AREATEX_HEIGHT,
0,
2,
areaTexBytes,
GPU_RG8,
GPU_DATA_UNSIGNED_BYTE,
0,
false,
NULL);
txl->smaa_area_tx = GPU_texture_create_2d(
"smaa_area", AREATEX_WIDTH, AREATEX_HEIGHT, 1, GPU_RG8, NULL);
GPU_texture_update(txl->smaa_area_tx, GPU_DATA_UNSIGNED_BYTE, areaTexBytes);
GPU_texture_filter_mode(txl->smaa_search_tx, true);
GPU_texture_filter_mode(txl->smaa_area_tx, true);

View File

@@ -143,11 +143,9 @@ void GPENCIL_render_init(GPENCIL_Data *vedata,
int w = BLI_rcti_size_x(rect);
int h = BLI_rcti_size_y(rect);
if (pix_col) {
GPU_texture_bind(txl->render_color_tx, 0);
GPU_texture_update_sub(txl->render_color_tx, GPU_DATA_FLOAT, pix_col, x, y, 0, w, h, 0);
}
if (pix_z) {
GPU_texture_bind(txl->render_depth_tx, 0);
GPU_texture_update_sub(txl->render_depth_tx, GPU_DATA_FLOAT, pix_z, x, y, 0, w, h, 0);
}
}

View File

@@ -79,7 +79,8 @@ static void select_engine_framebuffer_setup(void)
GPU_framebuffer_texture_attach(e_data.framebuffer_select_id, dtxl->depth, 0, 0);
if (e_data.texture_u32 == NULL) {
e_data.texture_u32 = GPU_texture_create_2d(size[0], size[1], GPU_R32UI, NULL, NULL);
e_data.texture_u32 = GPU_texture_create_2d(
"select_buf_ids", size[0], size[1], 1, GPU_R32UI, NULL);
GPU_framebuffer_texture_attach(e_data.framebuffer_select_id, e_data.texture_u32, 0, 0);
GPU_framebuffer_check_valid(e_data.framebuffer_select_id, NULL);

View File

@@ -242,27 +242,13 @@ void workbench_antialiasing_engine_init(WORKBENCH_Data *vedata)
/* TODO could be shared for all viewports. */
if (txl->smaa_search_tx == NULL) {
txl->smaa_search_tx = GPU_texture_create_nD(SEARCHTEX_WIDTH,
SEARCHTEX_HEIGHT,
0,
2,
searchTexBytes,
GPU_R8,
GPU_DATA_UNSIGNED_BYTE,
0,
false,
NULL);
txl->smaa_search_tx = GPU_texture_create_2d(
"smaa_search", SEARCHTEX_WIDTH, SEARCHTEX_HEIGHT, 1, GPU_R8, NULL);
GPU_texture_update(txl->smaa_search_tx, GPU_DATA_UNSIGNED_BYTE, searchTexBytes);
txl->smaa_area_tx = GPU_texture_create_nD(AREATEX_WIDTH,
AREATEX_HEIGHT,
0,
2,
areaTexBytes,
GPU_RG8,
GPU_DATA_UNSIGNED_BYTE,
0,
false,
NULL);
txl->smaa_area_tx = GPU_texture_create_2d(
"smaa_area", AREATEX_WIDTH, AREATEX_HEIGHT, 1, GPU_RG8, NULL);
GPU_texture_update(txl->smaa_area_tx, GPU_DATA_UNSIGNED_BYTE, areaTexBytes);
GPU_texture_filter_mode(txl->smaa_search_tx, true);
GPU_texture_filter_mode(txl->smaa_area_tx, true);

View File

@@ -79,8 +79,8 @@ static bool workbench_render_framebuffers_init(void)
* the other views will reuse these buffers */
if (dtxl->color == NULL) {
BLI_assert(dtxl->depth == NULL);
dtxl->color = GPU_texture_create_2d(size[0], size[1], GPU_RGBA16F, NULL, NULL);
dtxl->depth = GPU_texture_create_2d(size[0], size[1], GPU_DEPTH24_STENCIL8, NULL, NULL);
dtxl->color = GPU_texture_create_2d("txl.color", UNPACK2(size), 1, GPU_RGBA16F, NULL);
dtxl->depth = GPU_texture_create_2d("txl.depth", UNPACK2(size), 1, GPU_DEPTH24_STENCIL8, NULL);
}
if (!(dtxl->depth && dtxl->color)) {

View File

@@ -45,9 +45,9 @@ void workbench_volume_engine_init(WORKBENCH_Data *vedata)
if (txl->dummy_volume_tx == NULL) {
const float zero[4] = {0.0f, 0.0f, 0.0f, 0.0f};
const float one[4] = {1.0f, 1.0f, 1.0f, 1.0f};
txl->dummy_volume_tx = GPU_texture_create_3d(1, 1, 1, GPU_RGBA8, zero, NULL);
txl->dummy_shadow_tx = GPU_texture_create_3d(1, 1, 1, GPU_RGBA8, one, NULL);
txl->dummy_coba_tx = GPU_texture_create_1d(1, GPU_RGBA8, zero, NULL);
txl->dummy_volume_tx = GPU_texture_create_3d("dummy_volume", 1, 1, 1, 1, GPU_RGBA8, zero);
txl->dummy_shadow_tx = GPU_texture_create_3d("dummy_shadow", 1, 1, 1, 1, GPU_RGBA8, one);
txl->dummy_coba_tx = GPU_texture_create_1d("dummy_coba", 1, 1, GPU_RGBA8, zero);
}
}

View File

@@ -192,7 +192,7 @@ static void hair_batch_cache_ensure_procedural_pos(Hair *hair, ParticleHairCache
/* Create vbo immediately to bind to texture buffer. */
GPU_vertbuf_use(cache->proc_point_buf);
cache->point_tex = GPU_texture_create_from_vertbuf(cache->proc_point_buf);
cache->point_tex = GPU_texture_create_from_vertbuf("hair_point", cache->proc_point_buf);
}
static void hair_batch_cache_fill_strands_data(Hair *hair,
@@ -230,10 +230,11 @@ static void hair_batch_cache_ensure_procedural_strand_data(Hair *hair, ParticleH
/* Create vbo immediately to bind to texture buffer. */
GPU_vertbuf_use(cache->proc_strand_buf);
cache->strand_tex = GPU_texture_create_from_vertbuf(cache->proc_strand_buf);
cache->strand_tex = GPU_texture_create_from_vertbuf("hair_strand", cache->proc_strand_buf);
GPU_vertbuf_use(cache->proc_strand_seg_buf);
cache->strand_seg_tex = GPU_texture_create_from_vertbuf(cache->proc_strand_seg_buf);
cache->strand_seg_tex = GPU_texture_create_from_vertbuf("hair_strand_seg",
cache->proc_strand_seg_buf);
}
static void hair_batch_cache_ensure_procedural_final_points(ParticleHairCache *cache, int subdiv)
@@ -252,7 +253,8 @@ static void hair_batch_cache_ensure_procedural_final_points(ParticleHairCache *c
/* Create vbo immediately to bind to texture buffer. */
GPU_vertbuf_use(cache->final[subdiv].proc_buf);
cache->final[subdiv].proc_tex = GPU_texture_create_from_vertbuf(cache->final[subdiv].proc_buf);
cache->final[subdiv].proc_tex = GPU_texture_create_from_vertbuf("hair_proc",
cache->final[subdiv].proc_buf);
}
static void hair_batch_cache_fill_segments_indices(Hair *hair,

View File

@@ -809,7 +809,8 @@ static void particle_batch_cache_ensure_procedural_final_points(ParticleHairCach
/* Create vbo immediately to bind to texture buffer. */
GPU_vertbuf_use(cache->final[subdiv].proc_buf);
cache->final[subdiv].proc_tex = GPU_texture_create_from_vertbuf(cache->final[subdiv].proc_buf);
cache->final[subdiv].proc_tex = GPU_texture_create_from_vertbuf("part_proc",
cache->final[subdiv].proc_buf);
}
static void particle_batch_cache_ensure_procedural_strand_data(PTCacheEdit *edit,
@@ -1008,18 +1009,19 @@ static void particle_batch_cache_ensure_procedural_strand_data(PTCacheEdit *edit
/* Create vbo immediately to bind to texture buffer. */
GPU_vertbuf_use(cache->proc_strand_buf);
cache->strand_tex = GPU_texture_create_from_vertbuf(cache->proc_strand_buf);
cache->strand_tex = GPU_texture_create_from_vertbuf("part_strand", cache->proc_strand_buf);
GPU_vertbuf_use(cache->proc_strand_seg_buf);
cache->strand_seg_tex = GPU_texture_create_from_vertbuf(cache->proc_strand_seg_buf);
cache->strand_seg_tex = GPU_texture_create_from_vertbuf("part_strand_seg",
cache->proc_strand_seg_buf);
for (int i = 0; i < cache->num_uv_layers; i++) {
GPU_vertbuf_use(cache->proc_uv_buf[i]);
cache->uv_tex[i] = GPU_texture_create_from_vertbuf(cache->proc_uv_buf[i]);
cache->uv_tex[i] = GPU_texture_create_from_vertbuf("part_uv", cache->proc_uv_buf[i]);
}
for (int i = 0; i < cache->num_col_layers; i++) {
GPU_vertbuf_use(cache->proc_col_buf[i]);
cache->col_tex[i] = GPU_texture_create_from_vertbuf(cache->proc_col_buf[i]);
cache->col_tex[i] = GPU_texture_create_from_vertbuf("part_col", cache->proc_col_buf[i]);
}
}
@@ -1109,7 +1111,7 @@ static void particle_batch_cache_ensure_procedural_pos(PTCacheEdit *edit,
/* Create vbo immediately to bind to texture buffer. */
GPU_vertbuf_use(cache->proc_point_buf);
cache->point_tex = GPU_texture_create_from_vertbuf(cache->proc_point_buf);
cache->point_tex = GPU_texture_create_from_vertbuf("part_point", cache->proc_point_buf);
}
static void particle_batch_cache_ensure_pos_and_seg(PTCacheEdit *edit,

View File

@@ -258,17 +258,12 @@ static DRWVolumeGrid *volume_grid_cache_get(Volume *volume,
BKE_volume_grid_dense_voxels(volume, grid, dense_min, dense_max, voxels);
/* Create GPU texture. */
cache_grid->texture = GPU_texture_create_3d(resolution[0],
resolution[1],
resolution[2],
(channels == 3) ? GPU_RGB16F : GPU_R16F,
voxels,
NULL);
eGPUTextureFormat format = (channels == 3) ? GPU_RGB16F : GPU_R16F;
cache_grid->texture = GPU_texture_create_3d(
"volume_grid", UNPACK3(resolution), 1, format, voxels);
GPU_texture_bind(cache_grid->texture, 0);
GPU_texture_swizzle_set(cache_grid->texture, (channels == 3) ? "rgb1" : "rrr1");
GPU_texture_wrap_mode(cache_grid->texture, false, false);
GPU_texture_unbind(cache_grid->texture);
MEM_freeN(voxels);

View File

@@ -53,10 +53,10 @@ void DRW_transform_none(GPUTexture *tex)
GPUBatch *geom = DRW_cache_fullscreen_quad_get();
GPU_batch_program_set_builtin(geom, GPU_SHADER_2D_IMAGE_COLOR);
GPU_batch_uniform_4f(geom, "color", 1.0f, 1.0f, 1.0f, 1.0f);
GPU_batch_uniform_1i(geom, "image", 0);
GPU_batch_texture_bind(geom, "image", tex);
GPU_texture_bind(tex, 0);
GPU_batch_draw(geom);
GPU_texture_unbind(tex);
}

View File

@@ -238,7 +238,7 @@ void DRW_globals_update(void)
BKE_colorband_evaluate_table_rgba(&ramp, &colors, &col_size);
G_draw.ramp = GPU_texture_create_1d(col_size, GPU_RGBA8, colors, NULL);
G_draw.ramp = GPU_texture_create_1d("ramp", col_size, 1, GPU_RGBA8, colors);
MEM_freeN(colors);
}
@@ -503,12 +503,11 @@ static void DRW_evaluate_weight_to_color(const float weight, float result[4])
static GPUTexture *DRW_create_weight_colorramp_texture(void)
{
char error[256];
float pixels[256][4];
for (int i = 0; i < 256; i++) {
DRW_evaluate_weight_to_color(i / 255.0f, pixels[i]);
pixels[i][3] = 1.0f;
}
return GPU_texture_create_1d(256, GPU_SRGB8_A8, pixels[0], error);
return GPU_texture_create_1d("weight_color_ramp", 256, 1, GPU_SRGB8_A8, pixels[0]);
}

View File

@@ -117,7 +117,7 @@ static GPUTexture *create_transfer_function(int type, const struct ColorBand *co
break;
}
GPUTexture *tex = GPU_texture_create_1d(TFUNC_WIDTH, GPU_SRGB8_A8, data, NULL);
GPUTexture *tex = GPU_texture_create_1d("transf_func", TFUNC_WIDTH, 1, GPU_SRGB8_A8, data);
MEM_freeN(data);
@@ -128,9 +128,100 @@ static void swizzle_texture_channel_single(GPUTexture *tex)
{
/* Swizzle texture channels so that we get useful RGBA values when sampling
* a texture with fewer channels, e.g. when using density as color. */
GPU_texture_bind(tex, 0);
GPU_texture_swizzle_set(tex, "rrr1");
GPU_texture_unbind(tex);
}
static float *rescale_3d(const int dim[3],
const int final_dim[3],
int channels,
const float *fpixels)
{
const uint w = dim[0], h = dim[1], d = dim[2];
const uint fw = final_dim[0], fh = final_dim[1], fd = final_dim[2];
const uint xf = w / fw, yf = h / fh, zf = d / fd;
const uint pixel_count = fw * fh * fd;
float *nfpixels = (float *)MEM_mallocN(channels * sizeof(float) * pixel_count, __func__);
if (nfpixels) {
printf("Performance: You need to scale a 3D texture, feel the pain!\n");
for (uint k = 0; k < fd; k++) {
for (uint j = 0; j < fh; j++) {
for (uint i = 0; i < fw; i++) {
/* Obviously doing nearest filtering here,
* it's going to be slow in any case, let's not make it worse. */
float xb = i * xf;
float yb = j * yf;
float zb = k * zf;
uint offset = k * (fw * fh) + i * fh + j;
uint offset_orig = (zb) * (w * h) + (xb)*h + (yb);
if (channels == 4) {
nfpixels[offset * 4] = fpixels[offset_orig * 4];
nfpixels[offset * 4 + 1] = fpixels[offset_orig * 4 + 1];
nfpixels[offset * 4 + 2] = fpixels[offset_orig * 4 + 2];
nfpixels[offset * 4 + 3] = fpixels[offset_orig * 4 + 3];
}
else if (channels == 1) {
nfpixels[offset] = fpixels[offset_orig];
}
else {
BLI_assert(0);
}
}
}
}
}
return nfpixels;
}
/* Will resize input to fit GL system limits. */
static GPUTexture *create_volume_texture(const int dim[3],
eGPUTextureFormat format,
const float *data)
{
GPUTexture *tex = NULL;
int final_dim[3] = {UNPACK3(dim)};
while (1) {
tex = GPU_texture_create_3d("volume", UNPACK3(final_dim), 1, format, NULL);
if (tex != NULL) {
break;
}
if (final_dim[0] == 1 && final_dim[1] == 1 && final_dim[2] == 1) {
break;
}
for (int i = 0; i < 3; i++) {
final_dim[i] = max_ii(1, final_dim[i] / 2);
}
}
if (tex == NULL) {
printf("Error: Could not create 3D texture.\n");
tex = GPU_texture_create_error(3, false);
}
else if (equals_v3v3_int(dim, final_dim)) {
/* No need to resize, just upload the data. */
GPU_texture_update_sub(tex, GPU_DATA_FLOAT, data, 0, 0, 0, UNPACK3(final_dim));
}
else {
/* We need to resize the input. */
int channels = (format == GPU_R8) ? 1 : 4;
float *rescaled_data = rescale_3d(dim, final_dim, channels, data);
if (rescaled_data) {
GPU_texture_update_sub(tex, GPU_DATA_FLOAT, rescaled_data, 0, 0, 0, UNPACK3(final_dim));
MEM_freeN(rescaled_data);
}
else {
printf("Error: Could not allocate rescaled 3d texture!\n");
GPU_texture_free(tex);
tex = GPU_texture_create_error(3, false);
}
}
return tex;
}
static GPUTexture *create_field_texture(FluidDomainSettings *fds)
@@ -184,9 +275,7 @@ static GPUTexture *create_field_texture(FluidDomainSettings *fds)
return NULL;
}
GPUTexture *tex = GPU_texture_create_nD(
UNPACK3(fds->res), 3, field, GPU_R8, GPU_DATA_FLOAT, 0, true, NULL);
GPUTexture *tex = create_volume_texture(fds->res, GPU_R8, field);
swizzle_texture_channel_single(tex);
return tex;
}
@@ -203,11 +292,8 @@ static GPUTexture *create_density_texture(FluidDomainSettings *fds, int highres)
data = manta_smoke_get_density(fds->fluid);
}
GPUTexture *tex = GPU_texture_create_nD(
UNPACK3(dim), 3, data, GPU_R8, GPU_DATA_FLOAT, 0, true, NULL);
GPUTexture *tex = create_volume_texture(dim, GPU_R8, data);
swizzle_texture_channel_single(tex);
return tex;
}
@@ -235,8 +321,7 @@ static GPUTexture *create_color_texture(FluidDomainSettings *fds, int highres)
manta_smoke_get_rgba(fds->fluid, data, 0);
}
GPUTexture *tex = GPU_texture_create_nD(
dim[0], dim[1], dim[2], 3, data, GPU_RGBA8, GPU_DATA_FLOAT, 0, true, NULL);
GPUTexture *tex = create_volume_texture(dim, GPU_RGBA8, data);
MEM_freeN(data);
@@ -261,11 +346,8 @@ static GPUTexture *create_flame_texture(FluidDomainSettings *fds, int highres)
source = manta_smoke_get_flame(fds->fluid);
}
GPUTexture *tex = GPU_texture_create_nD(
dim[0], dim[1], dim[2], 3, source, GPU_R8, GPU_DATA_FLOAT, 0, true, NULL);
GPUTexture *tex = create_volume_texture(dim, GPU_R8, source);
swizzle_texture_channel_single(tex);
return tex;
}
@@ -356,14 +438,8 @@ void DRW_smoke_ensure(FluidModifierData *fmd, int highres)
fds->tex_flame_coba = create_transfer_function(TFUNC_FLAME_SPECTRUM, NULL);
}
if (!fds->tex_shadow) {
fds->tex_shadow = GPU_texture_create_nD(UNPACK3(fds->res),
3,
manta_smoke_get_shadow(fds->fluid),
GPU_R8,
GPU_DATA_FLOAT,
0,
true,
NULL);
fds->tex_shadow = create_volume_texture(
fds->res, GPU_R8, manta_smoke_get_shadow(fds->fluid));
}
}
#endif /* WITH_FLUID */
@@ -386,9 +462,9 @@ void DRW_smoke_ensure_velocity(FluidModifierData *fmd)
}
if (!fds->tex_velocity_x) {
fds->tex_velocity_x = GPU_texture_create_3d(UNPACK3(fds->res), GPU_R16F, vel_x, NULL);
fds->tex_velocity_y = GPU_texture_create_3d(UNPACK3(fds->res), GPU_R16F, vel_y, NULL);
fds->tex_velocity_z = GPU_texture_create_3d(UNPACK3(fds->res), GPU_R16F, vel_z, NULL);
fds->tex_velocity_x = GPU_texture_create_3d("velx", UNPACK3(fds->res), 1, GPU_R16F, vel_x);
fds->tex_velocity_y = GPU_texture_create_3d("vely", UNPACK3(fds->res), 1, GPU_R16F, vel_y);
fds->tex_velocity_z = GPU_texture_create_3d("velz", UNPACK3(fds->res), 1, GPU_R16F, vel_z);
}
}
#endif /* WITH_FLUID */

View File

@@ -120,7 +120,7 @@ void DRW_hair_init(void)
/* Create vbo immediately to bind to texture buffer. */
GPU_vertbuf_use(g_dummy_vbo);
g_dummy_texture = GPU_texture_create_from_vertbuf(g_dummy_vbo);
g_dummy_texture = GPU_texture_create_from_vertbuf("hair_dummy_attr", g_dummy_vbo);
}
}

View File

@@ -1929,7 +1929,7 @@ static void draw_select_framebuffer_depth_only_setup(const int size[2])
if (g_select_buffer.texture_depth == NULL) {
g_select_buffer.texture_depth = GPU_texture_create_2d(
size[0], size[1], GPU_DEPTH_COMPONENT24, NULL, NULL);
"select_depth", size[0], size[1], 1, GPU_DEPTH_COMPONENT24, NULL);
GPU_framebuffer_texture_attach(
g_select_buffer.framebuffer_depth_only, g_select_buffer.texture_depth, 0, 0);

View File

@@ -67,9 +67,7 @@ void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags)
if (flags & DRW_TEX_MIPMAP) {
GPU_texture_mipmap_mode(tex, true, flags & DRW_TEX_FILTER);
GPU_texture_bind(tex, 0);
GPU_texture_generate_mipmap(tex);
GPU_texture_unbind(tex);
}
else {
GPU_texture_filter_mode(tex, flags & DRW_TEX_FILTER);
@@ -83,7 +81,8 @@ GPUTexture *DRW_texture_create_1d(int w,
DRWTextureFlag flags,
const float *fpixels)
{
GPUTexture *tex = GPU_texture_create_1d(w, format, fpixels, NULL);
int mips = (flags & DRW_TEX_MIPMAP) ? 9999 : 1;
GPUTexture *tex = GPU_texture_create_1d(__func__, w, mips, format, fpixels);
drw_texture_set_parameters(tex, flags);
return tex;
@@ -92,7 +91,8 @@ GPUTexture *DRW_texture_create_1d(int w,
GPUTexture *DRW_texture_create_2d(
int w, int h, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels)
{
GPUTexture *tex = GPU_texture_create_2d(w, h, format, fpixels, NULL);
int mips = (flags & DRW_TEX_MIPMAP) ? 9999 : 1;
GPUTexture *tex = GPU_texture_create_2d(__func__, w, h, mips, format, fpixels);
drw_texture_set_parameters(tex, flags);
return tex;
@@ -101,7 +101,8 @@ GPUTexture *DRW_texture_create_2d(
GPUTexture *DRW_texture_create_2d_array(
int w, int h, int d, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels)
{
GPUTexture *tex = GPU_texture_create_2d_array(w, h, d, format, fpixels, NULL);
int mips = (flags & DRW_TEX_MIPMAP) ? 9999 : 1;
GPUTexture *tex = GPU_texture_create_2d_array(__func__, w, h, d, mips, format, fpixels);
drw_texture_set_parameters(tex, flags);
return tex;
@@ -110,7 +111,8 @@ GPUTexture *DRW_texture_create_2d_array(
GPUTexture *DRW_texture_create_3d(
int w, int h, int d, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels)
{
GPUTexture *tex = GPU_texture_create_3d(w, h, d, format, fpixels, NULL);
int mips = (flags & DRW_TEX_MIPMAP) ? 9999 : 1;
GPUTexture *tex = GPU_texture_create_3d(__func__, w, h, d, mips, format, fpixels);
drw_texture_set_parameters(tex, flags);
return tex;
@@ -121,7 +123,8 @@ GPUTexture *DRW_texture_create_cube(int w,
DRWTextureFlag flags,
const float *fpixels)
{
GPUTexture *tex = GPU_texture_create_cube(w, format, fpixels, NULL);
int mips = (flags & DRW_TEX_MIPMAP) ? 9999 : 1;
GPUTexture *tex = GPU_texture_create_cube(__func__, w, mips, format, fpixels);
drw_texture_set_parameters(tex, flags);
return tex;
}
@@ -129,7 +132,8 @@ GPUTexture *DRW_texture_create_cube(int w,
GPUTexture *DRW_texture_create_cube_array(
int w, int d, eGPUTextureFormat format, DRWTextureFlag flags, const float *fpixels)
{
GPUTexture *tex = GPU_texture_create_cube_array(w, d, format, fpixels, NULL);
int mips = (flags & DRW_TEX_MIPMAP) ? 9999 : 1;
GPUTexture *tex = GPU_texture_create_cube_array(__func__, w, d, mips, format, fpixels);
drw_texture_set_parameters(tex, flags);
return tex;
}
@@ -172,9 +176,7 @@ void DRW_texture_ensure_2d(
void DRW_texture_generate_mipmaps(GPUTexture *tex)
{
GPU_texture_bind(tex, 0);
GPU_texture_generate_mipmap(tex);
GPU_texture_unbind(tex);
}
void DRW_texture_free(GPUTexture *tex)

View File

@@ -868,31 +868,16 @@ void UI_icons_reload_internal_textures(void)
icongltex.invw = 1.0f / b32buf->x;
icongltex.invh = 1.0f / b32buf->y;
icongltex.tex[0] = GPU_texture_create_nD(b32buf->x,
b32buf->y,
0,
2,
b32buf->rect,
GPU_RGBA8,
GPU_DATA_UNSIGNED_BYTE,
0,
false,
NULL);
GPU_texture_add_mipmap(icongltex.tex[0], GPU_DATA_UNSIGNED_BYTE, 1, b16buf->rect);
icongltex.tex[0] = GPU_texture_create_2d("icons", b32buf->x, b32buf->y, 2, GPU_RGBA8, NULL);
GPU_texture_update_mipmap(icongltex.tex[0], 0, GPU_DATA_UNSIGNED_BYTE, b32buf->rect);
GPU_texture_update_mipmap(icongltex.tex[0], 1, GPU_DATA_UNSIGNED_BYTE, b16buf->rect);
}
if (need_icons_with_border && icongltex.tex[1] == NULL) {
icongltex.tex[1] = GPU_texture_create_nD(b32buf_border->x,
b32buf_border->y,
0,
2,
b32buf_border->rect,
GPU_RGBA8,
GPU_DATA_UNSIGNED_BYTE,
0,
false,
NULL);
GPU_texture_add_mipmap(icongltex.tex[1], GPU_DATA_UNSIGNED_BYTE, 1, b16buf_border->rect);
icongltex.tex[1] = GPU_texture_create_2d(
"icons_border", b32buf_border->x, b32buf_border->y, 2, GPU_RGBA8, NULL);
GPU_texture_update_mipmap(icongltex.tex[1], 0, GPU_DATA_UNSIGNED_BYTE, b32buf_border->rect);
GPU_texture_update_mipmap(icongltex.tex[1], 1, GPU_DATA_UNSIGNED_BYTE, b16buf_border->rect);
}
}
@@ -1569,8 +1554,7 @@ static void icon_draw_cache_texture_flush_ex(GPUTexture *texture,
const int img_binding = GPU_shader_get_texture_binding(shader, "image");
const int data_loc = GPU_shader_get_uniform(shader, "calls_data");
GPU_texture_bind(texture, img_binding);
GPU_sampler_icon_bind(img_binding);
GPU_texture_bind_ex(texture, GPU_SAMPLER_ICON, img_binding, false);
GPU_shader_uniform_vector(
shader, data_loc, 4, ICON_DRAW_CACHE_SIZE * 3, (float *)texture_draw_calls->drawcall_cache);
@@ -1721,8 +1705,7 @@ static void icon_draw_texture(float x,
GPU_shader_uniform_vector(shader, rect_tex_loc, 4, 1, (float[4]){x1, y1, x2, y2});
GPU_shader_uniform_vector(shader, rect_geom_loc, 4, 1, (float[4]){x, y, x + w, y + h});
GPU_texture_bind(texture, img_binding);
GPU_sampler_icon_bind(img_binding);
GPU_texture_bind_ex(texture, GPU_SAMPLER_ICON, img_binding, false);
GPUBatch *quad = GPU_batch_preset_quad();
GPU_batch_set_shader(quad, shader);

View File

@@ -132,7 +132,7 @@ void immDrawPixelsTexScaled_clipping(IMMDrawPixelsTexState *state,
eGPUDataFormat gpu_data = (use_float_data) ? GPU_DATA_FLOAT : GPU_DATA_UNSIGNED_BYTE;
size_t stride = components * ((use_float_data) ? sizeof(float) : sizeof(uchar));
GPUTexture *tex = GPU_texture_create_2d(tex_w, tex_h, gpu_format, NULL, NULL);
GPUTexture *tex = GPU_texture_create_2d("immDrawPixels", tex_w, tex_h, 1, gpu_format, NULL);
GPU_texture_filter_mode(tex, use_filter);
GPU_texture_wrap_mode(tex, false, true);

View File

@@ -348,13 +348,12 @@ static int load_tex(Brush *br, ViewContext *vc, float zoom, bool col, bool prima
if (!target->overlay_texture) {
eGPUTextureFormat format = col ? GPU_RGBA8 : GPU_R8;
target->overlay_texture = GPU_texture_create_nD(
size, size, 0, 2, buffer, format, GPU_DATA_UNSIGNED_BYTE, 0, false, NULL);
target->overlay_texture = GPU_texture_create_2d(
"paint_cursor_overlay", size, size, 1, format, NULL);
GPU_texture_update(target->overlay_texture, GPU_DATA_UNSIGNED_BYTE, buffer);
if (!col) {
GPU_texture_bind(target->overlay_texture, 0);
GPU_texture_swizzle_set(target->overlay_texture, "rrrr");
GPU_texture_unbind(target->overlay_texture);
}
}
@@ -468,12 +467,11 @@ static int load_tex_cursor(Brush *br, ViewContext *vc, float zoom)
BLI_task_parallel_range(0, size, &data, load_tex_cursor_task_cb, &settings);
if (!cursor_snap.overlay_texture) {
cursor_snap.overlay_texture = GPU_texture_create_nD(
size, size, 0, 2, buffer, GPU_R8, GPU_DATA_UNSIGNED_BYTE, 0, false, NULL);
cursor_snap.overlay_texture = GPU_texture_create_2d(
"cursor_snap_overaly", size, size, 1, GPU_R8, NULL);
GPU_texture_update(cursor_snap.overlay_texture, GPU_DATA_UNSIGNED_BYTE, buffer);
GPU_texture_bind(cursor_snap.overlay_texture, 0);
GPU_texture_swizzle_set(cursor_snap.overlay_texture, "rrrr");
GPU_texture_unbind(cursor_snap.overlay_texture);
}
if (init) {

View File

@@ -1211,16 +1211,9 @@ static void draw_plane_marker_image(Scene *scene,
GPU_blend(GPU_BLEND_ALPHA);
}
GPUTexture *texture = GPU_texture_create_nD(ibuf->x,
ibuf->y,
0,
2,
display_buffer,
GPU_RGBA8,
GPU_DATA_UNSIGNED_BYTE,
0,
false,
NULL);
GPUTexture *texture = GPU_texture_create_2d(
"plane_marker_image", ibuf->x, ibuf->y, 1, GPU_RGBA8, NULL);
GPU_texture_update(texture, GPU_DATA_UNSIGNED_BYTE, display_buffer);
GPU_texture_filter_mode(texture, false);
GPU_matrix_push();

View File

@@ -1639,8 +1639,9 @@ static void sequencer_draw_display_buffer(const bContext *C,
GPU_matrix_identity_projection_set();
}
GPUTexture *texture = GPU_texture_create_nD(
ibuf->x, ibuf->y, 0, 2, display_buffer, format, data, 0, false, NULL);
GPUTexture *texture = GPU_texture_create_2d(
"seq_display_buf", ibuf->x, ibuf->y, 1, format, NULL);
GPU_texture_update(texture, data, display_buffer);
GPU_texture_filter_mode(texture, false);
GPU_texture_bind(texture, 0);

View File

@@ -96,6 +96,7 @@ set(SRC
opengl/gl_shader.cc
opengl/gl_shader_interface.cc
opengl/gl_state.cc
opengl/gl_texture.cc
opengl/gl_uniform_buffer.cc
opengl/gl_vertex_array.cc
@@ -143,6 +144,7 @@ set(SRC
intern/gpu_shader_private.hh
intern/gpu_shader_interface.hh
intern/gpu_state_private.hh
intern/gpu_texture_private.hh
intern/gpu_uniform_buffer_private.hh
intern/gpu_vertex_format_private.h

View File

@@ -146,6 +146,8 @@ void GPU_batch_program_set_builtin_with_config(GPUBatch *batch,
GPU_shader_uniform_4fv_array((batch)->shader, name, len, val);
#define GPU_batch_uniform_mat4(batch, name, val) \
GPU_shader_uniform_mat4((batch)->shader, name, val);
#define GPU_batch_texture_bind(batch, name, tex) \
GPU_texture_bind(tex, GPU_shader_get_texture_binding((batch)->shader, name));
void GPU_batch_draw(GPUBatch *batch);
void GPU_batch_draw_range(GPUBatch *batch, int v_first, int v_count);

View File

@@ -30,6 +30,7 @@ extern "C" {
/* GPU extensions support */
int GPU_max_texture_size(void);
int GPU_max_texture_3d_size(void);
int GPU_max_texture_layers(void);
int GPU_max_textures(void);
int GPU_max_textures_vert(void);

View File

@@ -32,6 +32,14 @@
#include "GPU_texture.h"
typedef enum eGPUFrameBufferBits {
GPU_COLOR_BIT = (1 << 0),
GPU_DEPTH_BIT = (1 << 1),
GPU_STENCIL_BIT = (1 << 2),
} eGPUFrameBufferBits;
ENUM_OPERATORS(eGPUFrameBufferBits)
#ifdef __cplusplus
extern "C" {
#endif
@@ -41,12 +49,6 @@ typedef struct GPUAttachment {
int layer, mip;
} GPUAttachment;
typedef enum eGPUFrameBufferBits {
GPU_COLOR_BIT = (1 << 0),
GPU_DEPTH_BIT = (1 << 1),
GPU_STENCIL_BIT = (1 << 2),
} eGPUFrameBufferBits;
typedef enum eGPUBackBuffer {
GPU_BACKBUFFER_LEFT = 0,
GPU_BACKBUFFER_RIGHT,

View File

@@ -44,6 +44,7 @@ typedef struct GPUTexture GPUTexture;
* - All states are created at startup to avoid runtime costs.
*/
typedef enum eGPUSamplerState {
GPU_SAMPLER_DEFAULT = 0,
GPU_SAMPLER_FILTER = (1 << 0),
GPU_SAMPLER_MIPMAP = (1 << 1),
GPU_SAMPLER_REPEAT_S = (1 << 2),
@@ -52,8 +53,11 @@ typedef enum eGPUSamplerState {
GPU_SAMPLER_CLAMP_BORDER = (1 << 5), /* Clamp to border color instead of border texel. */
GPU_SAMPLER_COMPARE = (1 << 6),
GPU_SAMPLER_ANISO = (1 << 7),
GPU_SAMPLER_ICON = (1 << 8),
GPU_SAMPLER_REPEAT = (GPU_SAMPLER_REPEAT_S | GPU_SAMPLER_REPEAT_T | GPU_SAMPLER_REPEAT_R),
/* Don't use that. */
GPU_SAMPLER_MAX = (1 << 8),
GPU_SAMPLER_MAX = (GPU_SAMPLER_ICON + 1),
} eGPUSamplerState;
ENUM_OPERATORS(eGPUSamplerState)
@@ -62,11 +66,7 @@ ENUM_OPERATORS(eGPUSamplerState)
extern "C" {
#endif
#define GPU_SAMPLER_DEFAULT GPU_SAMPLER_FILTER
#define GPU_SAMPLER_REPEAT (GPU_SAMPLER_REPEAT_S | GPU_SAMPLER_REPEAT_T | GPU_SAMPLER_REPEAT_R)
void GPU_samplers_init(void);
void GPU_samplers_free(void);
void GPU_samplers_update(void);
/* GPU Texture
* - always returns unsigned char RGBA textures
@@ -183,61 +183,40 @@ typedef enum eGPUDataFormat {
unsigned int GPU_texture_memory_usage_get(void);
/* TODO make it static function again. (create function with eGPUDataFormat exposed) */
GPUTexture *GPU_texture_create_nD(int w,
int h,
int d,
int n,
const void *pixels,
eGPUTextureFormat tex_format,
eGPUDataFormat gpu_data_format,
int samples,
const bool can_rescale,
char err_out[256]);
GPUTexture *GPU_texture_cube_create(int w,
int d,
const void *pixels,
eGPUTextureFormat tex_format,
eGPUDataFormat gpu_data_format,
char err_out[256]);
GPUTexture *GPU_texture_create_1d(int w,
eGPUTextureFormat data_type,
const float *pixels,
char err_out[256]);
/**
* \note \a data is expected to be float. If the \a format is not compatible with float data or if
* the data is not in float format, use GPU_texture_update to upload the data with the right data
* format.
* \a mips is the number of mip level to allocate. It must be >= 1.
*/
GPUTexture *GPU_texture_create_1d(
const char *name, int w, int mips, eGPUTextureFormat format, const float *data);
GPUTexture *GPU_texture_create_1d_array(
int w, int h, eGPUTextureFormat data_type, const float *pixels, char err_out[256]);
const char *name, int w, int h, int mips, eGPUTextureFormat format, const float *data);
GPUTexture *GPU_texture_create_2d(
int w, int h, eGPUTextureFormat data_type, const float *pixels, char err_out[256]);
GPUTexture *GPU_texture_create_2d_multisample(int w,
int h,
eGPUTextureFormat data_type,
const float *pixels,
int samples,
char err_out[256]);
const char *name, int w, int h, int mips, eGPUTextureFormat format, const float *data);
GPUTexture *GPU_texture_create_2d_array(
int w, int h, int d, eGPUTextureFormat data_type, const float *pixels, char err_out[256]);
const char *name, int w, int h, int d, int mips, eGPUTextureFormat format, const float *data);
GPUTexture *GPU_texture_create_3d(
int w, int h, int d, eGPUTextureFormat data_type, const float *pixels, char err_out[256]);
GPUTexture *GPU_texture_create_cube(int w,
eGPUTextureFormat data_type,
const float *pixels,
char err_out[256]);
const char *name, int w, int h, int d, int mips, eGPUTextureFormat format, const float *data);
GPUTexture *GPU_texture_create_cube(
const char *name, int w, int mips, eGPUTextureFormat format, const float *data);
GPUTexture *GPU_texture_create_cube_array(
int w, int d, eGPUTextureFormat data_type, const float *pixels, char err_out[256]);
GPUTexture *GPU_texture_create_from_vertbuf(struct GPUVertBuf *vert);
GPUTexture *GPU_texture_create_buffer(eGPUTextureFormat data_type, const uint buffer);
GPUTexture *GPU_texture_create_compressed(
int w, int h, int miplen, eGPUTextureFormat format, const void *data);
const char *name, int w, int d, int mips, eGPUTextureFormat format, const float *data);
/* Special textures. */
GPUTexture *GPU_texture_create_from_vertbuf(const char *name, struct GPUVertBuf *vert);
/**
* \a data should hold all the data for all mipmaps.
*/
GPUTexture *GPU_texture_create_compressed_2d(
const char *name, int w, int h, int miplen, eGPUTextureFormat format, const void *data);
GPUTexture *GPU_texture_create_error(int dimension, bool array);
void GPU_texture_add_mipmap(GPUTexture *tex,
eGPUDataFormat gpu_data_format,
int miplvl,
const void *pixels);
void GPU_texture_update_mipmap(GPUTexture *tex,
int miplvl,
eGPUDataFormat gpu_data_format,
const void *pixels);
void GPU_texture_update(GPUTexture *tex, eGPUDataFormat data_format, const void *pixels);
void GPU_texture_update_sub(GPUTexture *tex,
@@ -275,18 +254,12 @@ void GPU_texture_mipmap_mode(GPUTexture *tex, bool use_mipmap, bool use_filter);
void GPU_texture_wrap_mode(GPUTexture *tex, bool use_repeat, bool use_clamp);
void GPU_texture_swizzle_set(GPUTexture *tex, const char swizzle[4]);
/* TODO should be private internal functions. */
void GPU_texture_attach_framebuffer(GPUTexture *tex, struct GPUFrameBuffer *fb, int attachment);
void GPU_texture_detach_framebuffer(GPUTexture *tex, struct GPUFrameBuffer *fb);
int GPU_texture_framebuffer_attachment_get(GPUTexture *tex, struct GPUFrameBuffer *fb);
int GPU_texture_target(const GPUTexture *tex);
int GPU_texture_width(const GPUTexture *tex);
int GPU_texture_height(const GPUTexture *tex);
int GPU_texture_orig_width(const GPUTexture *tex);
int GPU_texture_orig_height(const GPUTexture *tex);
void GPU_texture_orig_size_set(GPUTexture *tex, int w, int h);
int GPU_texture_layers(const GPUTexture *tex);
eGPUTextureFormat GPU_texture_format(const GPUTexture *tex);
int GPU_texture_samples(const GPUTexture *tex);
bool GPU_texture_array(const GPUTexture *tex);
@@ -298,8 +271,6 @@ int GPU_texture_opengl_bindcode(const GPUTexture *tex);
void GPU_texture_get_mipmap_size(GPUTexture *tex, int lvl, int *size);
void GPU_sampler_icon_bind(int number);
#ifdef __cplusplus
}
#endif
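A hedged usage sketch of the reworked creation API declared above: textures now take a debug name and an explicit mip count, and pixel upload can be done separately through GPU_texture_update(). The name "example_tex" and the pixel buffer are illustrative only and assume an active GPU context.
/* Illustrative only: not part of this diff. */
float pixels[4 * 4 * 4] = {0.0f}; /* RGBA float data for a 4x4 texture. */
GPUTexture *tex = GPU_texture_create_2d("example_tex", 4, 4, 1, GPU_RGBA16F, NULL);
if (tex != NULL) {
  GPU_texture_update(tex, GPU_DATA_FLOAT, pixels);
  GPU_texture_free(tex);
}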

View File

@@ -34,6 +34,7 @@ class Batch;
class DrawList;
class FrameBuffer;
class Shader;
class Texture;
class UniformBuf;
class GPUBackend {
@@ -42,13 +43,15 @@ class GPUBackend {
static GPUBackend *get(void);
virtual void samplers_update(void) = 0;
virtual GPUContext *context_alloc(void *ghost_window) = 0;
virtual Batch *batch_alloc(void) = 0;
virtual DrawList *drawlist_alloc(int list_length) = 0;
virtual FrameBuffer *framebuffer_alloc(const char *name) = 0;
virtual Shader *shader_alloc(const char *name) = 0;
// virtual Texture *texture_alloc(void) = 0;
virtual Texture *texture_alloc(const char *name) = 0;
virtual UniformBuf *uniformbuf_alloc(int size, const char *name) = 0;
};

View File

@@ -62,6 +62,7 @@
static struct GPUGlobal {
GLint maxtexsize;
GLint maxtex3dsize;
GLint maxtexlayers;
GLint maxcubemapsize;
GLint maxtextures;
@@ -107,7 +108,7 @@ static void gpu_detect_mip_render_workaround(void)
float *source_pix = (float *)MEM_callocN(sizeof(float[4][6]) * cube_size * cube_size, __func__);
float clear_color[4] = {1.0f, 0.5f, 0.0f, 0.0f};
GPUTexture *tex = GPU_texture_create_cube(cube_size, GPU_RGBA16F, source_pix, NULL);
GPUTexture *tex = GPU_texture_create_cube(__func__, cube_size, 2, GPU_RGBA16F, source_pix);
MEM_freeN(source_pix);
GPU_texture_bind(tex, 0);
@@ -137,6 +138,11 @@ int GPU_max_texture_size(void)
return GG.maxtexsize;
}
int GPU_max_texture_3d_size(void)
{
return GG.maxtex3dsize;
}
int GPU_max_texture_layers(void)
{
return GG.maxtexlayers;
@@ -249,6 +255,7 @@ void gpu_extensions_init(void)
glGetIntegerv(GL_MAX_COMBINED_TEXTURE_IMAGE_UNITS, &GG.maxtextures);
glGetIntegerv(GL_MAX_TEXTURE_SIZE, &GG.maxtexsize);
glGetIntegerv(GL_MAX_3D_TEXTURE_SIZE, &GG.maxtex3dsize);
glGetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &GG.maxtexlayers);
glGetIntegerv(GL_MAX_CUBE_MAP_TEXTURE_SIZE, &GG.maxcubemapsize);
@@ -386,13 +393,11 @@ void gpu_extensions_init(void)
}
GPU_invalid_tex_init();
GPU_samplers_init();
}
void gpu_extensions_exit(void)
{
GPU_invalid_tex_free();
GPU_samplers_free();
}
bool GPU_mem_stats_supported(void)

View File

@@ -66,10 +66,9 @@ FrameBuffer::FrameBuffer(const char *name)
FrameBuffer::~FrameBuffer()
{
GPUFrameBuffer *gpu_fb = reinterpret_cast<GPUFrameBuffer *>(this);
for (int i = 0; i < ARRAY_SIZE(attachments_); i++) {
if (attachments_[i].tex != NULL) {
GPU_texture_detach_framebuffer(attachments_[i].tex, gpu_fb);
reinterpret_cast<Texture *>(attachments_[i].tex)->detach_from(this);
}
}
}
@@ -96,10 +95,7 @@ void FrameBuffer::attachment_set(GPUAttachmentType type, const GPUAttachment &ne
if (new_attachment.tex) {
if (new_attachment.layer > 0) {
BLI_assert(ELEM(GPU_texture_target(new_attachment.tex),
GL_TEXTURE_2D_ARRAY,
GL_TEXTURE_CUBE_MAP,
GL_TEXTURE_CUBE_MAP_ARRAY_ARB));
BLI_assert(GPU_texture_cube(new_attachment.tex) || GPU_texture_array(new_attachment.tex));
}
if (GPU_texture_stencil(new_attachment.tex)) {
BLI_assert(ELEM(type, GPU_FB_DEPTH_STENCIL_ATTACHMENT));
@@ -118,14 +114,14 @@ void FrameBuffer::attachment_set(GPUAttachmentType type, const GPUAttachment &ne
/* Unbind previous and bind new. */
/* TODO(fclem) cleanup the casts. */
if (attachment.tex) {
GPU_texture_detach_framebuffer(attachment.tex, reinterpret_cast<GPUFrameBuffer *>(this));
reinterpret_cast<Texture *>(attachment.tex)->detach_from(this);
}
attachment = new_attachment;
/* Might be null if this is for unbinding. */
if (attachment.tex) {
GPU_texture_attach_framebuffer(attachment.tex, reinterpret_cast<GPUFrameBuffer *>(this), type);
reinterpret_cast<Texture *>(attachment.tex)->attach_to(this, type);
}
else {
/* GPU_ATTACHMENT_NONE */
@@ -134,42 +130,33 @@ void FrameBuffer::attachment_set(GPUAttachmentType type, const GPUAttachment &ne
dirty_attachments_ = true;
}
void FrameBuffer::attachment_remove(GPUAttachmentType type)
{
attachments_[type] = GPU_ATTACHMENT_NONE;
dirty_attachments_ = true;
}
void FrameBuffer::recursive_downsample(int max_lvl,
void (*callback)(void *userData, int level),
void *userData)
{
GPUContext *ctx = GPU_context_active_get();
/* Bind to make sure the frame-buffer is up to date. */
this->bind(true);
if (width_ == 1 && height_ == 1) {
return;
}
/* HACK: Make the frame-buffer appear not bound to avoid assert in GPU_texture_bind. */
ctx->active_fb = NULL;
/* FIXME(fclem): This assumes all mips are defined which may not be the case. */
max_lvl = min_ii(max_lvl, floor(log2(max_ii(width_, height_))));
int levels = floor(log2(max_ii(width_, height_)));
max_lvl = min_ii(max_lvl, levels);
int current_dim[2] = {width_, height_};
int mip_lvl;
for (mip_lvl = 1; mip_lvl < max_lvl + 1; mip_lvl++) {
/* calculate next viewport size */
current_dim[0] = max_ii(current_dim[0] / 2, 1);
current_dim[1] = max_ii(current_dim[1] / 2, 1);
for (int mip_lvl = 1; mip_lvl <= max_lvl; mip_lvl++) {
/* Replace attached mip-level for each attachment. */
for (int att = 0; att < ARRAY_SIZE(attachments_); att++) {
GPUTexture *tex = attachments_[att].tex;
Texture *tex = reinterpret_cast<Texture *>(attachments_[att].tex);
if (tex != NULL) {
/* Some Intel HDXXX have issues with rendering to a mipmap that is below
* the texture's GL_TEXTURE_MAX_LEVEL. So even if it is not correct, in this case
* we allow GL_TEXTURE_MAX_LEVEL to be one level lower. In practice it does work! */
int map_lvl = (GPU_mip_render_workaround()) ? mip_lvl : (mip_lvl - 1);
int mip_max = (GPU_mip_render_workaround()) ? mip_lvl : (mip_lvl - 1);
/* Restrict fetches only to previous level. */
GPU_texture_bind(tex, 0);
glTexParameteri(GPU_texture_target(tex), GL_TEXTURE_BASE_LEVEL, mip_lvl - 1);
glTexParameteri(GPU_texture_target(tex), GL_TEXTURE_MAX_LEVEL, map_lvl);
GPU_texture_unbind(tex);
tex->mip_range_set(mip_lvl - 1, mip_max);
/* Bind next level. */
attachments_[att].mip = mip_lvl;
}
@@ -177,25 +164,14 @@ void FrameBuffer::recursive_downsample(int max_lvl,
/* Update the internal attachments and viewport size. */
dirty_attachments_ = true;
this->bind(true);
/* HACK: Make the frame-buffer appear not bound to avoid assert in GPU_texture_bind. */
ctx->active_fb = NULL;
callback(userData, mip_lvl);
/* This is the last mipmap level. Exit loop without incrementing mip_lvl. */
if (current_dim[0] == 1 && current_dim[1] == 1) {
break;
}
}
for (int att = 0; att < ARRAY_SIZE(attachments_); att++) {
if (attachments_[att].tex != NULL) {
/* Reset mipmap level range. */
GPUTexture *tex = attachments_[att].tex;
GPU_texture_bind(tex, 0);
glTexParameteri(GPU_texture_target(tex), GL_TEXTURE_BASE_LEVEL, 0);
glTexParameteri(GPU_texture_target(tex), GL_TEXTURE_MAX_LEVEL, mip_lvl);
GPU_texture_unbind(tex);
reinterpret_cast<Texture *>(attachments_[att].tex)->mip_range_set(0, max_lvl);
/* Reset base level. NOTE: might not be the one bound at the start of this function. */
attachments_[att].mip = 0;
}
@@ -292,7 +268,8 @@ bool GPU_framebuffer_check_valid(GPUFrameBuffer *gpu_fb, char err_out[256])
void GPU_framebuffer_texture_attach_ex(GPUFrameBuffer *gpu_fb, GPUAttachment attachment, int slot)
{
GPUAttachmentType type = blender::gpu::Texture::attachment_type(attachment.tex, slot);
Texture *tex = reinterpret_cast<Texture *>(attachment.tex);
GPUAttachmentType type = tex->attachment_type(slot);
reinterpret_cast<FrameBuffer *>(gpu_fb)->attachment_set(type, attachment);
}
@@ -318,14 +295,8 @@ void GPU_framebuffer_texture_cubeface_attach(
void GPU_framebuffer_texture_detach(GPUFrameBuffer *gpu_fb, GPUTexture *tex)
{
GPUAttachment attachment = GPU_ATTACHMENT_NONE;
int type = GPU_texture_framebuffer_attachment_get(tex, gpu_fb);
if (type != -1) {
reinterpret_cast<FrameBuffer *>(gpu_fb)->attachment_set((GPUAttachmentType)type, attachment);
}
else {
BLI_assert(!"Error: Texture: Framebuffer is not attached");
}
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(gpu_fb);
reinterpret_cast<Texture *>(tex)->detach_from(fb);
}
/**
@@ -605,13 +576,14 @@ GPUOffScreen *GPU_offscreen_create(
width = max_ii(1, width);
ofs->color = GPU_texture_create_2d(
width, height, (high_bitdepth) ? GPU_RGBA16F : GPU_RGBA8, NULL, err_out);
"ofs_color", width, height, 1, (high_bitdepth) ? GPU_RGBA16F : GPU_RGBA8, NULL);
if (depth) {
ofs->depth = GPU_texture_create_2d(width, height, GPU_DEPTH24_STENCIL8, NULL, err_out);
ofs->depth = GPU_texture_create_2d("ofs_depth", width, height, 1, GPU_DEPTH24_STENCIL8, NULL);
}
if ((depth && !ofs->depth) || !ofs->color) {
BLI_snprintf(err_out, 256, "GPUTexture: Texture allocation failed.");
GPU_offscreen_free(ofs);
return NULL;
}

View File

@@ -115,6 +115,9 @@ class FrameBuffer {
float clear_depth,
uint clear_stencil) = 0;
virtual void clear_multi(const float (*clear_col)[4]) = 0;
virtual void clear_attachment(GPUAttachmentType type,
eGPUDataFormat data_format,
const void *clear_value) = 0;
virtual void read(eGPUFrameBufferBits planes,
eGPUDataFormat format,
@@ -131,6 +134,7 @@ class FrameBuffer {
int dst_offset_y) = 0;
void attachment_set(GPUAttachmentType type, const GPUAttachment &new_attachment);
void attachment_remove(GPUAttachmentType type);
void recursive_downsample(int max_lvl,
void (*callback)(void *userData, int level),

View File

@@ -157,7 +157,7 @@ static void gpu_material_ramp_texture_build(GPUMaterial *mat)
GPUColorBandBuilder *builder = mat->coba_builder;
mat->coba_tex = GPU_texture_create_1d_array(
CM_TABLE + 1, builder->current_layer, GPU_RGBA16F, (float *)builder->pixels, NULL);
"mat_ramp", CM_TABLE + 1, builder->current_layer, 1, GPU_RGBA16F, (float *)builder->pixels);
MEM_freeN(builder);
mat->coba_builder = NULL;
@@ -546,7 +546,8 @@ struct GPUUniformBuf *GPU_material_sss_profile_get(GPUMaterial *material,
GPU_texture_free(material->sss_tex_profile);
}
material->sss_tex_profile = GPU_texture_create_1d(64, GPU_RGBA16F, translucence_profile, NULL);
material->sss_tex_profile = GPU_texture_create_1d(
"sss_tex_profile", 64, 1, GPU_RGBA16F, translucence_profile);
MEM_freeN(translucence_profile);

View File

@@ -93,6 +93,11 @@ class ShaderInterface {
return input_lookup(inputs_ + attr_len_ + ubo_len_, uniform_len_, name);
}
inline const ShaderInput *texture_get(const int binding) const
{
return input_lookup(inputs_ + attr_len_ + ubo_len_, uniform_len_, binding);
}
inline const char *input_name_get(const ShaderInput *input) const
{
return name_buffer_ + input->name_offset;

View File

@@ -26,6 +26,8 @@
#include "GPU_state.h"
#include "gpu_texture_private.hh"
#include <cstring>
namespace blender {
@@ -160,6 +162,10 @@ class GPUStateManager {
virtual ~GPUStateManager(){};
virtual void apply_state(void) = 0;
virtual void texture_bind(Texture *tex, eGPUSamplerState sampler, int unit) = 0;
virtual void texture_unbind(Texture *tex) = 0;
virtual void texture_unbind_all(void) = 0;
};
} // namespace gpu

File diff suppressed because it is too large

View File

@@ -25,15 +25,203 @@
#include "BLI_assert.h"
#include "GPU_vertex_buffer.h"
#include "gpu_framebuffer_private.hh"
namespace blender {
namespace gpu {
typedef enum eGPUTextureFormatFlag {
GPU_FORMAT_DEPTH = (1 << 0),
GPU_FORMAT_STENCIL = (1 << 1),
GPU_FORMAT_INTEGER = (1 << 2),
GPU_FORMAT_FLOAT = (1 << 3),
GPU_FORMAT_COMPRESSED = (1 << 4),
GPU_FORMAT_DEPTH_STENCIL = (GPU_FORMAT_DEPTH | GPU_FORMAT_STENCIL),
} eGPUTextureFormatFlag;
ENUM_OPERATORS(eGPUTextureFormatFlag)
typedef enum eGPUTextureType {
GPU_TEXTURE_1D = (1 << 0),
GPU_TEXTURE_2D = (1 << 1),
GPU_TEXTURE_3D = (1 << 2),
GPU_TEXTURE_CUBE = (1 << 3),
GPU_TEXTURE_ARRAY = (1 << 4),
GPU_TEXTURE_BUFFER = (1 << 5),
GPU_TEXTURE_1D_ARRAY = (GPU_TEXTURE_1D | GPU_TEXTURE_ARRAY),
GPU_TEXTURE_2D_ARRAY = (GPU_TEXTURE_2D | GPU_TEXTURE_ARRAY),
GPU_TEXTURE_CUBE_ARRAY = (GPU_TEXTURE_CUBE | GPU_TEXTURE_ARRAY),
} eGPUTextureType;
ENUM_OPERATORS(eGPUTextureType)
#ifdef DEBUG
# define DEBUG_NAME_LEN 64
#else
# define DEBUG_NAME_LEN 8
#endif
/* Maximum number of FBOs a texture can be attached to. */
#define GPU_TEX_MAX_FBO_ATTACHED 13
class Texture {
public:
/** TODO(fclem): make it a non-static function. */
static GPUAttachmentType attachment_type(GPUTexture *tex, int slot)
/** Internal Sampler state. */
eGPUSamplerState sampler_state = GPU_SAMPLER_DEFAULT;
/** Reference counter. */
int refcount = 1;
/** Width & Height (of source data), optional. */
int src_w = 0, src_h = 0;
protected:
/* ---- Texture format (immutable after init). ---- */
/** Width & Height & Depth. For cubemap arrays, d is the number of face-layers. */
int w_, h_, d_;
/** Internal data format. */
eGPUTextureFormat format_;
/** Format characteristics. */
eGPUTextureFormatFlag format_flag_;
/** Texture type. */
eGPUTextureType type_;
/** Number of mipmaps this texture has (Max miplvl). */
/* TODO(fclem) Should become immutable and the need for mipmaps should be specified upfront. */
int mipmaps_ = -1;
/** For error checking */
int mip_min_ = 0, mip_max_ = 0;
/** For debugging */
char name_[DEBUG_NAME_LEN];
/** Framebuffer references to update on deletion. */
GPUAttachmentType fb_attachment_[GPU_TEX_MAX_FBO_ATTACHED];
FrameBuffer *fb_[GPU_TEX_MAX_FBO_ATTACHED];
public:
Texture(const char *name);
virtual ~Texture();
/* Return true on success. */
bool init_1D(int w, int layers, eGPUTextureFormat format);
bool init_2D(int w, int h, int layers, eGPUTextureFormat format);
bool init_3D(int w, int h, int d, eGPUTextureFormat format);
bool init_cubemap(int w, int layers, eGPUTextureFormat format);
bool init_buffer(GPUVertBuf *vbo, eGPUTextureFormat format);
virtual void generate_mipmap(void) = 0;
virtual void copy_to(Texture *tex) = 0;
virtual void clear(eGPUDataFormat format, const void *data) = 0;
virtual void swizzle_set(const char swizzle_mask[4]) = 0;
virtual void mip_range_set(int min, int max) = 0;
virtual void *read(int mip, eGPUDataFormat format) = 0;
void attach_to(FrameBuffer *fb, GPUAttachmentType type);
void detach_from(FrameBuffer *fb);
void update(eGPUDataFormat format, const void *data);
virtual void update_sub(
int mip, int offset[3], int extent[3], eGPUDataFormat format, const void *data) = 0;
/* TODO(fclem) Legacy. Should be removed at some point. */
virtual uint gl_bindcode_get(void) const = 0;
int width_get(void) const
{
switch (GPU_texture_format(tex)) {
return w_;
}
int height_get(void) const
{
return h_;
}
int depth_get(void) const
{
return d_;
}
void mip_size_get(int mip, int r_size[3]) const
{
/* TODO assert if lvl is below the limit of 1px in each dimension. */
int div = 1 << mip;
r_size[0] = max_ii(1, w_ / div);
if (type_ == GPU_TEXTURE_1D_ARRAY) {
r_size[1] = h_;
}
else if (h_ > 0) {
r_size[1] = max_ii(1, h_ / div);
}
if (type_ & (GPU_TEXTURE_ARRAY | GPU_TEXTURE_CUBE)) {
r_size[2] = d_;
}
else if (d_ > 0) {
r_size[2] = max_ii(1, d_ / div);
}
}
int mip_width_get(int mip) const
{
return max_ii(1, w_ / (1 << mip));
}
int mip_height_get(int mip) const
{
return (type_ == GPU_TEXTURE_1D_ARRAY) ? h_ : max_ii(1, h_ / (1 << mip));
}
int mip_depth_get(int mip) const
{
return (type_ & (GPU_TEXTURE_ARRAY | GPU_TEXTURE_CUBE)) ? d_ : max_ii(1, d_ / (1 << mip));
}
/* Return the number of dimensions, taking the array type into account. */
int dimensions_count(void) const
{
const int array = (type_ & GPU_TEXTURE_ARRAY) ? 1 : 0;
switch (type_ & ~GPU_TEXTURE_ARRAY) {
case GPU_TEXTURE_BUFFER:
return 1;
case GPU_TEXTURE_1D:
return 1 + array;
case GPU_TEXTURE_2D:
return 2 + array;
case GPU_TEXTURE_CUBE:
case GPU_TEXTURE_3D:
default:
return 3;
}
}
/* Return the number of array layers (or face-layers) for texture arrays, or 1 for the others. */
int layer_count(void) const
{
switch (type_) {
case GPU_TEXTURE_1D_ARRAY:
return h_;
case GPU_TEXTURE_2D_ARRAY:
case GPU_TEXTURE_CUBE_ARRAY:
return d_;
default:
return 1;
}
}
eGPUTextureFormat format_get(void) const
{
return format_;
}
eGPUTextureFormatFlag format_flag_get(void) const
{
return format_flag_;
}
eGPUTextureType type_get(void) const
{
return type_;
}
GPUAttachmentType attachment_type(int slot) const
{
switch (format_) {
case GPU_DEPTH_COMPONENT32F:
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
@@ -44,10 +232,313 @@ class Texture {
BLI_assert(slot == 0);
return GPU_FB_DEPTH_STENCIL_ATTACHMENT;
default:
return static_cast<GPUAttachmentType>(GPU_FB_COLOR_ATTACHMENT0 + slot);
return GPU_FB_COLOR_ATTACHMENT0 + slot;
}
}
protected:
virtual bool init_internal(void) = 0;
virtual bool init_internal(GPUVertBuf *vbo) = 0;
};
#undef DEBUG_NAME_LEN
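The size helpers above (mip_size_get() and friends) come down to a per-mip halving that leaves array dimensions untouched. A minimal standalone sketch of that arithmetic, assuming a 2D array texture; the helper name is illustrative and not part of the diff:
#include <algorithm>
#include <cstdio>
/* Mip size for a hypothetical 2D array texture: width/height halve per mip, layers do not. */
static void mip_size_2d_array(int w, int h, int layers, int mip, int r_size[3])
{
  const int div = 1 << mip;
  r_size[0] = std::max(1, w / div);
  r_size[1] = std::max(1, h / div);
  r_size[2] = layers; /* The array dimension is never divided. */
}
int main()
{
  int size[3];
  mip_size_2d_array(256, 64, 8, 3, size);
  std::printf("mip 3: %d x %d, %d layers\n", size[0], size[1], size[2]); /* 32 x 8, 8 layers */
}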
inline size_t to_bytesize(eGPUTextureFormat format)
{
switch (format) {
case GPU_RGBA32F:
return 32;
case GPU_RG32F:
case GPU_RGBA16F:
case GPU_RGBA16:
return 16;
case GPU_RGB16F:
return 12;
case GPU_DEPTH32F_STENCIL8: /* 32-bit depth, 8 bits stencil, and 24 unused bits. */
return 8;
case GPU_RG16F:
case GPU_RG16I:
case GPU_RG16UI:
case GPU_RG16:
case GPU_DEPTH24_STENCIL8:
case GPU_DEPTH_COMPONENT32F:
case GPU_RGBA8UI:
case GPU_RGBA8:
case GPU_SRGB8_A8:
case GPU_R11F_G11F_B10F:
case GPU_R32F:
case GPU_R32UI:
case GPU_R32I:
return 4;
case GPU_DEPTH_COMPONENT24:
return 3;
case GPU_DEPTH_COMPONENT16:
case GPU_R16F:
case GPU_R16UI:
case GPU_R16I:
case GPU_RG8:
case GPU_R16:
return 2;
case GPU_R8:
case GPU_R8UI:
return 1;
case GPU_SRGB8_A8_DXT1:
case GPU_SRGB8_A8_DXT3:
case GPU_SRGB8_A8_DXT5:
case GPU_RGBA8_DXT1:
case GPU_RGBA8_DXT3:
case GPU_RGBA8_DXT5:
return 1; /* Incorrect but actual size is fractional. */
default:
BLI_assert(!"Texture format incorrect or unsupported\n");
return 0;
}
}
inline size_t to_block_size(eGPUTextureFormat data_type)
{
switch (data_type) {
case GPU_SRGB8_A8_DXT1:
case GPU_RGBA8_DXT1:
return 8;
case GPU_SRGB8_A8_DXT3:
case GPU_SRGB8_A8_DXT5:
case GPU_RGBA8_DXT3:
case GPU_RGBA8_DXT5:
return 16;
default:
BLI_assert(!"Texture format is not a compressed format\n");
return 0;
}
}
inline eGPUTextureFormatFlag to_format_flag(eGPUTextureFormat format)
{
switch (format) {
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
case GPU_DEPTH_COMPONENT32F:
return GPU_FORMAT_DEPTH;
case GPU_DEPTH24_STENCIL8:
case GPU_DEPTH32F_STENCIL8:
return GPU_FORMAT_DEPTH_STENCIL;
case GPU_R8UI:
case GPU_RG16I:
case GPU_R16I:
case GPU_RG16UI:
case GPU_R16UI:
case GPU_R32UI:
return GPU_FORMAT_INTEGER;
case GPU_SRGB8_A8_DXT1:
case GPU_SRGB8_A8_DXT3:
case GPU_SRGB8_A8_DXT5:
case GPU_RGBA8_DXT1:
case GPU_RGBA8_DXT3:
case GPU_RGBA8_DXT5:
return GPU_FORMAT_COMPRESSED;
default:
return GPU_FORMAT_FLOAT;
}
}
inline int to_component_len(eGPUTextureFormat format)
{
switch (format) {
case GPU_RGBA8:
case GPU_RGBA8UI:
case GPU_RGBA16F:
case GPU_RGBA16:
case GPU_RGBA32F:
case GPU_SRGB8_A8:
return 4;
case GPU_RGB16F:
case GPU_R11F_G11F_B10F:
return 3;
case GPU_RG8:
case GPU_RG16:
case GPU_RG16F:
case GPU_RG16I:
case GPU_RG16UI:
case GPU_RG32F:
return 2;
default:
return 1;
}
}
inline size_t to_bytesize(eGPUTextureFormat tex_format, eGPUDataFormat data_format)
{
switch (data_format) {
case GPU_DATA_UNSIGNED_BYTE:
return 1 * to_component_len(tex_format);
case GPU_DATA_FLOAT:
case GPU_DATA_INT:
case GPU_DATA_UNSIGNED_INT:
return 4 * to_component_len(tex_format);
case GPU_DATA_UNSIGNED_INT_24_8:
case GPU_DATA_10_11_11_REV:
return 4;
default:
BLI_assert(!"Data format incorrect or unsupported\n");
return 0;
}
}
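As a quick worked example of the two-argument to_bytesize() above: reading a GPU_RGBA16F texture as GPU_DATA_FLOAT gives 4 components of 4 bytes each, i.e. 16 bytes per pixel. A standalone sketch sizing a read-back buffer under that assumption (values are illustrative):
#include <cstdio>
#include <cstdlib>
int main()
{
  /* GPU_RGBA16F has 4 components; GPU_DATA_FLOAT is 4 bytes per component. */
  const size_t bytes_per_pixel = 4 * 4;
  const size_t w = 128, h = 64;
  const size_t buffer_size = w * h * bytes_per_pixel; /* 131072 bytes for mip 0. */
  void *data = std::malloc(buffer_size);
  std::printf("read buffer: %zu bytes\n", buffer_size);
  std::free(data);
}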
/* Definitely not complete, edit according to the gl specification. */
inline bool validate_data_format(eGPUTextureFormat tex_format, eGPUDataFormat data_format)
{
switch (tex_format) {
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
case GPU_DEPTH_COMPONENT32F:
return data_format == GPU_DATA_FLOAT;
case GPU_DEPTH24_STENCIL8:
case GPU_DEPTH32F_STENCIL8:
return data_format == GPU_DATA_UNSIGNED_INT_24_8;
case GPU_R8UI:
case GPU_R16UI:
case GPU_RG16UI:
case GPU_R32UI:
return data_format == GPU_DATA_UNSIGNED_INT;
case GPU_RG16I:
case GPU_R16I:
return data_format == GPU_DATA_INT;
case GPU_R8:
case GPU_RG8:
case GPU_RGBA8:
case GPU_RGBA8UI:
case GPU_SRGB8_A8:
return ELEM(data_format, GPU_DATA_UNSIGNED_BYTE, GPU_DATA_FLOAT);
case GPU_R11F_G11F_B10F:
return ELEM(data_format, GPU_DATA_10_11_11_REV, GPU_DATA_FLOAT);
default:
return data_format == GPU_DATA_FLOAT;
}
}
/* Definitely not complete, edit according to the gl specification. */
inline eGPUDataFormat to_data_format(eGPUTextureFormat tex_format)
{
switch (tex_format) {
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
case GPU_DEPTH_COMPONENT32F:
return GPU_DATA_FLOAT;
case GPU_DEPTH24_STENCIL8:
case GPU_DEPTH32F_STENCIL8:
return GPU_DATA_UNSIGNED_INT_24_8;
case GPU_R8UI:
case GPU_R16UI:
case GPU_RG16UI:
case GPU_R32UI:
return GPU_DATA_UNSIGNED_INT;
case GPU_RG16I:
case GPU_R16I:
return GPU_DATA_INT;
case GPU_R8:
case GPU_RG8:
case GPU_RGBA8:
case GPU_RGBA8UI:
case GPU_SRGB8_A8:
return GPU_DATA_UNSIGNED_BYTE;
case GPU_R11F_G11F_B10F:
return GPU_DATA_10_11_11_REV;
default:
return GPU_DATA_FLOAT;
}
}
inline eGPUFrameBufferBits to_framebuffer_bits(eGPUTextureFormat tex_format)
{
switch (tex_format) {
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
case GPU_DEPTH_COMPONENT32F:
return GPU_DEPTH_BIT;
case GPU_DEPTH24_STENCIL8:
case GPU_DEPTH32F_STENCIL8:
return GPU_DEPTH_BIT | GPU_STENCIL_BIT;
default:
return GPU_COLOR_BIT;
}
}
static inline eGPUTextureFormat to_texture_format(const GPUVertFormat *format)
{
if (format->attr_len > 1 || format->attr_len == 0) {
BLI_assert(!"Incorrect vertex format for buffer texture");
return GPU_DEPTH_COMPONENT24;
}
switch (format->attrs[0].comp_len) {
case 1:
switch (format->attrs[0].comp_type) {
case GPU_COMP_I8:
return GPU_R8I;
case GPU_COMP_U8:
return GPU_R8UI;
case GPU_COMP_I16:
return GPU_R16I;
case GPU_COMP_U16:
return GPU_R16UI;
case GPU_COMP_I32:
return GPU_R32I;
case GPU_COMP_U32:
return GPU_R32UI;
case GPU_COMP_F32:
return GPU_R32F;
default:
break;
}
break;
case 2:
switch (format->attrs[0].comp_type) {
case GPU_COMP_I8:
return GPU_RG8I;
case GPU_COMP_U8:
return GPU_RG8UI;
case GPU_COMP_I16:
return GPU_RG16I;
case GPU_COMP_U16:
return GPU_RG16UI;
case GPU_COMP_I32:
return GPU_RG32I;
case GPU_COMP_U32:
return GPU_RG32UI;
case GPU_COMP_F32:
return GPU_RG32F;
default:
break;
}
break;
case 3:
/* Not supported until GL 4.0 */
break;
case 4:
switch (format->attrs[0].comp_type) {
case GPU_COMP_I8:
return GPU_RGBA8I;
case GPU_COMP_U8:
return GPU_RGBA8UI;
case GPU_COMP_I16:
return GPU_RGBA16I;
case GPU_COMP_U16:
return GPU_RGBA16UI;
case GPU_COMP_I32:
return GPU_RGBA32I;
case GPU_COMP_U32:
return GPU_RGBA32UI;
case GPU_COMP_F32:
return GPU_RGBA32F;
default:
break;
}
break;
default:
break;
}
BLI_assert(!"Unsupported vertex format for buffer texture");
return GPU_DEPTH_COMPONENT24;
}
} // namespace gpu
} // namespace blender

View File

@@ -380,7 +380,7 @@ GPUTexture *GPU_viewport_texture_pool_query(
}
}
tex = GPU_texture_create_2d(width, height, format, NULL, NULL);
tex = GPU_texture_create_2d("temp_from_pool", width, height, 1, format, NULL);
/* Doing filtering for depth does not make sense when not doing shadow mapping,
* and enabling texture filtering on integer texture make them unreadable. */
bool do_filter = !GPU_texture_depth(tex) && !GPU_texture_integer(tex);
@@ -453,16 +453,21 @@ static void gpu_viewport_default_fb_create(GPUViewport *viewport)
int *size = viewport->size;
bool ok = true;
dtxl->color = GPU_texture_create_2d(size[0], size[1], GPU_RGBA16F, NULL, NULL);
dtxl->color_overlay = GPU_texture_create_2d(size[0], size[1], GPU_SRGB8_A8, NULL, NULL);
if (((viewport->flag & GPU_VIEWPORT_STEREO) != 0)) {
dtxl->color_stereo = GPU_texture_create_2d(size[0], size[1], GPU_RGBA16F, NULL, NULL);
dtxl->color_overlay_stereo = GPU_texture_create_2d(size[0], size[1], GPU_SRGB8_A8, NULL, NULL);
dtxl->color = GPU_texture_create_2d("dtxl_color", UNPACK2(size), 1, GPU_RGBA16F, NULL);
dtxl->color_overlay = GPU_texture_create_2d(
"dtxl_color_overlay", UNPACK2(size), 1, GPU_SRGB8_A8, NULL);
if (viewport->flag & GPU_VIEWPORT_STEREO) {
dtxl->color_stereo = GPU_texture_create_2d(
"dtxl_color_stereo", UNPACK2(size), 1, GPU_RGBA16F, NULL);
dtxl->color_overlay_stereo = GPU_texture_create_2d(
"dtxl_color_overlay_stereo", UNPACK2(size), 1, GPU_SRGB8_A8, NULL);
}
/* Can be shared with GPUOffscreen. */
if (dtxl->depth == NULL) {
dtxl->depth = GPU_texture_create_2d(size[0], size[1], GPU_DEPTH24_STENCIL8, NULL, NULL);
dtxl->depth = GPU_texture_create_2d(
"dtxl_depth", UNPACK2(size), 1, GPU_DEPTH24_STENCIL8, NULL);
}
if (!dtxl->depth || !dtxl->color) {

View File

@@ -32,6 +32,7 @@
#include "gl_drawlist.hh"
#include "gl_framebuffer.hh"
#include "gl_shader.hh"
#include "gl_texture.hh"
#include "gl_uniform_buffer.hh"
namespace blender {
@@ -42,11 +43,25 @@ class GLBackend : public GPUBackend {
GLSharedOrphanLists shared_orphan_list_;
public:
GLBackend()
{
GLTexture::samplers_init();
}
~GLBackend()
{
GLTexture::samplers_free();
}
static GLBackend *get(void)
{
return static_cast<GLBackend *>(GPUBackend::get());
}
void samplers_update(void) override
{
GLTexture::samplers_update();
};
GPUContext *context_alloc(void *ghost_window)
{
return new GLContext(ghost_window, shared_orphan_list_);
@@ -72,6 +87,11 @@ class GLBackend : public GPUBackend {
return new GLShader(name);
};
Texture *texture_alloc(const char *name)
{
return new GLTexture(name);
};
UniformBuf *uniformbuf_alloc(int size, const char *name)
{
return new GLUniformBuf(size, name);

View File

@@ -30,6 +30,8 @@
#include "BLI_set.hh"
#include "BLI_vector.hh"
#include "gl_state.hh"
#include "glew-mx.h"
#include <mutex>
@@ -83,6 +85,12 @@ class GLContext : public GPUContext {
void activate(void) override;
void deactivate(void) override;
static inline GLStateManager *state_manager_active_get()
{
GLContext *ctx = static_cast<GLContext *>(GPU_context_active_get());
return static_cast<GLStateManager *>(ctx->state_manager);
};
/* TODO(fclem) these needs to become private. */
public:
void orphans_add(Vector<GLuint> &orphan_list, std::mutex &list_mutex, GLuint id);

View File

@@ -28,6 +28,10 @@
#include "BLI_system.h"
#include "BLI_utildefines.h"
#include "BKE_global.h"
#include "GPU_platform.h"
#include "glew-mx.h"
#include "gl_context.hh"
@@ -37,6 +41,9 @@
#include <stdio.h>
/* Avoid too much NVidia buffer info in the output log. */
#define TRIM_NVIDIA_BUFFER_INFO 1
namespace blender::gpu::debug {
/* -------------------------------------------------------------------- */
@@ -65,6 +72,13 @@ static void APIENTRY debug_callback(GLenum UNUSED(source),
{
const char format[] = "GPUDebug: %s%s\033[0m\n";
if (TRIM_NVIDIA_BUFFER_INFO &&
GPU_type_matches(GPU_DEVICE_NVIDIA, GPU_OS_ANY, GPU_DRIVER_OFFICIAL) &&
STREQLEN("Buffer detailed info", message, 20)) {
/** Suppress buffer info from flooding the output. */
return;
}
if (ELEM(severity, GL_DEBUG_SEVERITY_LOW, GL_DEBUG_SEVERITY_NOTIFICATION)) {
if (VERBOSE) {
fprintf(stderr, format, "\033[2m", message);
@@ -179,6 +193,10 @@ void check_gl_error(const char *info)
void check_gl_resources(const char *info)
{
if (!(G.debug & G_DEBUG_GPU)) {
return;
}
GLContext *ctx = static_cast<GLContext *>(GPU_context_active_get());
ShaderInterface *interface = ctx->shader->interface;
/* NOTE: This only checks binding. To be valid, the bound ubo needs to
@@ -186,7 +204,12 @@ void check_gl_resources(const char *info)
uint16_t ubo_needed = interface->enabled_ubo_mask_;
ubo_needed &= ~ctx->bound_ubo_slots;
if (ubo_needed == 0) {
/* NOTE: This only checks binding. To be valid, the bound texture needs to
* be the same format/target the shader expects. */
uint64_t tex_needed = interface->enabled_tex_mask_;
tex_needed &= ~ctx->state_manager_active_get()->bound_texture_slots();
if (ubo_needed == 0 && tex_needed == 0) {
return;
}
@@ -200,6 +223,22 @@ void check_gl_resources(const char *info)
debug_callback(0, GL_DEBUG_TYPE_ERROR, 0, GL_DEBUG_SEVERITY_HIGH, 0, msg, NULL);
}
}
for (int i = 0; tex_needed != 0; i++, tex_needed >>= 1) {
if ((tex_needed & 1) != 0) {
const ShaderInput *tex_input = interface->texture_get(i);
const char *tex_name = interface->input_name_get(tex_input);
const char *sh_name = ctx->shader->name_get();
char msg[256];
SNPRINTF(msg, "Missing Texture bind at slot %d : %s > %s : %s", i, sh_name, tex_name, info);
debug_callback(0, GL_DEBUG_TYPE_ERROR, 0, GL_DEBUG_SEVERITY_HIGH, 0, msg, NULL);
}
}
}
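The missing-bind detection above is just a "needed minus bound" mask walk. A minimal standalone sketch of the same logic with plain uint64_t masks; the slot numbers are illustrative, not taken from a real shader:
#include <cstdint>
#include <cstdio>
int main()
{
  uint64_t needed = (1ull << 0) | (1ull << 2); /* Shader samples texture slots 0 and 2. */
  uint64_t bound = (1ull << 0);                /* Only slot 0 has a texture bound. */
  uint64_t missing = needed & ~bound;
  for (int i = 0; missing != 0; i++, missing >>= 1) {
    if (missing & 1) {
      std::printf("Missing texture bind at slot %d\n", i); /* Prints slot 2. */
    }
  }
}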
void raise_gl_error(const char *msg)
{
debug_callback(0, GL_DEBUG_TYPE_ERROR, 0, GL_DEBUG_SEVERITY_HIGH, 0, msg, NULL);
}
/** \} */

View File

@@ -37,6 +37,7 @@ namespace debug {
# define GL_CHECK_RESOURCES(info)
#endif
void raise_gl_error(const char *info);
void check_gl_error(const char *info);
void check_gl_resources(const char *info);
void init_gl_callbacks(void);

View File

@@ -356,7 +356,9 @@ void GLFrameBuffer::clear(eGPUFrameBufferBits buffers,
}
}
void GLFrameBuffer::clear_multi(const float (*clear_cols)[4])
void GLFrameBuffer::clear_attachment(GPUAttachmentType type,
eGPUDataFormat data_format,
const void *clear_value)
{
BLI_assert(GPU_context_active_get() == context_);
BLI_assert(context_->active_fb == this);
@@ -367,17 +369,56 @@ void GLFrameBuffer::clear_multi(const float (*clear_cols)[4])
context_->state_manager->apply_state();
if (type == GPU_FB_DEPTH_STENCIL_ATTACHMENT) {
BLI_assert(data_format == GPU_DATA_UNSIGNED_INT_24_8);
float depth = ((*(uint32_t *)clear_value) & 0x00FFFFFFu) / (float)0x00FFFFFFu;
int stencil = ((*(uint32_t *)clear_value) >> 24);
glClearBufferfi(GL_DEPTH_STENCIL, 0, depth, stencil);
}
else if (type == GPU_FB_DEPTH_ATTACHMENT) {
if (data_format == GPU_DATA_FLOAT) {
glClearBufferfv(GL_DEPTH, 0, (GLfloat *)clear_value);
}
else if (data_format == GPU_DATA_UNSIGNED_INT) {
float depth = *(uint32_t *)clear_value / (float)0xFFFFFFFFu;
glClearBufferfv(GL_DEPTH, 0, &depth);
}
else {
BLI_assert(!"Unhandled data format");
}
}
else {
int slot = type - GPU_FB_COLOR_ATTACHMENT0;
switch (data_format) {
case GPU_DATA_FLOAT:
glClearBufferfv(GL_COLOR, slot, (GLfloat *)clear_value);
break;
case GPU_DATA_UNSIGNED_INT:
glClearBufferuiv(GL_COLOR, slot, (GLuint *)clear_value);
break;
case GPU_DATA_INT:
glClearBufferiv(GL_COLOR, slot, (GLint *)clear_value);
break;
default:
BLI_assert(!"Unhandled data format");
break;
}
}
GPU_write_mask(write_mask);
}
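A worked example of the GPU_DATA_UNSIGNED_INT_24_8 packing decoded above: depth occupies the low 24 bits as a normalized value and stencil the high 8 bits. Standalone sketch; the packed value is illustrative:
#include <cstdint>
#include <cstdio>
int main()
{
  uint32_t packed = (0x80u << 24) | 0x7FFFFFu; /* stencil = 128, depth ~= 0.5 */
  float depth = (packed & 0x00FFFFFFu) / (float)0x00FFFFFFu;
  uint32_t stencil = packed >> 24;
  std::printf("depth=%f stencil=%u\n", depth, stencil);
}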
void GLFrameBuffer::clear_multi(const float (*clear_cols)[4])
{
/* WATCH: This can easily access clear_cols out of bounds if clear_cols is not big enough for
* all attachments.
* TODO(fclem) fix this unsafe access? */
int type = GPU_FB_COLOR_ATTACHMENT0;
for (int i = 0; type < GPU_FB_MAX_ATTACHMENT; i++, type++) {
if (attachments_[type].tex != NULL) {
glClearBufferfv(GL_COLOR, i, clear_cols[i]);
this->clear_attachment(GPU_FB_COLOR_ATTACHMENT0 + i, GPU_DATA_FLOAT, clear_cols[i]);
}
}
GPU_write_mask(write_mask);
}
void GLFrameBuffer::read(eGPUFrameBufferBits plane,

View File

@@ -39,6 +39,9 @@ class GLStateManager;
* Implementation of FrameBuffer object using OpenGL.
**/
class GLFrameBuffer : public FrameBuffer {
/* For debugging purposes. */
friend class GLTexture;
private:
/** OpenGL handle. */
GLuint fbo_id_ = 0;
@@ -81,6 +84,9 @@ class GLFrameBuffer : public FrameBuffer {
float clear_depth,
uint clear_stencil) override;
void clear_multi(const float (*clear_cols)[4]) override;
void clear_attachment(GPUAttachmentType type,
eGPUDataFormat data_format,
const void *clear_value) override;
void read(eGPUFrameBufferBits planes,
eGPUDataFormat format,

View File

@@ -20,7 +20,10 @@
* \ingroup gpu
*/
#include "BKE_global.h"
#include "BLI_math_base.h"
#include "BLI_math_bits.h"
#include "GPU_extensions.h"
@@ -28,9 +31,11 @@
#include "gl_context.hh"
#include "gl_framebuffer.hh"
#include "gl_texture.hh"
#include "gl_state.hh"
using namespace blender::gpu;
namespace blender::gpu {
/* -------------------------------------------------------------------- */
/** \name GLStateManager
@@ -69,6 +74,7 @@ void GLStateManager::apply_state(void)
{
this->set_state(this->state);
this->set_mutable_state(this->mutable_state);
this->texture_bind_apply();
active_fb->apply_state();
};
@@ -419,3 +425,111 @@ void GLStateManager::set_blend(const eGPUBlend value)
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Texture state management
* \{ */
void GLStateManager::texture_bind(Texture *tex_, eGPUSamplerState sampler_type, int unit)
{
BLI_assert(unit < GPU_max_textures());
GLTexture *tex = static_cast<GLTexture *>(tex_);
if (G.debug & G_DEBUG_GPU) {
tex->check_feedback_loop();
}
/* Eliminate redundant binds. */
if ((textures_[unit] == tex->tex_id_) &&
(samplers_[unit] == GLTexture::samplers_[sampler_type])) {
return;
}
targets_[unit] = tex->target_;
textures_[unit] = tex->tex_id_;
samplers_[unit] = GLTexture::samplers_[sampler_type];
tex->is_bound_ = true;
dirty_texture_binds_ |= 1ULL << unit;
}
/* Bind the texture to slot 0 for editing purposes. Used by the legacy pipeline. */
void GLStateManager::texture_bind_temp(GLTexture *tex)
{
// BLI_assert(!GLEW_ARB_direct_state_access);
glActiveTexture(GL_TEXTURE0);
glBindTexture(tex->target_, tex->tex_id_);
/* The texture that was originally bound to slot 0 will be restored before drawing. */
dirty_texture_binds_ |= 1;
/* NOTE: This might leave this texture attached to this target even after the update.
* In practice this does not cause problems, as incorrect bindings are detected
* at a higher level. */
}
void GLStateManager::texture_unbind(Texture *tex_)
{
GLTexture *tex = static_cast<GLTexture *>(tex_);
if (!tex->is_bound_) {
return;
}
GLuint tex_id = tex->tex_id_;
for (int i = 0; i < ARRAY_SIZE(textures_); i++) {
if (textures_[i] == tex_id) {
textures_[i] = 0;
samplers_[i] = 0;
dirty_texture_binds_ |= 1ULL << i;
}
}
tex->is_bound_ = false;
}
void GLStateManager::texture_unbind_all(void)
{
for (int i = 0; i < ARRAY_SIZE(textures_); i++) {
if (textures_[i] != 0) {
textures_[i] = 0;
samplers_[i] = 0;
dirty_texture_binds_ |= 1ULL << i;
}
}
this->texture_bind_apply();
}
void GLStateManager::texture_bind_apply(void)
{
if (dirty_texture_binds_ == 0) {
return;
}
uint64_t dirty_bind = dirty_texture_binds_;
dirty_texture_binds_ = 0;
int first = bitscan_forward_uint64(dirty_bind);
int last = 64 - bitscan_reverse_uint64(dirty_bind);
int count = last - first;
if (GLEW_ARB_multi_bind) {
glBindTextures(first, count, textures_ + first);
glBindSamplers(first, count, samplers_ + first);
}
else {
for (int unit = first; unit < last; unit++) {
if ((dirty_bind >> unit) & 1UL) {
glActiveTexture(GL_TEXTURE0 + unit);
glBindTexture(targets_[unit], textures_[unit]);
glBindSampler(unit, samplers_[unit]);
}
}
}
}
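The dirty-range computation above finds the lowest and highest changed slot so that a single glBindTextures()/glBindSamplers() call can cover them all. A standalone sketch using GCC/Clang builtins in place of BLI's bitscan helpers (assumed to return the lowest set bit index and the leading-zero count respectively):
#include <cstdint>
#include <cstdio>
int main()
{
  uint64_t dirty = (1ull << 3) | (1ull << 9); /* Slots 3 and 9 changed. */
  int first = __builtin_ctzll(dirty);         /* 3 */
  int last = 64 - __builtin_clzll(dirty);     /* 10 */
  int count = last - first;                   /* 7: one multi-bind call covers slots 3..9. */
  std::printf("first=%d last=%d count=%d\n", first, last, count);
}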
uint64_t GLStateManager::bound_texture_slots(void)
{
uint64_t bound_slots = 0;
for (int i = 0; i < ARRAY_SIZE(textures_); i++) {
if (textures_[i] != 0) {
bound_slots |= 1ULL << i;
}
}
return bound_slots;
}
/** \} */
} // namespace blender::gpu

View File

@@ -20,6 +20,8 @@
* \ingroup gpu
*/
#pragma once
#include "MEM_guardedalloc.h"
#include "BLI_utildefines.h"
@@ -32,6 +34,7 @@ namespace blender {
namespace gpu {
class GLFrameBuffer;
class GLTexture;
/**
* State manager keeping track of the draw state and applying it before drawing.
@@ -49,11 +52,30 @@ class GLStateManager : public GPUStateManager {
/** Limits. */
float line_width_range_[2];
/** Texture state:
* We keep the full stack of texture and sampler binds to use multi-bind, and to be able to
* edit and restore texture binds on the fly without querying the context.
* This also lets us keep track of textures bound to multiple texture units.
* Keep the targets to know what target to set to 0 for unbinding (legacy).
* Init first target to GL_TEXTURE_2D for texture_bind_temp to work.
*/
GLuint targets_[64] = {GL_TEXTURE_2D};
GLuint textures_[64] = {0};
GLuint samplers_[64] = {0};
uint64_t dirty_texture_binds_ = 0;
public:
GLStateManager();
void apply_state(void) override;
void texture_bind(Texture *tex, eGPUSamplerState sampler, int unit) override;
void texture_bind_temp(GLTexture *tex);
void texture_unbind(Texture *tex) override;
void texture_unbind_all(void) override;
uint64_t bound_texture_slots(void);
private:
static void set_write_mask(const eGPUWriteMask value);
static void set_depth_test(const eGPUDepthTest value);
@@ -70,6 +92,8 @@ class GLStateManager : public GPUStateManager {
void set_state(const GPUState &state);
void set_mutable_state(const GPUStateMutable &state);
void texture_bind_apply(void);
MEM_CXX_CLASS_ALLOC_FUNCS("GLStateManager")
};

View File

@@ -0,0 +1,689 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2020 Blender Foundation.
* All rights reserved.
*/
/** \file
* \ingroup gpu
*/
#include "BKE_global.h"
#include "DNA_userdef_types.h"
#include "GPU_extensions.h"
#include "GPU_framebuffer.h"
#include "GPU_platform.h"
#include "gl_backend.hh"
#include "gl_debug.hh"
#include "gl_state.hh"
#include "gl_texture.hh"
namespace blender::gpu {
/* -------------------------------------------------------------------- */
/** \name Creation & Deletion
* \{ */
GLTexture::GLTexture(const char *name) : Texture(name)
{
BLI_assert(GPU_context_active_get() != NULL);
glGenTextures(1, &tex_id_);
}
GLTexture::~GLTexture()
{
if (framebuffer_) {
GPU_framebuffer_free(framebuffer_);
}
GPUContext *ctx = GPU_context_active_get();
if (ctx != NULL && is_bound_) {
/* This avoids errors when the texture is still inside the bound texture array. */
ctx->state_manager->texture_unbind(this);
}
GLBackend::get()->tex_free(tex_id_);
}
/* Return true on success. */
bool GLTexture::init_internal(void)
{
if ((format_ == GPU_DEPTH24_STENCIL8) && GPU_depth_blitting_workaround()) {
/* MacOS + Radeon Pro fails to blit depth on GPU_DEPTH24_STENCIL8
* but works on GPU_DEPTH32F_STENCIL8. */
format_ = GPU_DEPTH32F_STENCIL8;
}
if ((type_ == GPU_TEXTURE_CUBE_ARRAY) && !GPU_arb_texture_cube_map_array_is_supported()) {
debug::raise_gl_error("Attempt to create a cubemap array without hardware support!");
return false;
}
target_ = to_gl_target(type_);
/* We need to bind once to define the texture type. */
GLContext::state_manager_active_get()->texture_bind_temp(this);
if (!this->proxy_check(0)) {
return false;
}
this->ensure_mipmaps(0);
/* Avoid issue with incomplete textures. */
if (GLEW_ARB_direct_state_access) {
glTextureParameteri(tex_id_, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
else {
glTexParameteri(target_, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
#ifndef __APPLE__
if ((G.debug & G_DEBUG_GPU) && (GLEW_VERSION_4_3 || GLEW_KHR_debug)) {
char sh_name[64];
SNPRINTF(sh_name, "Texture-%s", name_);
/* Binding before setting the label is needed on some drivers. */
glObjectLabel(GL_TEXTURE, tex_id_, -1, sh_name);
}
#endif
GL_CHECK_ERROR("Post-texture creation");
return true;
}
/* Return true on success. */
bool GLTexture::init_internal(GPUVertBuf *vbo)
{
target_ = to_gl_target(type_);
/* We need to bind once to define the texture type. */
GLContext::state_manager_active_get()->texture_bind_temp(this);
GLenum internal_format = to_gl_internal_format(format_);
if (GLEW_ARB_direct_state_access) {
glTextureBuffer(tex_id_, internal_format, vbo->vbo_id);
}
else {
glTexBuffer(target_, internal_format, vbo->vbo_id);
}
#ifndef __APPLE__
if ((G.debug & G_DEBUG_GPU) && (GLEW_VERSION_4_3 || GLEW_KHR_debug)) {
char sh_name[64];
SNPRINTF(sh_name, "Texture-%s", name_);
/* Binding before setting the label is needed on some drivers. */
glObjectLabel(GL_TEXTURE, tex_id_, -1, sh_name);
}
#endif
GL_CHECK_ERROR("Post-texture buffer creation");
return true;
}
/* Create enough mipmap levels to reach the given level. */
void GLTexture::ensure_mipmaps(int miplvl)
{
int effective_h = (type_ == GPU_TEXTURE_1D_ARRAY) ? 0 : h_;
int effective_d = (type_ != GPU_TEXTURE_3D) ? 0 : d_;
int max_dimension = max_iii(w_, effective_h, effective_d);
int max_miplvl = floor(log2(max_dimension));
miplvl = min_ii(miplvl, max_miplvl);
while (mipmaps_ < miplvl) {
int mip = ++mipmaps_;
const int dimensions = this->dimensions_count();
int w = mip_width_get(mip);
int h = mip_height_get(mip);
int d = mip_depth_get(mip);
GLenum internal_format = to_gl_internal_format(format_);
GLenum gl_format = to_gl_data_format(format_);
GLenum gl_type = to_gl(to_data_format(format_));
GLContext::state_manager_active_get()->texture_bind_temp(this);
if (type_ == GPU_TEXTURE_CUBE) {
for (int i = 0; i < d; i++) {
GLenum target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + i;
glTexImage2D(target, mip, internal_format, w, h, 0, gl_format, gl_type, NULL);
}
}
else if (format_flag_ & GPU_FORMAT_COMPRESSED) {
size_t size = ((w + 3) / 4) * ((h + 3) / 4) * to_block_size(format_);
switch (dimensions) {
default:
case 1:
glCompressedTexImage1D(target_, mip, internal_format, w, 0, size, NULL);
break;
case 2:
glCompressedTexImage2D(target_, mip, internal_format, w, h, 0, size, NULL);
break;
case 3:
glCompressedTexImage3D(target_, mip, internal_format, w, h, d, 0, size, NULL);
break;
}
}
else {
switch (dimensions) {
default:
case 1:
glTexImage1D(target_, mip, internal_format, w, 0, gl_format, gl_type, NULL);
break;
case 2:
glTexImage2D(target_, mip, internal_format, w, h, 0, gl_format, gl_type, NULL);
break;
case 3:
glTexImage3D(target_, mip, internal_format, w, h, d, 0, gl_format, gl_type, NULL);
break;
}
}
GL_CHECK_ERROR("Post-mipmap creation");
}
this->mip_range_set(0, mipmaps_);
}
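To illustrate the mip-count arithmetic in ensure_mipmaps(): the deepest level is floor(log2(largest dimension)), so a 256x64 2D texture can define mip levels 0 through 8. A standalone check of that computation (values are illustrative):
#include <algorithm>
#include <cmath>
#include <cstdio>
int main()
{
  int w = 256, h = 64, d = 0; /* 2D texture: depth does not participate. */
  int max_dimension = std::max({w, h, d});
  int max_miplvl = (int)std::floor(std::log2(max_dimension));
  std::printf("deepest mip level: %d\n", max_miplvl); /* 8 */
}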
/** \} */
/* -------------------------------------------------------------------- */
/** \name Operations
* \{ */
void GLTexture::update_sub_direct_state_access(
int mip, int offset[3], int extent[3], GLenum format, GLenum type, const void *data)
{
if (format_flag_ & GPU_FORMAT_COMPRESSED) {
size_t size = ((extent[0] + 3) / 4) * ((extent[1] + 3) / 4) * to_block_size(format_);
switch (this->dimensions_count()) {
default:
case 1:
glCompressedTextureSubImage1D(tex_id_, mip, offset[0], extent[0], format, size, data);
break;
case 2:
glCompressedTextureSubImage2D(
tex_id_, mip, UNPACK2(offset), UNPACK2(extent), format, size, data);
break;
case 3:
glCompressedTextureSubImage3D(
tex_id_, mip, UNPACK3(offset), UNPACK3(extent), format, size, data);
break;
}
}
else {
switch (this->dimensions_count()) {
default:
case 1:
glTextureSubImage1D(tex_id_, mip, offset[0], extent[0], format, type, data);
break;
case 2:
glTextureSubImage2D(tex_id_, mip, UNPACK2(offset), UNPACK2(extent), format, type, data);
break;
case 3:
glTextureSubImage3D(tex_id_, mip, UNPACK3(offset), UNPACK3(extent), format, type, data);
break;
}
}
GL_CHECK_ERROR("Post-update_sub_direct_state_access");
}
void GLTexture::update_sub(
int mip, int offset[3], int extent[3], eGPUDataFormat type, const void *data)
{
BLI_assert(validate_data_format(format_, type));
BLI_assert(data != NULL);
this->ensure_mipmaps(mip);
if (mip > mipmaps_) {
debug::raise_gl_error("Updating a miplvl on a texture too small to have this many levels.");
return;
}
const int dimensions = this->dimensions_count();
GLenum gl_format = to_gl_data_format(format_);
GLenum gl_type = to_gl(type);
if (GLEW_ARB_direct_state_access) {
this->update_sub_direct_state_access(mip, offset, extent, gl_format, gl_type, data);
return;
}
GLContext::state_manager_active_get()->texture_bind_temp(this);
if (type_ == GPU_TEXTURE_CUBE) {
for (int i = 0; i < extent[2]; i++) {
GLenum target = GL_TEXTURE_CUBE_MAP_POSITIVE_X + offset[2] + i;
glTexSubImage2D(target, mip, UNPACK2(offset), UNPACK2(extent), gl_format, gl_type, data);
}
}
else if (format_flag_ & GPU_FORMAT_COMPRESSED) {
size_t size = ((extent[0] + 3) / 4) * ((extent[1] + 3) / 4) * to_block_size(format_);
switch (dimensions) {
default:
case 1:
glCompressedTexSubImage1D(target_, mip, offset[0], extent[0], gl_format, size, data);
break;
case 2:
glCompressedTexSubImage2D(
target_, mip, UNPACK2(offset), UNPACK2(extent), gl_format, size, data);
break;
case 3:
glCompressedTexSubImage3D(
target_, mip, UNPACK3(offset), UNPACK3(extent), gl_format, size, data);
break;
}
}
else {
switch (dimensions) {
default:
case 1:
glTexSubImage1D(target_, mip, offset[0], extent[0], gl_format, gl_type, data);
break;
case 2:
glTexSubImage2D(target_, mip, UNPACK2(offset), UNPACK2(extent), gl_format, gl_type, data);
break;
case 3:
glTexSubImage3D(target_, mip, UNPACK3(offset), UNPACK3(extent), gl_format, gl_type, data);
break;
}
}
GL_CHECK_ERROR("Post-update_sub");
}
/** This will create the mipmap images and populate them with filtered data from base level.
* WARNING: Depth textures are not populated but they have their mips correctly defined.
* WARNING: This resets the mipmap range.
*/
void GLTexture::generate_mipmap(void)
{
this->ensure_mipmaps(9999);
/* Some drivers have bugs when using glGenerateMipmap with depth textures (see T56789).
* In this case we just create a complete texture with mipmaps manually without
* down-sampling. You must initialize the texture levels using other methods like
* GPU_framebuffer_recursive_downsample(). */
if (format_flag_ & GPU_FORMAT_DEPTH) {
return;
}
/* Downsample from mip 0 using implementation. */
if (GLEW_ARB_direct_state_access) {
glGenerateTextureMipmap(tex_id_);
}
else {
GLContext::state_manager_active_get()->texture_bind_temp(this);
glGenerateMipmap(target_);
}
}
void GLTexture::clear(eGPUDataFormat data_format, const void *data)
{
BLI_assert(validate_data_format(format_, data_format));
if (GLEW_ARB_clear_texture && !(G.debug & G_DEBUG_GPU_FORCE_WORKAROUNDS)) {
int mip = 0;
GLenum gl_format = to_gl_data_format(format_);
GLenum gl_type = to_gl(data_format);
glClearTexImage(tex_id_, mip, gl_format, gl_type, data);
}
else {
/* Fallback for older GL. */
GPUFrameBuffer *prev_fb = GPU_framebuffer_active_get();
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(this->framebuffer_get());
fb->bind(true);
fb->clear_attachment(this->attachment_type(0), data_format, data);
GPU_framebuffer_bind(prev_fb);
}
}
void GLTexture::copy_to(Texture *dst_)
{
GLTexture *dst = static_cast<GLTexture *>(dst_);
GLTexture *src = this;
BLI_assert((dst->w_ == src->w_) && (dst->h_ == src->h_) && (dst->d_ == src->d_));
BLI_assert(dst->format_ == src->format_);
BLI_assert(dst->type_ == src->type_);
/* TODO support array / 3D textures. */
BLI_assert(dst->d_ == 0);
if (GLEW_ARB_copy_image && !GPU_texture_copy_workaround()) {
/* Opengl 4.3 */
int mip = 0;
/* NOTE: mip_size_get() won't override any dimension that is equal to 0. */
int extent[3] = {1, 1, 1};
this->mip_size_get(mip, extent);
glCopyImageSubData(
src->tex_id_, target_, mip, 0, 0, 0, dst->tex_id_, target_, mip, 0, 0, 0, UNPACK3(extent));
}
else {
/* Fallback for older GL. */
GPU_framebuffer_blit(
src->framebuffer_get(), 0, dst->framebuffer_get(), 0, to_framebuffer_bits(format_));
}
}
void *GLTexture::read(int mip, eGPUDataFormat type)
{
BLI_assert(!(format_flag_ & GPU_FORMAT_COMPRESSED));
BLI_assert(mip <= mipmaps_);
BLI_assert(validate_data_format(format_, type));
/* NOTE: mip_size_get() won't override any dimension that is equal to 0. */
int extent[3] = {1, 1, 1};
this->mip_size_get(mip, extent);
size_t sample_len = extent[0] * extent[1] * extent[2];
size_t sample_size = to_bytesize(format_, type);
size_t texture_size = sample_len * sample_size;
/* The AMD Pro driver has a bug that writes 8 bytes past the buffer size
* if the texture is big (see T66573). */
void *data = MEM_mallocN(texture_size + 8, "GPU_texture_read");
GLenum gl_format = to_gl_data_format(format_);
GLenum gl_type = to_gl(type);
if (GLEW_ARB_direct_state_access) {
glGetTextureImage(tex_id_, mip, gl_format, gl_type, texture_size, data);
}
else {
GLContext::state_manager_active_get()->texture_bind_temp(this);
if (type_ == GPU_TEXTURE_CUBE) {
size_t cube_face_size = texture_size / 6;
char *face_data = (char *)data;
for (int i = 0; i < 6; i++, face_data += cube_face_size) {
glGetTexImage(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, mip, gl_format, gl_type, face_data);
}
}
else {
glGetTexImage(target_, mip, gl_format, gl_type, data);
}
}
return data;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Getters & setters
* \{ */
void GLTexture::swizzle_set(const char swizzle[4])
{
GLint gl_swizzle[4] = {(GLint)swizzle_to_gl(swizzle[0]),
(GLint)swizzle_to_gl(swizzle[1]),
(GLint)swizzle_to_gl(swizzle[2]),
(GLint)swizzle_to_gl(swizzle[3])};
if (GLEW_ARB_direct_state_access) {
glTextureParameteriv(tex_id_, GL_TEXTURE_SWIZZLE_RGBA, gl_swizzle);
}
else {
GLContext::state_manager_active_get()->texture_bind_temp(this);
glTexParameteriv(target_, GL_TEXTURE_SWIZZLE_RGBA, gl_swizzle);
}
}
void GLTexture::mip_range_set(int min, int max)
{
BLI_assert(min <= max && min >= 0 && max <= mipmaps_);
mip_min_ = min;
mip_max_ = max;
if (GLEW_ARB_direct_state_access) {
glTextureParameteri(tex_id_, GL_TEXTURE_BASE_LEVEL, min);
glTextureParameteri(tex_id_, GL_TEXTURE_MAX_LEVEL, max);
}
else {
GLContext::state_manager_active_get()->texture_bind_temp(this);
glTexParameteri(target_, GL_TEXTURE_BASE_LEVEL, min);
glTexParameteri(target_, GL_TEXTURE_MAX_LEVEL, max);
}
}
struct GPUFrameBuffer *GLTexture::framebuffer_get(void)
{
if (framebuffer_) {
return framebuffer_;
}
BLI_assert(!(type_ & (GPU_TEXTURE_ARRAY | GPU_TEXTURE_CUBE | GPU_TEXTURE_1D | GPU_TEXTURE_3D)));
/* TODO(fclem) cleanup this. Don't use GPU object but blender::gpu ones. */
GPUTexture *gputex = reinterpret_cast<GPUTexture *>(static_cast<Texture *>(this));
framebuffer_ = GPU_framebuffer_create(name_);
GPU_framebuffer_texture_attach(framebuffer_, gputex, 0, 0);
return framebuffer_;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Sampler objects
* \{ */
GLuint GLTexture::samplers_[GPU_SAMPLER_MAX] = {0};
void GLTexture::samplers_init(void)
{
glGenSamplers(GPU_SAMPLER_MAX, samplers_);
for (int i = 0; i <= GPU_SAMPLER_ICON - 1; i++) {
eGPUSamplerState state = static_cast<eGPUSamplerState>(i);
GLenum clamp_type = (state & GPU_SAMPLER_CLAMP_BORDER) ? GL_CLAMP_TO_BORDER : GL_CLAMP_TO_EDGE;
GLenum wrap_s = (state & GPU_SAMPLER_REPEAT_S) ? GL_REPEAT : clamp_type;
GLenum wrap_t = (state & GPU_SAMPLER_REPEAT_T) ? GL_REPEAT : clamp_type;
GLenum wrap_r = (state & GPU_SAMPLER_REPEAT_R) ? GL_REPEAT : clamp_type;
GLenum mag_filter = (state & GPU_SAMPLER_FILTER) ? GL_LINEAR : GL_NEAREST;
GLenum min_filter = (state & GPU_SAMPLER_FILTER) ?
((state & GPU_SAMPLER_MIPMAP) ? GL_LINEAR_MIPMAP_LINEAR : GL_LINEAR) :
((state & GPU_SAMPLER_MIPMAP) ? GL_NEAREST_MIPMAP_LINEAR : GL_NEAREST);
GLenum compare_mode = (state & GPU_SAMPLER_COMPARE) ? GL_COMPARE_REF_TO_TEXTURE : GL_NONE;
glSamplerParameteri(samplers_[i], GL_TEXTURE_WRAP_S, wrap_s);
glSamplerParameteri(samplers_[i], GL_TEXTURE_WRAP_T, wrap_t);
glSamplerParameteri(samplers_[i], GL_TEXTURE_WRAP_R, wrap_r);
glSamplerParameteri(samplers_[i], GL_TEXTURE_MIN_FILTER, min_filter);
glSamplerParameteri(samplers_[i], GL_TEXTURE_MAG_FILTER, mag_filter);
glSamplerParameteri(samplers_[i], GL_TEXTURE_COMPARE_MODE, compare_mode);
glSamplerParameteri(samplers_[i], GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
/** Other states are left to default:
* - GL_TEXTURE_BORDER_COLOR is {0, 0, 0, 0}.
* - GL_TEXTURE_MIN_LOD is -1000.
* - GL_TEXTURE_MAX_LOD is 1000.
* - GL_TEXTURE_LOD_BIAS is 0.0f.
**/
}
samplers_update();
/* Custom sampler for icons. */
GLuint icon_sampler = samplers_[GPU_SAMPLER_ICON];
glSamplerParameteri(icon_sampler, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glSamplerParameteri(icon_sampler, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glSamplerParameterf(icon_sampler, GL_TEXTURE_LOD_BIAS, -0.5f);
}
void GLTexture::samplers_update(void)
{
if (!GLEW_EXT_texture_filter_anisotropic) {
return;
}
float max_anisotropy = 1.0f;
glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &max_anisotropy);
float aniso_filter = max_ff(max_anisotropy, U.anisotropic_filter);
for (int i = 0; i <= GPU_SAMPLER_ICON - 1; i++) {
eGPUSamplerState state = static_cast<eGPUSamplerState>(i);
if (state & GPU_SAMPLER_MIPMAP) {
glSamplerParameterf(samplers_[i], GL_TEXTURE_MAX_ANISOTROPY_EXT, aniso_filter);
}
}
}
void GLTexture::samplers_free(void)
{
glDeleteSamplers(GPU_SAMPLER_MAX, samplers_);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Proxy texture
*
* Dummy texture to see if the implementation supports the requested size.
* \{ */
/* NOTE: This only checks if this mipmap is valid / supported.
* TODO(fclem) make the check cover the whole mipmap chain. */
bool GLTexture::proxy_check(int mip)
{
/* Manual validation first, since some implementations have issues with proxy creation. */
int max_size = GPU_max_texture_size();
int max_3d_size = GPU_max_texture_3d_size();
int max_cube_size = GPU_max_cube_map_size();
int size[3] = {1, 1, 1};
this->mip_size_get(mip, size);
if (type_ & GPU_TEXTURE_ARRAY) {
if (this->layer_count() > GPU_max_texture_layers()) {
return false;
}
}
if (type_ == GPU_TEXTURE_3D) {
if (size[0] > max_3d_size || size[1] > max_3d_size || size[2] > max_3d_size) {
return false;
}
}
else if ((type_ & ~GPU_TEXTURE_ARRAY) == GPU_TEXTURE_2D) {
if (size[0] > max_size || size[1] > max_size) {
return false;
}
}
else if ((type_ & ~GPU_TEXTURE_ARRAY) == GPU_TEXTURE_1D) {
if (size[0] > max_size) {
return false;
}
}
else if ((type_ & ~GPU_TEXTURE_ARRAY) == GPU_TEXTURE_CUBE) {
if (size[0] > max_cube_size) {
return false;
}
}
if (GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_WIN, GPU_DRIVER_ANY) ||
GPU_type_matches(GPU_DEVICE_NVIDIA, GPU_OS_MAC, GPU_DRIVER_OFFICIAL) ||
GPU_type_matches(GPU_DEVICE_ATI, GPU_OS_UNIX, GPU_DRIVER_OFFICIAL)) {
/* Some AMD drivers have a faulty `GL_PROXY_TEXTURE_..` check.
* (see T55888, T56185, T59351).
* Checking with `GL_PROXY_TEXTURE_..` doesn't prevent `Out Of Memory` issue,
* it just states that the OGL implementation can support the texture.
* So we already manually check the maximum size and maximum number of layers.
* Same thing happens on Nvidia/macOS 10.15 (T78175). */
return true;
}
if ((type_ == GPU_TEXTURE_CUBE_ARRAY) &&
GPU_type_matches(GPU_DEVICE_ANY, GPU_OS_MAC, GPU_DRIVER_ANY)) {
/* Special fix for T79703. */
return true;
}
GLenum gl_proxy = to_gl_proxy(type_);
GLenum internal_format = to_gl_internal_format(format_);
GLenum gl_format = to_gl_data_format(format_);
GLenum gl_type = to_gl(to_data_format(format_));
/* Small exception: cube maps are validated with a 2D-style proxy call. */
int dimensions = (type_ == GPU_TEXTURE_CUBE) ? 2 : this->dimensions_count();
if (format_flag_ & GPU_FORMAT_COMPRESSED) {
size_t img_size = ((size[0] + 3) / 4) * ((size[1] + 3) / 4) * to_block_size(format_);
switch (dimensions) {
default:
case 1:
glCompressedTexImage1D(gl_proxy, mip, size[0], 0, gl_format, img_size, NULL);
break;
case 2:
glCompressedTexImage2D(gl_proxy, mip, UNPACK2(size), 0, gl_format, img_size, NULL);
break;
case 3:
glCompressedTexImage3D(gl_proxy, mip, UNPACK3(size), 0, gl_format, img_size, NULL);
break;
}
}
else {
switch (dimensions) {
default:
case 1:
glTexImage1D(gl_proxy, mip, internal_format, size[0], 0, gl_format, gl_type, NULL);
break;
case 2:
glTexImage2D(gl_proxy, mip, internal_format, UNPACK2(size), 0, gl_format, gl_type, NULL);
break;
case 3:
glTexImage3D(gl_proxy, mip, internal_format, UNPACK3(size), 0, gl_format, gl_type, NULL);
break;
}
}
int width = 0;
glGetTexLevelParameteriv(gl_proxy, 0, GL_TEXTURE_WIDTH, &width);
return (width > 0);
}
/** \} */
void GLTexture::check_feedback_loop(void)
{
/* The recursive downsample workaround breaks this check.
* See recursive_downsample() for more info. */
if (GPU_mip_render_workaround()) {
return;
}
GLFrameBuffer *fb = static_cast<GLFrameBuffer *>(GPU_context_active_get()->active_fb);
for (int i = 0; i < ARRAY_SIZE(fb_); i++) {
if (fb_[i] == fb) {
GPUAttachmentType type = fb_attachment_[i];
GPUAttachment attachment = fb->attachments_[type];
if (attachment.mip <= mip_max_ && attachment.mip >= mip_min_) {
char msg[256];
SNPRINTF(msg,
"Feedback loop: Trying to bind a texture (%s) with mip range %d-%d but mip %d is "
"attached to the active framebuffer (%s)",
name_,
mip_min_,
mip_max_,
attachment.mip,
fb->name_);
debug::raise_gl_error(msg);
}
return;
}
}
}
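The feedback-loop test above reduces to a range check between the texture's sampling mip range and the mip attached to the active framebuffer. A minimal sketch of that condition; the helper name is illustrative and not part of the diff:
/* A feedback loop exists when the attached mip lies inside the range being sampled. */
static bool mip_overlaps_attachment(int mip_min, int mip_max, int attached_mip)
{
  return attached_mip >= mip_min && attached_mip <= mip_max;
}
/* e.g. mip_overlaps_attachment(0, 0, 0) is true: sampling mip 0 while rendering to mip 0. */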
/* TODO(fclem) Legacy. Should be removed at some point. */
uint GLTexture::gl_bindcode_get(void) const
{
return tex_id_;
}
} // namespace blender::gpu

View File

@@ -31,14 +31,250 @@
#pragma once
#include "MEM_guardedalloc.h"
#include "BLI_assert.h"
#include "gpu_texture_private.hh"
#include "glew-mx.h"
struct GPUFrameBuffer;
namespace blender {
namespace gpu {
static GLenum to_gl(eGPUDataFormat format)
class GLTexture : public Texture {
friend class GLStateManager;
private:
/** All samplers states. */
static GLuint samplers_[GPU_SAMPLER_MAX];
/** Target to bind the texture to (GL_TEXTURE_1D, GL_TEXTURE_2D, etc.). */
GLenum target_ = -1;
/** OpenGL identifier for the texture. */
GLuint tex_id_ = 0;
/** Legacy workaround for texture copy. Created when using framebuffer_get(). */
struct GPUFrameBuffer *framebuffer_ = NULL;
/** True if this texture is bound to at least one texture unit. */
/* TODO(fclem) How do we ensure thread safety here? */
bool is_bound_;
public:
GLTexture(const char *name);
~GLTexture();
void update_sub(
int mip, int offset[3], int extent[3], eGPUDataFormat format, const void *data) override;
void generate_mipmap(void) override;
void copy_to(Texture *tex) override;
void clear(eGPUDataFormat format, const void *data) override;
void swizzle_set(const char swizzle_mask[4]) override;
void mip_range_set(int min, int max) override;
void *read(int mip, eGPUDataFormat format) override;
void check_feedback_loop(void);
/* TODO(fclem) Legacy. Should be removed at some point. */
uint gl_bindcode_get(void) const override;
static void samplers_init(void);
static void samplers_free(void);
static void samplers_update(void);
protected:
bool init_internal(void) override;
bool init_internal(GPUVertBuf *vbo) override;
private:
bool proxy_check(int mip);
void ensure_mipmaps(int mip);
void update_sub_direct_state_access(
int mip, int offset[3], int extent[3], GLenum gl_format, GLenum gl_type, const void *data);
GPUFrameBuffer *framebuffer_get(void);
MEM_CXX_CLASS_ALLOC_FUNCS("GLTexture")
};
inline GLenum to_gl_internal_format(eGPUTextureFormat format)
{
/* You can add any of the available types to this list.
* For the available types, see GPU_texture.h. */
switch (format) {
/* Formats texture & renderbuffer */
case GPU_RGBA8UI:
return GL_RGBA8UI;
case GPU_RGBA8I:
return GL_RGBA8I;
case GPU_RGBA8:
return GL_RGBA8;
case GPU_RGBA32UI:
return GL_RGBA32UI;
case GPU_RGBA32I:
return GL_RGBA32I;
case GPU_RGBA32F:
return GL_RGBA32F;
case GPU_RGBA16UI:
return GL_RGBA16UI;
case GPU_RGBA16I:
return GL_RGBA16I;
case GPU_RGBA16F:
return GL_RGBA16F;
case GPU_RGBA16:
return GL_RGBA16;
case GPU_RG8UI:
return GL_RG8UI;
case GPU_RG8I:
return GL_RG8I;
case GPU_RG8:
return GL_RG8;
case GPU_RG32UI:
return GL_RG32UI;
case GPU_RG32I:
return GL_RG32I;
case GPU_RG32F:
return GL_RG32F;
case GPU_RG16UI:
return GL_RG16UI;
case GPU_RG16I:
return GL_RG16I;
case GPU_RG16F:
return GL_RG16F;
case GPU_RG16:
return GL_RG16;
case GPU_R8UI:
return GL_R8UI;
case GPU_R8I:
return GL_R8I;
case GPU_R8:
return GL_R8;
case GPU_R32UI:
return GL_R32UI;
case GPU_R32I:
return GL_R32I;
case GPU_R32F:
return GL_R32F;
case GPU_R16UI:
return GL_R16UI;
case GPU_R16I:
return GL_R16I;
case GPU_R16F:
return GL_R16F;
case GPU_R16:
return GL_R16;
/* Special formats texture & renderbuffer */
case GPU_R11F_G11F_B10F:
return GL_R11F_G11F_B10F;
case GPU_DEPTH32F_STENCIL8:
return GL_DEPTH32F_STENCIL8;
case GPU_DEPTH24_STENCIL8:
return GL_DEPTH24_STENCIL8;
case GPU_SRGB8_A8:
return GL_SRGB8_ALPHA8;
/* Texture only format */
case GPU_RGB16F:
return GL_RGB16F;
/* Special formats texture only */
case GPU_SRGB8_A8_DXT1:
return GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT1_EXT;
case GPU_SRGB8_A8_DXT3:
return GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT3_EXT;
case GPU_SRGB8_A8_DXT5:
return GL_COMPRESSED_SRGB_ALPHA_S3TC_DXT5_EXT;
case GPU_RGBA8_DXT1:
return GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
case GPU_RGBA8_DXT3:
return GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
case GPU_RGBA8_DXT5:
return GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
/* Depth Formats */
case GPU_DEPTH_COMPONENT32F:
return GL_DEPTH_COMPONENT32F;
case GPU_DEPTH_COMPONENT24:
return GL_DEPTH_COMPONENT24;
case GPU_DEPTH_COMPONENT16:
return GL_DEPTH_COMPONENT16;
default:
BLI_assert(!"Texture format incorrect or unsupported\n");
return 0;
}
}
inline GLenum to_gl_target(eGPUTextureType type)
{
switch (type) {
case GPU_TEXTURE_1D:
return GL_TEXTURE_1D;
case GPU_TEXTURE_1D_ARRAY:
return GL_TEXTURE_1D_ARRAY;
case GPU_TEXTURE_2D:
return GL_TEXTURE_2D;
case GPU_TEXTURE_2D_ARRAY:
return GL_TEXTURE_2D_ARRAY;
case GPU_TEXTURE_3D:
return GL_TEXTURE_3D;
case GPU_TEXTURE_CUBE:
return GL_TEXTURE_CUBE_MAP;
case GPU_TEXTURE_CUBE_ARRAY:
return GL_TEXTURE_CUBE_MAP_ARRAY_ARB;
case GPU_TEXTURE_BUFFER:
return GL_TEXTURE_BUFFER;
default:
BLI_assert(0);
return GL_TEXTURE_1D;
}
}
inline GLenum to_gl_proxy(eGPUTextureType type)
{
switch (type) {
case GPU_TEXTURE_1D:
return GL_PROXY_TEXTURE_1D;
case GPU_TEXTURE_1D_ARRAY:
return GL_PROXY_TEXTURE_1D_ARRAY;
case GPU_TEXTURE_2D:
return GL_PROXY_TEXTURE_2D;
case GPU_TEXTURE_2D_ARRAY:
return GL_PROXY_TEXTURE_2D_ARRAY;
case GPU_TEXTURE_3D:
return GL_PROXY_TEXTURE_3D;
case GPU_TEXTURE_CUBE:
return GL_PROXY_TEXTURE_CUBE_MAP;
case GPU_TEXTURE_CUBE_ARRAY:
return GL_PROXY_TEXTURE_CUBE_MAP_ARRAY_ARB;
case GPU_TEXTURE_BUFFER:
default:
BLI_assert(0);
return GL_TEXTURE_1D;
}
}
inline GLenum swizzle_to_gl(const char swizzle)
{
switch (swizzle) {
default:
case 'x':
case 'r':
return GL_RED;
case 'y':
case 'g':
return GL_GREEN;
case 'z':
case 'b':
return GL_BLUE;
case 'w':
case 'a':
return GL_ALPHA;
case '0':
return GL_ZERO;
case '1':
return GL_ONE;
}
}
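/* Usage sketch for the helper above (the function name is hypothetical, not part of this
 * header): this is the kind of call a swizzle_set() implementation is expected to make,
 * converting the 4-character mask into the GLint values consumed by GL_TEXTURE_SWIZZLE_RGBA.
 * Assumes the texture is already bound to `target`. */
inline void swizzle_apply_example(GLenum target, const char swizzle[4])
{
  GLint gl_swizzle[4] = {(GLint)swizzle_to_gl(swizzle[0]),
                         (GLint)swizzle_to_gl(swizzle[1]),
                         (GLint)swizzle_to_gl(swizzle[2]),
                         (GLint)swizzle_to_gl(swizzle[3])};
  glTexParameteriv(target, GL_TEXTURE_SWIZZLE_RGBA, gl_swizzle);
}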
inline GLenum to_gl(eGPUDataFormat format)
{
switch (format) {
case GPU_DATA_FLOAT:
@@ -59,8 +295,67 @@ static GLenum to_gl(eGPUDataFormat format)
}
}
/* Definitely not complete; edit according to the GL specification. */
inline GLenum to_gl_data_format(eGPUTextureFormat format)
{
/* You can add any of the available types to this list.
* For the available types, see GPU_texture.h. */
switch (format) {
case GPU_R8I:
case GPU_R8UI:
case GPU_R16I:
case GPU_R16UI:
case GPU_R32I:
case GPU_R32UI:
return GL_RED_INTEGER;
case GPU_RG8I:
case GPU_RG8UI:
case GPU_RG16I:
case GPU_RG16UI:
case GPU_RG32I:
case GPU_RG32UI:
return GL_RG_INTEGER;
case GPU_RGBA8I:
case GPU_RGBA8UI:
case GPU_RGBA16I:
case GPU_RGBA16UI:
case GPU_RGBA32I:
case GPU_RGBA32UI:
return GL_RGBA_INTEGER;
case GPU_R8:
case GPU_R16:
case GPU_R16F:
case GPU_R32F:
return GL_RED;
case GPU_RG8:
case GPU_RG16:
case GPU_RG16F:
case GPU_RG32F:
return GL_RG;
case GPU_R11F_G11F_B10F:
case GPU_RGB16F:
return GL_RGB;
case GPU_RGBA8:
case GPU_SRGB8_A8:
case GPU_RGBA16:
case GPU_RGBA16F:
case GPU_RGBA32F:
return GL_RGBA;
case GPU_DEPTH24_STENCIL8:
case GPU_DEPTH32F_STENCIL8:
return GL_DEPTH_STENCIL;
case GPU_DEPTH_COMPONENT16:
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT32F:
return GL_DEPTH_COMPONENT;
default:
BLI_assert(!"Texture format incorrect or unsupported\n");
return 0;
}
}
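/* Illustration of how the conversion helpers combine in a single upload call. This is a
 * hypothetical standalone snippet (not part of the header); `to_data_format()` is assumed to
 * come from gpu_texture_private.hh, as used elsewhere in the backend. */
inline void upload_2d_example(
    GLenum target, eGPUTextureFormat format, int w, int h, const void *data)
{
  glTexImage2D(target,
               0,
               to_gl_internal_format(format), /* Sized internal format, e.g. GL_RGBA16F. */
               w,
               h,
               0,
               to_gl_data_format(format), /* Client pixel layout, e.g. GL_RGBA. */
               to_gl(to_data_format(format)), /* Client data type, e.g. GL_FLOAT. */
               data);
}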
/* Assume Unorm / Float target. Used with glReadPixels. */
static GLenum channel_len_to_gl(int channel_len)
inline GLenum channel_len_to_gl(int channel_len)
{
switch (channel_len) {
case 1:

View File

@@ -736,11 +736,12 @@ const char *IMB_ffmpeg_last_error(void);
*
* \attention defined in util_gpu.c
*/
struct GPUTexture *IMB_create_gpu_texture(struct ImBuf *ibuf,
struct GPUTexture *IMB_create_gpu_texture(const char *name,
struct ImBuf *ibuf,
bool use_high_bitdepth,
bool use_premult);
struct GPUTexture *IMB_touch_gpu_texture(
struct ImBuf *ibuf, int w, int h, int layers, bool use_high_bitdepth);
const char *name, struct ImBuf *ibuf, int w, int h, int layers, bool use_high_bitdepth);
void IMB_update_gpu_texture_sub(struct GPUTexture *tex,
struct ImBuf *ibuf,
int x,

View File

@@ -159,14 +159,20 @@ static void *imb_gpu_get_data(const ImBuf *ibuf,
/* The ibuf is only used to detect the storage type. The produced texture will have undefined
* content. It will need to be populated using IMB_update_gpu_texture_sub(). */
GPUTexture *IMB_touch_gpu_texture(ImBuf *ibuf, int w, int h, int layers, bool use_high_bitdepth)
GPUTexture *IMB_touch_gpu_texture(
const char *name, ImBuf *ibuf, int w, int h, int layers, bool use_high_bitdepth)
{
eGPUDataFormat data_format;
eGPUTextureFormat tex_format;
imb_gpu_get_format(ibuf, use_high_bitdepth, &data_format, &tex_format);
GPUTexture *tex = GPU_texture_create_nD(
w, h, layers, 2, NULL, tex_format, data_format, 0, false, NULL);
GPUTexture *tex;
if (layers > 0) {
tex = GPU_texture_create_2d_array(name, w, h, layers, 1, tex_format, NULL);
}
else {
tex = GPU_texture_create_2d(name, w, h, 9999, tex_format, NULL);
}
GPU_texture_anisotropic_filter(tex, true);
return tex;
@@ -205,7 +211,10 @@ void IMB_update_gpu_texture_sub(GPUTexture *tex,
}
}
GPUTexture *IMB_create_gpu_texture(ImBuf *ibuf, bool use_high_bitdepth, bool use_premult)
GPUTexture *IMB_create_gpu_texture(const char *name,
ImBuf *ibuf,
bool use_high_bitdepth,
bool use_premult)
{
GPUTexture *tex = NULL;
const int size[2] = {GPU_texture_size_with_limit(ibuf->x), GPU_texture_size_with_limit(ibuf->y)};
@@ -224,8 +233,12 @@ GPUTexture *IMB_create_gpu_texture(ImBuf *ibuf, bool use_high_bitdepth, bool use
fprintf(stderr, "Unable to load non-power-of-two DXT image resolution,");
}
else {
tex = GPU_texture_create_compressed(
ibuf->x, ibuf->y, ibuf->dds_data.nummipmaps, compressed_format, ibuf->dds_data.data);
tex = GPU_texture_create_compressed_2d(name,
ibuf->x,
ibuf->y,
ibuf->dds_data.nummipmaps,
compressed_format,
ibuf->dds_data.data);
if (tex != NULL) {
return tex;
@@ -248,7 +261,8 @@ GPUTexture *IMB_create_gpu_texture(ImBuf *ibuf, bool use_high_bitdepth, bool use
void *data = imb_gpu_get_data(ibuf, do_rescale, size, compress_as_srgb, use_premult, &freebuf);
/* Create Texture. */
tex = GPU_texture_create_nD(UNPACK2(size), 0, 2, data, tex_format, data_format, 0, false, NULL);
tex = GPU_texture_create_2d(name, UNPACK2(size), 9999, tex_format, NULL);
GPU_texture_update(tex, data_format, data);
GPU_texture_anisotropic_filter(tex, true);
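/* The general shape of the new creation API, as used above: create a named texture with an
 * explicit mip count, then upload with an eGPUDataFormat. This block is illustrative only
 * (the function name and the `pixels` buffer are hypothetical); the oversized mip count is
 * presumably clamped internally to the maximum for the given size. */
static GPUTexture *example_create_float_texture(const float *pixels)
{
  GPUTexture *tex = GPU_texture_create_2d("example_tex", 512, 512, 9999, GPU_RGBA16F, NULL);
  GPU_texture_update(tex, GPU_DATA_FLOAT, pixels);
  return tex;
}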

View File

@@ -364,8 +364,7 @@ static void rna_userdef_load_ui_update(Main *UNUSED(bmain), Scene *UNUSED(scene)
static void rna_userdef_anisotropic_update(Main *bmain, Scene *scene, PointerRNA *ptr)
{
GPU_samplers_free();
GPU_samplers_init();
GPU_samplers_update();
rna_userdef_update(bmain, scene, ptr);
}

View File

@@ -2217,13 +2217,11 @@ static void radial_control_set_tex(RadialControl *rc)
rc->use_secondary_tex,
!ELEM(rc->subtype, PROP_NONE, PROP_PIXEL, PROP_DISTANCE)))) {
rc->texture = GPU_texture_create_nD(
ibuf->x, ibuf->y, 0, 2, ibuf->rect_float, GPU_R8, GPU_DATA_FLOAT, 0, false, NULL);
GPU_texture_filter_mode(rc->texture, true);
rc->texture = GPU_texture_create_2d(
"radial_control", ibuf->x, ibuf->y, 1, GPU_R8, ibuf->rect_float);
GPU_texture_bind(rc->texture, 0);
GPU_texture_filter_mode(rc->texture, true);
GPU_texture_swizzle_set(rc->texture, "111r");
GPU_texture_unbind(rc->texture);
MEM_freeN(ibuf->rect_float);
MEM_freeN(ibuf);