/*
* ***** BEGIN GPL LICENSE BLOCK *****
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2005 Blender Foundation.
* All rights reserved.
*
* The Original Code is: all of this file.
*
* Contributor(s): Brecht Van Lommel.
*
* ***** END GPL LICENSE BLOCK *****
*/
/** \file blender/gpu/intern/gpu_draw.c
* \ingroup gpu
*
* Utility functions for dealing with OpenGL texture & material context,
* mipmap generation and light objects.
*
* These are some obscure rendering functions that used to be shared between the game engine
* and Blender, kept in this module to avoid duplication
* and to abstract them away from the rest of the code a bit.
*/
#include <string.h>
#include "BLI_blenlib.h"
#include "BLI_hash.h"
#include "BLI_linklist.h"
#include "BLI_math.h"
#include "BLI_threads.h"
#include "BLI_utildefines.h"
#include "DNA_lamp_types.h"
#include "DNA_material_types.h"
#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_modifier_types.h"
#include "DNA_node_types.h"
#include "DNA_object_types.h"
#include "DNA_scene_types.h"
#include "DNA_smoke_types.h"
#include "DNA_view3d_types.h"
#include "DNA_particle_types.h"
#include "MEM_guardedalloc.h"
#include "IMB_imbuf.h"
#include "IMB_imbuf_types.h"
#include "BKE_colorband.h"
#include "BKE_global.h"
#include "BKE_image.h"
#include "BKE_main.h"
#include "BKE_material.h"
#include "BKE_node.h"
#include "BKE_scene.h"
#include "GPU_draw.h"
#include "GPU_extensions.h"
#include "GPU_glew.h"
#include "GPU_material.h"
#include "GPU_matrix.h"
#include "GPU_shader.h"
#include "GPU_texture.h"
#include "PIL_time.h"
#ifdef WITH_SMOKE
# include "smoke_API.h"
#endif
/* Checking powers of two for images since OpenGL ES requires it */
#ifdef WITH_DDS
static bool is_power_of_2_resolution(int w, int h)
{
return is_power_of_2_i(w) && is_power_of_2_i(h);
}
#endif
static bool is_over_resolution_limit(GLenum textarget, int w, int h)
{
int size = (textarget == GL_TEXTURE_2D) ?
GPU_max_texture_size() : GPU_max_cube_map_size();
int reslimit = (U.glreslimit != 0) ?
min_ii(U.glreslimit, size) : size;
return (w > reslimit || h > reslimit);
}
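/* Return a texture dimension suitable for upload: sizes above the user/GL texture size
 * limit are clamped to that limit, otherwise rounded down to the nearest power of two. */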
static int smaller_power_of_2_limit(int num)
{
int reslimit = (U.glreslimit != 0) ?
min_ii(U.glreslimit, GPU_max_texture_size()) :
GPU_max_texture_size();
/* take texture clamping into account */
if (num > reslimit)
return reslimit;
return power_of_2_min_i(num);
}
/* Current OpenGL state caching for GPU_set_tpage */
static struct GPUTextureState {
/* also controls min/mag filtering */
bool domipmap;
/* only use when 'domipmap' is set */
bool linearmipmap;
/* store this so that new images created while texture painting won't be set to mipmapped */
bool texpaint;
float anisotropic;
int gpu_mipmap;
} GTS = {1, 0, 0, 1.0f, 0};
/* Mipmap settings */
void GPU_set_gpu_mipmapping(Main *bmain, int gpu_mipmap)
{
int old_value = GTS.gpu_mipmap;
/* only actually enable if it's supported */
GTS.gpu_mipmap = gpu_mipmap;
if (old_value != GTS.gpu_mipmap) {
GPU_free_images(bmain);
}
}
void GPU_set_mipmap(Main *bmain, bool mipmap)
{
if (GTS.domipmap != mipmap) {
GPU_free_images(bmain);
GTS.domipmap = mipmap;
}
}
void GPU_set_linear_mipmap(bool linear)
{
if (GTS.linearmipmap != linear) {
GTS.linearmipmap = linear;
}
}
bool GPU_get_mipmap(void)
{
return GTS.domipmap && !GTS.texpaint;
}
bool GPU_get_linear_mipmap(void)
{
return GTS.linearmipmap;
}
static GLenum gpu_get_mipmap_filter(bool mag)
{
/* linearmipmap is off by default; when mipmapping is off,
 * use unfiltered display */
if (mag) {
if (GTS.domipmap)
return GL_LINEAR;
else
return GL_NEAREST;
}
else {
if (GTS.domipmap) {
if (GTS.linearmipmap) {
return GL_LINEAR_MIPMAP_LINEAR;
}
else {
return GL_LINEAR_MIPMAP_NEAREST;
}
}
else {
return GL_NEAREST;
}
}
}
/* Anisotropic filtering settings */
void GPU_set_anisotropic(Main *bmain, float value)
{
if (GTS.anisotropic != value) {
GPU_free_images(bmain);
/* Clamp value to the maximum value the graphics card supports */
const float max = GPU_max_texture_anisotropy();
if (value > max)
value = max;
GTS.anisotropic = value;
}
}
float GPU_get_anisotropic(void)
{
return GTS.anisotropic;
}
/* Set OpenGL state for an MTFace */
static GPUTexture **gpu_get_image_gputexture(Image *ima, GLenum textarget)
{
if (textarget == GL_TEXTURE_2D)
return &ima->gputexture[TEXTARGET_TEXTURE_2D];
else if (textarget == GL_TEXTURE_CUBE_MAP)
return &ima->gputexture[TEXTARGET_TEXTURE_CUBE_MAP];
return NULL;
}
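/* The helpers below convert a float image buffer from Blender's scene linear space to a
 * display (sRGB) buffer and unpremultiply alpha; large buffers are split into scanline
 * ranges and processed in threads. */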
typedef struct VerifyThreadData {
ImBuf *ibuf;
float *srgb_frect;
} VerifyThreadData;
static void gpu_verify_high_bit_srgb_buffer_slice(
float *srgb_frect,
ImBuf *ibuf,
const int start_line,
const int height)
{
size_t offset = ibuf->channels * start_line * ibuf->x;
float *current_srgb_frect = srgb_frect + offset;
float *current_rect_float = ibuf->rect_float + offset;
IMB_buffer_float_from_float(
current_srgb_frect,
current_rect_float,
ibuf->channels,
IB_PROFILE_SRGB,
IB_PROFILE_LINEAR_RGB, true,
ibuf->x, height,
ibuf->x, ibuf->x);
IMB_buffer_float_unpremultiply(current_srgb_frect, ibuf->x, height);
}
static void verify_thread_do(
void *data_v,
int start_scanline,
int num_scanlines)
{
VerifyThreadData *data = (VerifyThreadData *)data_v;
gpu_verify_high_bit_srgb_buffer_slice(
data->srgb_frect,
data->ibuf,
start_scanline,
num_scanlines);
}
static void gpu_verify_high_bit_srgb_buffer(
float *srgb_frect,
ImBuf *ibuf)
{
if (ibuf->y < 64) {
gpu_verify_high_bit_srgb_buffer_slice(
srgb_frect,
ibuf,
0, ibuf->y);
}
else {
VerifyThreadData data;
data.ibuf = ibuf;
data.srgb_frect = srgb_frect;
IMB_processor_apply_threaded_scanlines(ibuf->y, verify_thread_do, &data);
}
}
GPUTexture *GPU_texture_from_blender(
Image *ima,
ImageUser *iuser,
int textarget,
bool is_data,
double UNUSED(time))
{
if (ima == NULL) {
return NULL;
}
/* Test if we already have a texture. */
GPUTexture **tex = gpu_get_image_gputexture(ima, textarget);
if (*tex) {
return *tex;
}
/* Check if we have a valid image. If not, we return a dummy
* texture with zero bindcode so we don't keep trying. */
uint bindcode = 0;
if (ima->ok == 0) {
*tex = GPU_texture_from_bindcode(textarget, bindcode);
return *tex;
}
/* currently, tpage refresh is used by ima sequences */
if (ima->tpageflag & IMA_TPAGE_REFRESH) {
GPU_free_image(ima);
ima->tpageflag &= ~IMA_TPAGE_REFRESH;
}
/* check if we have a valid image buffer */
ImBuf *ibuf = BKE_image_acquire_ibuf(ima, iuser, NULL);
if (ibuf == NULL) {
*tex = GPU_texture_from_bindcode(textarget, bindcode);
return *tex;
}
/* flag to determine whether deep format is used */
bool use_high_bit_depth = false, do_color_management = false;
if (ibuf->rect_float) {
if (U.use_16bit_textures) {
/* use high precision textures. This is relatively harmless because OpenGL gives us
* a high precision format only if it is available */
use_high_bit_depth = true;
}
else if (ibuf->rect == NULL) {
IMB_rect_from_float(ibuf);
}
/* we may skip this in high precision, but if not, we need to have a valid buffer here */
else if (ibuf->userflags & IB_RECT_INVALID) {
IMB_rect_from_float(ibuf);
}
/* TODO unneeded when float images are correctly treated as linear always */
if (!is_data) {
do_color_management = true;
}
}
const int rectw = ibuf->x;
const int recth = ibuf->y;
uint *rect = ibuf->rect;
float *frect = NULL;
float *srgb_frect = NULL;
if (use_high_bit_depth) {
if (do_color_management) {
frect = srgb_frect = MEM_mallocN(ibuf->x * ibuf->y * sizeof(*srgb_frect) * 4, "floar_buf_col_cor");
gpu_verify_high_bit_srgb_buffer(srgb_frect, ibuf);
}
else {
frect = ibuf->rect_float;
}
}
const bool mipmap = GPU_get_mipmap();
#ifdef WITH_DDS
if (ibuf->ftype == IMB_FTYPE_DDS) {
GPU_create_gl_tex_compressed(&bindcode, rect, rectw, recth, textarget, mipmap, ima, ibuf);
}
else
#endif
{
GPU_create_gl_tex(&bindcode, rect, frect, rectw, recth, textarget, mipmap, use_high_bit_depth, ima);
}
/* mark as non-color data texture */
if (bindcode) {
if (is_data)
ima->tpageflag |= IMA_GLBIND_IS_DATA;
else
ima->tpageflag &= ~IMA_GLBIND_IS_DATA;
}
/* clean up */
if (srgb_frect)
MEM_freeN(srgb_frect);
BKE_image_release_ibuf(ima, ibuf, NULL);
*tex = GPU_texture_from_bindcode(textarget, bindcode);
return *tex;
}
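/* Split a 3x2 cross-layout environment map (see the diagram below) into six separate
 * square face buffers for GL_TEXTURE_CUBE_MAP upload; returns NULL when the input
 * buffer is missing or the faces would not be square. */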
static void **gpu_gen_cube_map(uint *rect, float *frect, int rectw, int recth, bool use_high_bit_depth)
{
size_t block_size = use_high_bit_depth ? sizeof(float[4]) : sizeof(uchar[4]);
void **sides = NULL;
int h = recth / 2;
int w = rectw / 3;
if ((use_high_bit_depth && frect == NULL) || (!use_high_bit_depth && rect == NULL) || w != h)
return sides;
/* PosX, NegX, PosY, NegY, PosZ, NegZ */
sides = MEM_mallocN(sizeof(void *) * 6, "");
for (int i = 0; i < 6; i++)
sides[i] = MEM_mallocN(block_size * w * h, "");
/* divide image into six parts */
/* ______________________
* | | | |
* | NegX | NegY | PosX |
* |______|______|______|
* | | | |
* | NegZ | PosZ | PosY |
* |______|______|______|
*/
if (use_high_bit_depth) {
float (*frectb)[4] = (float(*)[4])frect;
float (**fsides)[4] = (float(**)[4])sides;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
memcpy(&fsides[0][x * h + y], &frectb[(recth - y - 1) * rectw + 2 * w + x], block_size);
memcpy(&fsides[1][x * h + y], &frectb[(y + h) * rectw + w - 1 - x], block_size);
memcpy(&fsides[3][y * w + x], &frectb[(recth - y - 1) * rectw + 2 * w - 1 - x], block_size);
memcpy(&fsides[5][y * w + x], &frectb[(h - y - 1) * rectw + w - 1 - x], block_size);
}
memcpy(&fsides[2][y * w], frectb[y * rectw + 2 * w], block_size * w);
memcpy(&fsides[4][y * w], frectb[y * rectw + w], block_size * w);
}
}
else {
uint **isides = (uint **)sides;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
isides[0][x * h + y] = rect[(recth - y - 1) * rectw + 2 * w + x];
isides[1][x * h + y] = rect[(y + h) * rectw + w - 1 - x];
isides[3][y * w + x] = rect[(recth - y - 1) * rectw + 2 * w - 1 - x];
isides[5][y * w + x] = rect[(h - y - 1) * rectw + w - 1 - x];
}
memcpy(&isides[2][y * w], &rect[y * rectw + 2 * w], block_size * w);
memcpy(&isides[4][y * w], &rect[y * rectw + w], block_size * w);
}
}
return sides;
}
static void gpu_del_cube_map(void **cube_map)
{
int i;
if (cube_map == NULL)
return;
for (i = 0; i < 6; i++)
MEM_freeN(cube_map[i]);
MEM_freeN(cube_map);
}
/* Image *ima can be NULL */
void GPU_create_gl_tex(
uint *bind, uint *rect, float *frect, int rectw, int recth,
int textarget, bool mipmap, bool use_high_bit_depth, Image *ima)
{
ImBuf *ibuf = NULL;
int tpx = rectw;
int tpy = recth;
/* create image */
glGenTextures(1, (GLuint *)bind);
glBindTexture(textarget, *bind);
if (textarget == GL_TEXTURE_2D) {
if (use_high_bit_depth) {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA16F, rectw, recth, 0, GL_RGBA, GL_FLOAT, frect);
}
else {
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, rectw, recth, 0, GL_RGBA, GL_UNSIGNED_BYTE, rect);
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, gpu_get_mipmap_filter(1));
if (GPU_get_mipmap() && mipmap) {
if (GTS.gpu_mipmap) {
glGenerateMipmap(GL_TEXTURE_2D);
}
else {
int i;
if (!ibuf) {
if (use_high_bit_depth) {
ibuf = IMB_allocFromBuffer(NULL, frect, tpx, tpy);
}
else {
ibuf = IMB_allocFromBuffer(rect, NULL, tpx, tpy);
}
}
IMB_makemipmap(ibuf, true);
for (i = 1; i < ibuf->miptot; i++) {
ImBuf *mip = ibuf->mipmap[i - 1];
if (use_high_bit_depth) {
glTexImage2D(GL_TEXTURE_2D, i, GL_RGBA16F, mip->x, mip->y, 0, GL_RGBA, GL_FLOAT, mip->rect_float);
}
else {
glTexImage2D(GL_TEXTURE_2D, i, GL_RGBA8, mip->x, mip->y, 0, GL_RGBA, GL_UNSIGNED_BYTE, mip->rect);
}
}
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, gpu_get_mipmap_filter(0));
if (ima)
ima->tpageflag |= IMA_MIPMAP_COMPLETE;
}
else {
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}
}
else if (textarget == GL_TEXTURE_CUBE_MAP) {
int w = rectw / 3, h = recth / 2;
if (h == w && is_power_of_2_i(h) && !is_over_resolution_limit(textarget, h, w)) {
void **cube_map = gpu_gen_cube_map(rect, frect, rectw, recth, use_high_bit_depth);
GLenum informat = use_high_bit_depth ? GL_RGBA16F : GL_RGBA8;
GLenum type = use_high_bit_depth ? GL_FLOAT : GL_UNSIGNED_BYTE;
if (cube_map)
for (int i = 0; i < 6; i++)
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, informat, w, h, 0, GL_RGBA, type, cube_map[i]);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, gpu_get_mipmap_filter(1));
if (GPU_get_mipmap() && mipmap) {
if (GTS.gpu_mipmap) {
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
}
else {
if (!ibuf) {
if (use_high_bit_depth) {
ibuf = IMB_allocFromBuffer(NULL, frect, tpx, tpy);
}
else {
ibuf = IMB_allocFromBuffer(rect, NULL, tpx, tpy);
}
}
IMB_makemipmap(ibuf, true);
for (int i = 1; i < ibuf->miptot; i++) {
ImBuf *mip = ibuf->mipmap[i - 1];
void **mip_cube_map = gpu_gen_cube_map(
mip->rect, mip->rect_float,
mip->x, mip->y, use_high_bit_depth);
int mipw = mip->x / 3, miph = mip->y / 2;
if (mip_cube_map) {
for (int j = 0; j < 6; j++) {
glTexImage2D(
GL_TEXTURE_CUBE_MAP_POSITIVE_X + j, i,
informat, mipw, miph, 0, GL_RGBA, type, mip_cube_map[j]);
}
}
gpu_del_cube_map(mip_cube_map);
}
}
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, gpu_get_mipmap_filter(0));
if (ima)
ima->tpageflag |= IMA_MIPMAP_COMPLETE;
}
else {
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
}
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
gpu_del_cube_map(cube_map);
}
else {
printf("Incorrect envmap size\n");
}
}
if (GLEW_EXT_texture_filter_anisotropic)
glTexParameterf(textarget, GL_TEXTURE_MAX_ANISOTROPY_EXT, GPU_get_anisotropic());
glBindTexture(textarget, 0);
if (ibuf)
IMB_freeImBuf(ibuf);
}
/**
* GPU_upload_dxt_texture() assumes that the texture is already bound and ready to go.
* This is so the viewport and the BGE can share some code.
* Returns false if the provided ImBuf doesn't have a supported DXT compression format
*/
bool GPU_upload_dxt_texture(ImBuf *ibuf)
{
#ifdef WITH_DDS
GLint format = 0;
int blocksize, height, width, i, size, offset = 0;
width = ibuf->x;
height = ibuf->y;
if (GLEW_EXT_texture_compression_s3tc) {
if (ibuf->dds_data.fourcc == FOURCC_DXT1)
format = GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
else if (ibuf->dds_data.fourcc == FOURCC_DXT3)
format = GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
else if (ibuf->dds_data.fourcc == FOURCC_DXT5)
format = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
}
if (format == 0) {
fprintf(stderr, "Unable to find a suitable DXT compression, falling back to uncompressed\n");
return false;
}
if (!is_power_of_2_resolution(width, height)) {
fprintf(stderr, "Unable to load non-power-of-two DXT image resolution, falling back to uncompressed\n");
return false;
}
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, gpu_get_mipmap_filter(0));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, gpu_get_mipmap_filter(1));
if (GLEW_EXT_texture_filter_anisotropic)
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, GPU_get_anisotropic());
blocksize = (ibuf->dds_data.fourcc == FOURCC_DXT1) ? 8 : 16;
for (i = 0; i < ibuf->dds_data.nummipmaps && (width || height); ++i) {
if (width == 0)
width = 1;
if (height == 0)
height = 1;
size = ((width + 3) / 4) * ((height + 3) / 4) * blocksize;
glCompressedTexImage2D(
GL_TEXTURE_2D, i, format, width, height,
0, size, ibuf->dds_data.data + offset);
offset += size;
width >>= 1;
height >>= 1;
}
/* set number of mipmap levels we have, needed in case they don't go down to 1x1 */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, i - 1);
return true;
#else
(void)ibuf;
return false;
#endif
}
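/* Create a GL texture from a DDS-compressed ImBuf; when DDS support is compiled out
 * or the compressed upload fails, fall back to an uncompressed GPU_create_gl_tex(). */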
void GPU_create_gl_tex_compressed(
uint *bind, uint *pix, int x, int y,
int textarget, int mipmap, Image *ima, ImBuf *ibuf)
{
#ifndef WITH_DDS
(void)ibuf;
/* Fall back to uncompressed if DDS isn't enabled */
GPU_create_gl_tex(bind, pix, NULL, x, y, textarget, mipmap, 0, ima);
#else
glGenTextures(1, (GLuint *)bind);
glBindTexture(textarget, *bind);
if (textarget == GL_TEXTURE_2D && GPU_upload_dxt_texture(ibuf) == 0) {
glDeleteTextures(1, (GLuint *)bind);
GPU_create_gl_tex(bind, pix, NULL, x, y, textarget, mipmap, 0, ima);
}
glBindTexture(textarget, 0);
#endif
}
/* these two functions are called on entering and exiting texture paint mode,
* temporarily disabling/enabling mipmapping on all images for quick texture
* updates with glTexSubImage2D. Images that didn't change don't have to be
* re-uploaded to OpenGL */
void GPU_paint_set_mipmap(Main *bmain, bool mipmap)
{
if (!GTS.domipmap)
return;
GTS.texpaint = !mipmap;
if (mipmap) {
for (Image *ima = bmain->image.first; ima; ima = ima->id.next) {
if (BKE_image_has_opengl_texture(ima)) {
if (ima->tpageflag & IMA_MIPMAP_COMPLETE) {
if (ima->gputexture[TEXTARGET_TEXTURE_2D]) {
GPU_texture_bind(ima->gputexture[TEXTARGET_TEXTURE_2D], 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, gpu_get_mipmap_filter(0));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, gpu_get_mipmap_filter(1));
GPU_texture_unbind(ima->gputexture[TEXTARGET_TEXTURE_2D]);
}
}
else
GPU_free_image(ima);
}
else
ima->tpageflag &= ~IMA_MIPMAP_COMPLETE;
}
}
else {
for (Image *ima = bmain->image.first; ima; ima = ima->id.next) {
if (BKE_image_has_opengl_texture(ima)) {
if (ima->gputexture[TEXTARGET_TEXTURE_2D]) {
GPU_texture_bind(ima->gputexture[TEXTARGET_TEXTURE_2D], 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, gpu_get_mipmap_filter(1));
GPU_texture_unbind(ima->gputexture[TEXTARGET_TEXTURE_2D]);
}
}
else
ima->tpageflag &= ~IMA_MIPMAP_COMPLETE;
}
}
}
/* check if image has been downscaled and do scaled partial update */
static bool gpu_check_scaled_image(ImBuf *ibuf, Image *ima, float *frect, int x, int y, int w, int h)
{
if (is_over_resolution_limit(GL_TEXTURE_2D, ibuf->x, ibuf->y)) {
int x_limit = smaller_power_of_2_limit(ibuf->x);
int y_limit = smaller_power_of_2_limit(ibuf->y);
float xratio = x_limit / (float)ibuf->x;
float yratio = y_limit / (float)ibuf->y;
/* find new width, height and x,y gpu texture coordinates */
/* take ceiling because we will be losing 1 pixel due to rounding errors in x,y... */
int rectw = (int)ceil(xratio * w);
int recth = (int)ceil(yratio * h);
x *= xratio;
y *= yratio;
/* ...but take back if we are over the limit! */
if (rectw + x > x_limit) rectw--;
if (recth + y > y_limit) recth--;
GPU_texture_bind(ima->gputexture[TEXTARGET_TEXTURE_2D], 0);
/* float rectangles are already contiguous in memory so we can use IMB_scaleImBuf */
if (frect) {
ImBuf *ibuf_scale = IMB_allocFromBuffer(NULL, frect, w, h);
IMB_scaleImBuf(ibuf_scale, rectw, recth);
glTexSubImage2D(
GL_TEXTURE_2D, 0, x, y, rectw, recth, GL_RGBA,
GL_FLOAT, ibuf_scale->rect_float);
IMB_freeImBuf(ibuf_scale);
}
/* byte images are not contiguous in memory so do manual interpolation */
else {
uchar *scalerect = MEM_mallocN(rectw * recth * sizeof(*scalerect) * 4, "scalerect");
uint *p = (uint *)scalerect;
int i, j;
float inv_xratio = 1.0f / xratio;
float inv_yratio = 1.0f / yratio;
for (i = 0; i < rectw; i++) {
float u = (x + i) * inv_xratio;
for (j = 0; j < recth; j++) {
float v = (y + j) * inv_yratio;
bilinear_interpolation_color_wrap(ibuf, (uchar *)(p + i + j * (rectw)), NULL, u, v);
}
}
glTexSubImage2D(
GL_TEXTURE_2D, 0, x, y, rectw, recth, GL_RGBA,
GL_UNSIGNED_BYTE, scalerect);
MEM_freeN(scalerect);
}
if (GPU_get_mipmap()) {
glGenerateMipmap(GL_TEXTURE_2D);
}
else {
ima->tpageflag &= ~IMA_MIPMAP_COMPLETE;
}
GPU_texture_unbind(ima->gputexture[TEXTARGET_TEXTURE_2D]);
return true;
}
return false;
}
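/* Partial update of the GL texture for the given image rectangle, used during texture
 * painting; falls back to a full image reload when a partial update is not possible. */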
void GPU_paint_update_image(Image *ima, ImageUser *iuser, int x, int y, int w, int h)
{
ImBuf *ibuf = BKE_image_acquire_ibuf(ima, iuser, NULL);
if ((!GTS.gpu_mipmap && GPU_get_mipmap()) ||
(ima->gputexture[TEXTARGET_TEXTURE_2D] == NULL) ||
(ibuf == NULL) ||
(w == 0) || (h == 0))
{
/* these cases require full reload still */
GPU_free_image(ima);
}
else {
/* for the special case, we can do a partial update
* which is much quicker for painting */
GLint row_length, skip_pixels, skip_rows;
/* if color correction is needed, we must update the part that needs updating. */
if (ibuf->rect_float) {
float *buffer = MEM_mallocN(w * h * sizeof(float) * 4, "temp_texpaint_float_buf");
bool is_data = (ima->tpageflag & IMA_GLBIND_IS_DATA) != 0;
IMB_partial_rect_from_float(ibuf, buffer, x, y, w, h, is_data);
if (gpu_check_scaled_image(ibuf, ima, buffer, x, y, w, h)) {
MEM_freeN(buffer);
BKE_image_release_ibuf(ima, ibuf, NULL);
return;
}
GPU_texture_bind(ima->gputexture[TEXTARGET_TEXTURE_2D], 0);
glTexSubImage2D(GL_TEXTURE_2D, 0, x, y, w, h, GL_RGBA, GL_FLOAT, buffer);
MEM_freeN(buffer);
/* we have already accounted for the case where GTS.gpu_mipmap is false
* so we will be using GPU mipmap generation here */
if (GPU_get_mipmap()) {
glGenerateMipmap(GL_TEXTURE_2D);
}
else {
ima->tpageflag &= ~IMA_MIPMAP_COMPLETE;
}
GPU_texture_unbind(ima->gputexture[TEXTARGET_TEXTURE_2D]);
BKE_image_release_ibuf(ima, ibuf, NULL);
return;
}
if (gpu_check_scaled_image(ibuf, ima, NULL, x, y, w, h)) {
BKE_image_release_ibuf(ima, ibuf, NULL);
return;
}
GPU_texture_bind(ima->gputexture[TEXTARGET_TEXTURE_2D], 0);
glGetIntegerv(GL_UNPACK_ROW_LENGTH, &row_length);
glGetIntegerv(GL_UNPACK_SKIP_PIXELS, &skip_pixels);
glGetIntegerv(GL_UNPACK_SKIP_ROWS, &skip_rows);
glPixelStorei(GL_UNPACK_ROW_LENGTH, ibuf->x);
glPixelStorei(GL_UNPACK_SKIP_PIXELS, x);
glPixelStorei(GL_UNPACK_SKIP_ROWS, y);
2018-07-18 23:09:31 +10:00
glTexSubImage2D(
GL_TEXTURE_2D, 0, x, y, w, h, GL_RGBA,
GL_UNSIGNED_BYTE, ibuf->rect);
glPixelStorei(GL_UNPACK_ROW_LENGTH, row_length);
glPixelStorei(GL_UNPACK_SKIP_PIXELS, skip_pixels);
glPixelStorei(GL_UNPACK_SKIP_ROWS, skip_rows);
/* see comment above as to why we are using gpu mipmap generation here */
if (GPU_get_mipmap()) {
glGenerateMipmap(GL_TEXTURE_2D);
}
else {
ima->tpageflag &= ~IMA_MIPMAP_COMPLETE;
}
GPU_texture_unbind(ima->gputexture[TEXTARGET_TEXTURE_2D]);
}
BKE_image_release_ibuf(ima, ibuf, NULL);
}
/* *************************** Transfer functions *************************** */
enum {
TFUNC_FLAME_SPECTRUM = 0,
TFUNC_COLOR_RAMP = 1,
};
#define TFUNC_WIDTH 256
#ifdef WITH_SMOKE
static void create_flame_spectrum_texture(float *data)
{
#define FIRE_THRESH 7
#define MAX_FIRE_ALPHA 0.06f
#define FULL_ON_FIRE 100
float *spec_pixels = MEM_mallocN(TFUNC_WIDTH * 4 * 16 * 16 * sizeof(float), "spec_pixels");
blackbody_temperature_to_rgb_table(data, TFUNC_WIDTH, 1500, 3000);
for (int i = 0; i < 16; i++) {
for (int j = 0; j < 16; j++) {
for (int k = 0; k < TFUNC_WIDTH; k++) {
int index = (j * TFUNC_WIDTH * 16 + i * TFUNC_WIDTH + k) * 4;
if (k >= FIRE_THRESH) {
spec_pixels[index] = (data[k * 4]);
spec_pixels[index + 1] = (data[k * 4 + 1]);
spec_pixels[index + 2] = (data[k * 4 + 2]);
spec_pixels[index + 3] = MAX_FIRE_ALPHA * (
(k > FULL_ON_FIRE) ? 1.0f : (k - FIRE_THRESH) / ((float)FULL_ON_FIRE - FIRE_THRESH));
}
else {
zero_v4(&spec_pixels[index]);
}
}
}
}
memcpy(data, spec_pixels, sizeof(float) * 4 * TFUNC_WIDTH);
MEM_freeN(spec_pixels);
#undef FIRE_THRESH
#undef MAX_FIRE_ALPHA
#undef FULL_ON_FIRE
}
static void create_color_ramp(const ColorBand *coba, float *data)
{
for (int i = 0; i < TFUNC_WIDTH; i++) {
BKE_colorband_evaluate(coba, (float)i / TFUNC_WIDTH, &data[i * 4]);
}
}
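/* Build a 1D RGBA transfer-function texture of TFUNC_WIDTH texels, filled either from
 * the built-in flame spectrum or from a user color ramp. */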
static GPUTexture *create_transfer_function(int type, const ColorBand *coba)
{
float *data = MEM_mallocN(sizeof(float) * 4 * TFUNC_WIDTH, __func__);
switch (type) {
case TFUNC_FLAME_SPECTRUM:
create_flame_spectrum_texture(data);
break;
case TFUNC_COLOR_RAMP:
create_color_ramp(coba, data);
break;
}
GPUTexture *tex = GPU_texture_create_1D(TFUNC_WIDTH, GPU_RGBA8, data, NULL);
MEM_freeN(data);
return tex;
}
static void swizzle_texture_channel_rrrr(GPUTexture *tex)
{
GPU_texture_bind(tex, 0);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_SWIZZLE_R, GL_RED);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_SWIZZLE_G, GL_RED);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_SWIZZLE_B, GL_RED);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_SWIZZLE_A, GL_RED);
GPU_texture_unbind(tex);
}
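/* Create a single-channel 3D texture from the smoke field selected for display,
 * swizzled so all RGBA reads return the red channel. */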
static GPUTexture *create_field_texture(SmokeDomainSettings *sds)
{
float *field = NULL;
switch (sds->coba_field) {
case FLUID_FIELD_DENSITY: field = smoke_get_density(sds->fluid); break;
case FLUID_FIELD_HEAT: field = smoke_get_heat(sds->fluid); break;
case FLUID_FIELD_FUEL: field = smoke_get_fuel(sds->fluid); break;
case FLUID_FIELD_REACT: field = smoke_get_react(sds->fluid); break;
case FLUID_FIELD_FLAME: field = smoke_get_flame(sds->fluid); break;
case FLUID_FIELD_VELOCITY_X: field = smoke_get_velocity_x(sds->fluid); break;
case FLUID_FIELD_VELOCITY_Y: field = smoke_get_velocity_y(sds->fluid); break;
case FLUID_FIELD_VELOCITY_Z: field = smoke_get_velocity_z(sds->fluid); break;
case FLUID_FIELD_COLOR_R: field = smoke_get_color_r(sds->fluid); break;
case FLUID_FIELD_COLOR_G: field = smoke_get_color_g(sds->fluid); break;
case FLUID_FIELD_COLOR_B: field = smoke_get_color_b(sds->fluid); break;
case FLUID_FIELD_FORCE_X: field = smoke_get_force_x(sds->fluid); break;
case FLUID_FIELD_FORCE_Y: field = smoke_get_force_y(sds->fluid); break;
case FLUID_FIELD_FORCE_Z: field = smoke_get_force_z(sds->fluid); break;
default: return NULL;
}
GPUTexture *tex = GPU_texture_create_nD(
sds->res[0], sds->res[1], sds->res[2], 3,
field, GPU_R8, GPU_DATA_FLOAT, 0, true, NULL);
swizzle_texture_channel_rrrr(tex);
return tex;
}
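/* Density is uploaded as GPU_RGBA8 when the simulation carries per-cell color,
 * otherwise as single-channel GPU_R8; high-res (turbulence) data is used when requested. */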
static GPUTexture *create_density_texture(SmokeDomainSettings *sds, int highres)
{
float *data = NULL, *source;
int cell_count = (highres) ? smoke_turbulence_get_cells(sds->wt) : sds->total_cells;
const bool has_color = (highres) ? smoke_turbulence_has_colors(sds->wt) : smoke_has_colors(sds->fluid);
int *dim = (highres) ? sds->res_wt : sds->res;
GPUTextureFormat format = (has_color) ? GPU_RGBA8 : GPU_R8;
if (has_color) {
data = MEM_callocN(sizeof(float) * cell_count * 4, "smokeColorTexture");
}
if (highres) {
if (has_color) {
smoke_turbulence_get_rgba(sds->wt, data, 0);
}
else {
source = smoke_turbulence_get_density(sds->wt);
}
}
else {
if (has_color) {
smoke_get_rgba(sds->fluid, data, 0);
}
else {
source = smoke_get_density(sds->fluid);
}
}
GPUTexture *tex = GPU_texture_create_nD(
dim[0], dim[1], dim[2], 3,
(has_color) ? data : source,
format, GPU_DATA_FLOAT, 0, true, NULL);
if (data) {
MEM_freeN(data);
}
if (format == GPU_R8) {
/* Swizzle the RGBA components to read the Red channel so
* that the shader stay the same for colored and non color
* density textures. */
swizzle_texture_channel_rrrr(tex);
}
return tex;
}
static GPUTexture *create_flame_texture(SmokeDomainSettings *sds, int highres)
{
float *source = NULL;
const bool has_fuel = (highres) ? smoke_turbulence_has_fuel(sds->wt) : smoke_has_fuel(sds->fluid);
int *dim = (highres) ? sds->res_wt : sds->res;
if (!has_fuel)
return NULL;
if (highres) {
source = smoke_turbulence_get_flame(sds->wt);
}
else {
source = smoke_get_flame(sds->fluid);
}
GPUTexture *tex = GPU_texture_create_nD(
dim[0], dim[1], dim[2], 3,
source, GPU_R8, GPU_DATA_FLOAT, 0, true, NULL);
swizzle_texture_channel_rrrr(tex);
return tex;
}
#endif /* WITH_SMOKE */
void GPU_free_smoke(SmokeModifierData *smd)
{
if (smd->type & MOD_SMOKE_TYPE_DOMAIN && smd->domain) {
if (smd->domain->tex)
GPU_texture_free(smd->domain->tex);
smd->domain->tex = NULL;
if (smd->domain->tex_shadow)
GPU_texture_free(smd->domain->tex_shadow);
smd->domain->tex_shadow = NULL;
if (smd->domain->tex_flame)
GPU_texture_free(smd->domain->tex_flame);
smd->domain->tex_flame = NULL;
if (smd->domain->tex_flame_coba)
GPU_texture_free(smd->domain->tex_flame_coba);
smd->domain->tex_flame_coba = NULL;
if (smd->domain->tex_coba)
GPU_texture_free(smd->domain->tex_coba);
smd->domain->tex_coba = NULL;
if (smd->domain->tex_field)
GPU_texture_free(smd->domain->tex_field);
smd->domain->tex_field = NULL;
}
}
void GPU_create_smoke_coba_field(SmokeModifierData *smd)
{
#ifdef WITH_SMOKE
if (smd->type & MOD_SMOKE_TYPE_DOMAIN) {
SmokeDomainSettings *sds = smd->domain;
if (!sds->tex_field) {
sds->tex_field = create_field_texture(sds);
}
if (!sds->tex_coba) {
sds->tex_coba = create_transfer_function(TFUNC_COLOR_RAMP, sds->coba);
}
}
#else // WITH_SMOKE
smd->domain->tex_field = NULL;
#endif // WITH_SMOKE
}
void GPU_create_smoke(SmokeModifierData *smd, int highres)
{
#ifdef WITH_SMOKE
if (smd->type & MOD_SMOKE_TYPE_DOMAIN) {
SmokeDomainSettings *sds = smd->domain;
if (!sds->tex) {
sds->tex = create_density_texture(sds, highres);
}
if (!sds->tex_flame) {
sds->tex_flame = create_flame_texture(sds, highres);
}
if (!sds->tex_flame_coba && sds->tex_flame) {
sds->tex_flame_coba = create_transfer_function(TFUNC_FLAME_SPECTRUM, NULL);
}
if (!sds->tex_shadow) {
sds->tex_shadow = GPU_texture_create_nD(
sds->res[0], sds->res[1], sds->res[2], 3,
sds->shadow,
GPU_R8, GPU_DATA_FLOAT, 0, true, NULL);
}
}
#else // WITH_SMOKE
(void)highres;
smd->domain->tex = NULL;
smd->domain->tex_flame = NULL;
smd->domain->tex_flame_coba = NULL;
smd->domain->tex_shadow = NULL;
#endif // WITH_SMOKE
}
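/* Upload the three smoke velocity components as separate 16-bit float 3D textures,
 * skipping creation when the fluid has no velocity data. */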
void GPU_create_smoke_velocity(SmokeModifierData *smd)
{
#ifdef WITH_SMOKE
if (smd->type & MOD_SMOKE_TYPE_DOMAIN) {
SmokeDomainSettings *sds = smd->domain;
const float *vel_x = smoke_get_velocity_x(sds->fluid);
const float *vel_y = smoke_get_velocity_y(sds->fluid);
const float *vel_z = smoke_get_velocity_z(sds->fluid);
if (ELEM(NULL, vel_x, vel_y, vel_z)) {
return;
}
if (!sds->tex_velocity_x) {
sds->tex_velocity_x = GPU_texture_create_3D(sds->res[0], sds->res[1], sds->res[2], GPU_R16F, vel_x, NULL);
sds->tex_velocity_y = GPU_texture_create_3D(sds->res[0], sds->res[1], sds->res[2], GPU_R16F, vel_y, NULL);
sds->tex_velocity_z = GPU_texture_create_3D(sds->res[0], sds->res[1], sds->res[2], GPU_R16F, vel_z, NULL);
}
}
#else // WITH_SMOKE
smd->domain->tex_velocity_x = NULL;
smd->domain->tex_velocity_y = NULL;
smd->domain->tex_velocity_z = NULL;
#endif // WITH_SMOKE
}
/* TODO Unify with the other GPU_free_smoke. */
void GPU_free_smoke_velocity(SmokeModifierData *smd)
{
if (smd->type & MOD_SMOKE_TYPE_DOMAIN && smd->domain) {
if (smd->domain->tex_velocity_x)
GPU_texture_free(smd->domain->tex_velocity_x);
if (smd->domain->tex_velocity_y)
GPU_texture_free(smd->domain->tex_velocity_y);
if (smd->domain->tex_velocity_z)
GPU_texture_free(smd->domain->tex_velocity_z);
smd->domain->tex_velocity_x = NULL;
smd->domain->tex_velocity_y = NULL;
smd->domain->tex_velocity_z = NULL;
}
}
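/* Images can only have their GL resources freed from the main thread (which owns the
 * GL context); requests from other threads are queued here and flushed later by
 * GPU_free_unused_buffers(). */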
static LinkNode *image_free_queue = NULL;
static void gpu_queue_image_for_free(Image *ima)
{
BLI_thread_lock(LOCK_OPENGL);
BLI_linklist_prepend(&image_free_queue, ima);
BLI_thread_unlock(LOCK_OPENGL);
}
void GPU_free_unused_buffers(Main *bmain)
{
if (!BLI_thread_is_main())
return;
BLI_thread_lock(LOCK_OPENGL);
/* images */
for (LinkNode *node = image_free_queue; node; node = node->next) {
Image *ima = node->link;
/* check in case it was freed in the meantime */
if (bmain && BLI_findindex(&bmain->image, ima) != -1)
GPU_free_image(ima);
}
BLI_linklist_free(image_free_queue, NULL);
image_free_queue = NULL;
BLI_thread_unlock(LOCK_OPENGL);
}
void GPU_free_image(Image *ima)
{
if (!BLI_thread_is_main()) {
gpu_queue_image_for_free(ima);
return;
}
for (int i = 0; i < TEXTARGET_COUNT; i++) {
/* free glsl image binding */
if (ima->gputexture[i]) {
GPU_texture_free(ima->gputexture[i]);
ima->gputexture[i] = NULL;
}
}
ima->tpageflag &= ~(IMA_MIPMAP_COMPLETE | IMA_GLBIND_IS_DATA);
}
void GPU_free_images(Main *bmain)
{
if (bmain) {
for (Image *ima = bmain->image.first; ima; ima = ima->id.next) {
GPU_free_image(ima);
}
}
}
/* same as above but only free animated images */
void GPU_free_images_anim(Main *bmain)
{
if (bmain) {
for (Image *ima = bmain->image.first; ima; ima = ima->id.next) {
if (BKE_image_is_animated(ima)) {
GPU_free_image(ima);
}
}
}
}
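/* Periodically free GL textures of images that have not been used for longer than
 * U.textimeout seconds, giving them a "second chance" before their buffers are freed. */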
void GPU_free_images_old(Main *bmain)
{
static int lasttime = 0;
int ctime = (int)PIL_check_seconds_timer();
/*
* Run the garbage collector once for every collecting period of time.
* If textimeout is 0, the collector does not run at all.
*/
if (U.textimeout == 0 || ctime % U.texcollectrate || ctime == lasttime)
return;
/* never collect while rendering */
if (G.is_rendering)
return;
lasttime = ctime;
Image *ima = bmain->image.first;
while (ima) {
if ((ima->flag & IMA_NOCOLLECT) == 0 && ctime - ima->lastused > U.textimeout) {
/* If it's in GL memory, deallocate and set time tag to current time
* This gives textures a "second chance" to be used before dying. */
if (BKE_image_has_opengl_texture(ima)) {
GPU_free_image(ima);
ima->lastused = ctime;
}
/* Otherwise, just kill the buffers */
else {
BKE_image_free_buffers(ima);
}
}
ima = ima->id.next;
}
}
static void gpu_disable_multisample(void)
{
#ifdef __linux__
/* changing multisample from the default (enabled) causes problems on some
* systems (NVIDIA/Linux) when the pixel format doesn't have a multisample buffer */
bool toggle_ok = true;
if (GPU_type_matches(GPU_DEVICE_NVIDIA, GPU_OS_UNIX, GPU_DRIVER_ANY)) {
int samples = 0;
glGetIntegerv(GL_SAMPLES, &samples);
if (samples == 0)
toggle_ok = false;
}
if (toggle_ok) {
glDisable(GL_MULTISAMPLE);
}
#else
glDisable(GL_MULTISAMPLE);
#endif
}
/* Default OpenGL State
*
* This is called on startup, for opengl offscreen render.
* Generally we should always return to this state when
* temporarily modifying the state for drawing, though there are (undocumented)
* exceptions that we should try to get rid of. */
void GPU_state_init(void)
{
GPU_disable_program_point_size();
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
glDepthFunc(GL_LEQUAL);
glDisable(GL_BLEND);
glDisable(GL_DEPTH_TEST);
glDisable(GL_COLOR_LOGIC_OP);
glDisable(GL_STENCIL_TEST);
glDepthRange(0.0, 1.0);
glFrontFace(GL_CCW);
glCullFace(GL_BACK);
glDisable(GL_CULL_FACE);
gpu_disable_multisample();
}
void GPU_enable_program_point_size(void)
{
glEnable(GL_PROGRAM_POINT_SIZE);
}
void GPU_disable_program_point_size(void)
{
glDisable(GL_PROGRAM_POINT_SIZE);
}
/** \name Framebuffer color depth, for selection codes
* \{ */
#ifdef __APPLE__
/* Apple seems to round colors both down and up on some configs */
static uint index_to_framebuffer(int index)
{
uint i = index;
switch (GPU_color_depth()) {
case 12:
i = ((i & 0xF00) << 12) + ((i & 0xF0) << 8) + ((i & 0xF) << 4);
/* sometimes dithering subtracts! */
i |= 0x070707;
break;
case 15:
case 16:
i = ((i & 0x7C00) << 9) + ((i & 0x3E0) << 6) + ((i & 0x1F) << 3);
i |= 0x030303;
break;
case 24:
break;
default: /* 18 bits... */
i = ((i & 0x3F000) << 6) + ((i & 0xFC0) << 4) + ((i & 0x3F) << 2);
i |= 0x010101;
break;
}
return i;
}
#else
/* this is the old method that has been in use for ages... seems to work; colors are rounded to lower values */
static uint index_to_framebuffer(int index)
{
uint i = index;
switch (GPU_color_depth()) {
case 8:
i = ((i & 48) << 18) + ((i & 12) << 12) + ((i & 3) << 6);
i |= 0x3F3F3F;
break;
case 12:
i = ((i & 0xF00) << 12) + ((i & 0xF0) << 8) + ((i & 0xF) << 4);
/* sometimes dithering subtracts! */
i |= 0x0F0F0F;
break;
case 15:
case 16:
i = ((i & 0x7C00) << 9) + ((i & 0x3E0) << 6) + ((i & 0x1F) << 3);
i |= 0x070707;
break;
case 24:
break;
default: /* 18 bits... */
i = ((i & 0x3F000) << 6) + ((i & 0xFC0) << 4) + ((i & 0x3F) << 2);
i |= 0x030303;
break;
}
return i;
}
#endif
void GPU_select_index_set(int index)
{
const int col = index_to_framebuffer(index);
glColor3ub(( (col) & 0xFF),
(((col) >> 8) & 0xFF),
(((col) >> 16) & 0xFF));
}
void GPU_select_index_get(int index, int *r_col)
{
const int col = index_to_framebuffer(index);
char *c_col = (char *)r_col;
c_col[0] = (col & 0xFF); /* red */
c_col[1] = ((col >> 8) & 0xFF); /* green */
c_col[2] = ((col >> 16) & 0xFF); /* blue */
c_col[3] = 0xFF; /* alpha */
}
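/* Inverse mappings of index_to_framebuffer() for each supported framebuffer color
 * depth, used to decode a read-back pixel value into a selection index. */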
#define INDEX_FROM_BUF_8(col) ((((col) & 0xC00000) >> 18) + (((col) & 0xC000) >> 12) + (((col) & 0xC0) >> 6))
#define INDEX_FROM_BUF_12(col) ((((col) & 0xF00000) >> 12) + (((col) & 0xF000) >> 8) + (((col) & 0xF0) >> 4))
#define INDEX_FROM_BUF_15_16(col) ((((col) & 0xF80000) >> 9) + (((col) & 0xF800) >> 6) + (((col) & 0xF8) >> 3))
#define INDEX_FROM_BUF_18(col) ((((col) & 0xFC0000) >> 6) + (((col) & 0xFC00) >> 4) + (((col) & 0xFC) >> 2))
#define INDEX_FROM_BUF_24(col) ((col) & 0xFFFFFF)
int GPU_select_to_index(uint col)
{
if (col == 0) {
return 0;
}
switch (GPU_color_depth()) {
case 8: return INDEX_FROM_BUF_8(col);
case 12: return INDEX_FROM_BUF_12(col);
case 15:
case 16: return INDEX_FROM_BUF_15_16(col);
case 24: return INDEX_FROM_BUF_24(col);
default: return INDEX_FROM_BUF_18(col);
}
}
void GPU_select_to_index_array(uint *col, const uint size)
{
#define INDEX_BUF_ARRAY(INDEX_FROM_BUF_BITS) \
for (i = size; i--; col++) { \
if ((c = *col)) { \
*col = INDEX_FROM_BUF_BITS(c); \
} \
} ((void)0)
if (size > 0) {
uint i, c;
switch (GPU_color_depth()) {
case 8:
INDEX_BUF_ARRAY(INDEX_FROM_BUF_8);
break;
case 12:
INDEX_BUF_ARRAY(INDEX_FROM_BUF_12);
break;
case 15:
case 16:
INDEX_BUF_ARRAY(INDEX_FROM_BUF_15_16);
break;
case 24:
INDEX_BUF_ARRAY(INDEX_FROM_BUF_24);
break;
default:
INDEX_BUF_ARRAY(INDEX_FROM_BUF_18);
break;
}
}
#undef INDEX_BUF_ARRAY
}
#define STATE_STACK_DEPTH 16
typedef struct {
eGPUAttribMask mask;
/* GL_ENABLE_BIT */
uint is_blend : 1;
uint is_cull_face : 1;
uint is_depth_test : 1;
uint is_dither : 1;
uint is_lighting : 1;
uint is_line_smooth : 1;
uint is_color_logic_op : 1;
uint is_multisample : 1;
uint is_polygon_offset_line : 1;
uint is_polygon_offset_fill : 1;
uint is_polygon_smooth : 1;
uint is_sample_alpha_to_coverage : 1;
uint is_scissor_test : 1;
uint is_stencil_test : 1;
bool is_clip_plane[6];
/* GL_DEPTH_BUFFER_BIT */
/* uint is_depth_test : 1; */
int depth_func;
double depth_clear_value;
bool depth_write_mask;
/* GL_SCISSOR_BIT */
int scissor_box[4];
/* uint is_scissor_test : 1; */
/* GL_VIEWPORT_BIT */
int viewport[4];
double near_far[2];
} GPUAttribValues;
typedef struct {
GPUAttribValues attrib_stack[STATE_STACK_DEPTH];
uint top;
} GPUAttribStack;
static GPUAttribStack state = {
.top = 0,
};
#define AttribStack state
#define Attrib state.attrib_stack[state.top]
/**
* Replacement for glPushAttrib/glPopAttrib
*
* We don't need to cover all the options of legacy OpenGL
* but simply the ones used by Blender.
*/
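/* Illustrative usage sketch:
 *
 *   gpuPushAttrib(GPU_DEPTH_BUFFER_BIT | GPU_BLEND_BIT);
 *   ... temporarily change depth / blend state and draw ...
 *   gpuPopAttrib();
 *
 * Pushes may be nested up to STATE_STACK_DEPTH levels deep. */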
void gpuPushAttrib(eGPUAttribMask mask)
{
Attrib.mask = mask;
if ((mask & GPU_DEPTH_BUFFER_BIT) != 0) {
Attrib.is_depth_test = glIsEnabled(GL_DEPTH_TEST);
glGetIntegerv(GL_DEPTH_FUNC, &Attrib.depth_func);
glGetDoublev(GL_DEPTH_CLEAR_VALUE, &Attrib.depth_clear_value);
glGetBooleanv(GL_DEPTH_WRITEMASK, (GLboolean *)&Attrib.depth_write_mask);
}
if ((mask & GPU_ENABLE_BIT) != 0) {
Attrib.is_blend = glIsEnabled(GL_BLEND);
for (int i = 0; i < 6; i++) {
Attrib.is_clip_plane[i] = glIsEnabled(GL_CLIP_PLANE0 + i);
}
Attrib.is_cull_face = glIsEnabled(GL_CULL_FACE);
Attrib.is_depth_test = glIsEnabled(GL_DEPTH_TEST);
Attrib.is_dither = glIsEnabled(GL_DITHER);
Attrib.is_line_smooth = glIsEnabled(GL_LINE_SMOOTH);
Attrib.is_color_logic_op = glIsEnabled(GL_COLOR_LOGIC_OP);
Attrib.is_multisample = glIsEnabled(GL_MULTISAMPLE);
Attrib.is_polygon_offset_line = glIsEnabled(GL_POLYGON_OFFSET_LINE);
Attrib.is_polygon_offset_fill = glIsEnabled(GL_POLYGON_OFFSET_FILL);
Attrib.is_polygon_smooth = glIsEnabled(GL_POLYGON_SMOOTH);
Attrib.is_sample_alpha_to_coverage = glIsEnabled(GL_SAMPLE_ALPHA_TO_COVERAGE);
Attrib.is_scissor_test = glIsEnabled(GL_SCISSOR_TEST);
Attrib.is_stencil_test = glIsEnabled(GL_STENCIL_TEST);
}
if ((mask & GPU_SCISSOR_BIT) != 0) {
Attrib.is_scissor_test = glIsEnabled(GL_SCISSOR_TEST);
glGetIntegerv(GL_SCISSOR_BOX, (GLint *)&Attrib.scissor_box);
}
if ((mask & GPU_VIEWPORT_BIT) != 0) {
glGetDoublev(GL_DEPTH_RANGE, (GLdouble *)&Attrib.near_far);
glGetIntegerv(GL_VIEWPORT, (GLint *)&Attrib.viewport);
}
if ((mask & GPU_BLEND_BIT) != 0) {
Attrib.is_blend = glIsEnabled(GL_BLEND);
}
BLI_assert(AttribStack.top < STATE_STACK_DEPTH);
AttribStack.top++;
}
static void restore_mask(GLenum cap, const bool value)
{
if (value) {
glEnable(cap);
}
else {
glDisable(cap);
}
}
void gpuPopAttrib(void)
{
BLI_assert(AttribStack.top > 0);
AttribStack.top--;
GLint mask = Attrib.mask;
if ((mask & GPU_DEPTH_BUFFER_BIT) != 0) {
restore_mask(GL_DEPTH_TEST, Attrib.is_depth_test);
glDepthFunc(Attrib.depth_func);
glClearDepth(Attrib.depth_clear_value);
glDepthMask(Attrib.depth_write_mask);
}
if ((mask & GPU_ENABLE_BIT) != 0) {
restore_mask(GL_BLEND, Attrib.is_blend);
for (int i = 0; i < 6; i++) {
restore_mask(GL_CLIP_PLANE0 + i, Attrib.is_clip_plane[i]);
}
restore_mask(GL_CULL_FACE, Attrib.is_cull_face);
restore_mask(GL_DEPTH_TEST, Attrib.is_depth_test);
restore_mask(GL_DITHER, Attrib.is_dither);
restore_mask(GL_LINE_SMOOTH, Attrib.is_line_smooth);
restore_mask(GL_COLOR_LOGIC_OP, Attrib.is_color_logic_op);
restore_mask(GL_MULTISAMPLE, Attrib.is_multisample);
restore_mask(GL_POLYGON_OFFSET_LINE, Attrib.is_polygon_offset_line);
restore_mask(GL_POLYGON_OFFSET_FILL, Attrib.is_polygon_offset_fill);
restore_mask(GL_POLYGON_SMOOTH, Attrib.is_polygon_smooth);
restore_mask(GL_SAMPLE_ALPHA_TO_COVERAGE, Attrib.is_sample_alpha_to_coverage);
restore_mask(GL_SCISSOR_TEST, Attrib.is_scissor_test);
restore_mask(GL_STENCIL_TEST, Attrib.is_stencil_test);
}
if ((mask & GPU_VIEWPORT_BIT) != 0) {
glViewport(Attrib.viewport[0], Attrib.viewport[1], Attrib.viewport[2], Attrib.viewport[3]);
glDepthRange(Attrib.near_far[0], Attrib.near_far[1]);
}
if ((mask & GPU_SCISSOR_BIT) != 0) {
restore_mask(GL_SCISSOR_TEST, Attrib.is_scissor_test);
glScissor(Attrib.scissor_box[0], Attrib.scissor_box[1], Attrib.scissor_box[2], Attrib.scissor_box[3]);
}
if ((mask & GPU_BLEND_BIT) != 0) {
restore_mask(GL_BLEND, Attrib.is_blend);
}
}
#undef Attrib
#undef AttribStack
/** \} */