Fix #106430: Index the right UVmap in BMesh #106537

Closed
Martijn Versteegh wants to merge 5 commits from Baardaap:fix-named-uvmap_bmesh into main

When changing the target branch, be careful to rebase the branch in your fork to match. See documentation.
103 changed files with 1499 additions and 890 deletions
Showing only changes of commit 758ee5c8e0 - Show all commits

View File

@ -164,7 +164,7 @@ get_blender_version()
# Declare Options
# Blender internal features
option(WITH_BLENDER "Build blender (disable to build only the blender player)" ON)
option(WITH_BLENDER "Build blender (disable to build only Cycles stand-alone)." ON)
mark_as_advanced(WITH_BLENDER)
if(WIN32)

View File

@ -641,15 +641,29 @@ if(WITH_GHOST_WAYLAND)
pkg_check_modules(wayland-egl wayland-egl)
pkg_check_modules(wayland-scanner wayland-scanner)
pkg_check_modules(wayland-cursor wayland-cursor)
pkg_check_modules(wayland-protocols wayland-protocols>=1.15)
pkg_check_modules(wayland-protocols wayland-protocols>=1.31)
pkg_get_variable(WAYLAND_PROTOCOLS_DIR wayland-protocols pkgdatadir)
else()
# NOTE: this file must always refer to the newest API which is used, so older
# `wayland-protocols` are never found and used which then fail to locate required protocols.
set(_wayland_protocols_reference_file "staging/fractional-scale/fractional-scale-v1.xml")
# Reset the protocols directory if the reference file from `wayland-protocols` is not found.
# This avoids developers having build failures when a cached directory is used that no
# longer contains the required file.
if(DEFINED WAYLAND_PROTOCOLS_DIR)
if(NOT EXISTS "${WAYLAND_PROTOCOLS_DIR}/${_wayland_protocols_reference_file}")
unset(WAYLAND_PROTOCOLS_DIR CACHE)
endif()
endif()
# Rocky8 packages have too old a version, a newer version exists in the pre-compiled libraries.
find_path(WAYLAND_PROTOCOLS_DIR
NAMES staging/xdg-activation/xdg-activation-v1.xml
NAMES ${_wayland_protocols_reference_file}
PATH_SUFFIXES share/wayland-protocols
PATHS ${LIBDIR}/wayland-protocols
)
unset(_wayland_protocols_reference_file)
if(EXISTS ${WAYLAND_PROTOCOLS_DIR})
set(wayland-protocols_FOUND ON)

View File

@ -131,9 +131,7 @@ def submodules_to_manifest(
skip_addon_contrib = version.is_release()
assert not blender_srcdir.is_absolute()
for line in git_command("-C", blender_srcdir, "submodule"):
submodule = line.split()[1]
for submodule in ("scripts/addons", "scripts/addons_contrib"):
# Don't use native slashes as GIT for MS-Windows outputs forward slashes.
if skip_addon_contrib and submodule == "scripts/addons_contrib":
continue

View File

@ -230,7 +230,7 @@ class DisplayGPUTexture {
}
GPU_texture_filter_mode(gpu_texture, false);
GPU_texture_wrap_mode(gpu_texture, false, true);
GPU_texture_extend_mode(gpu_texture, GPU_SAMPLER_EXTEND_MODE_EXTEND);
++num_used;
@ -705,14 +705,14 @@ static void draw_tile(const float2 &zoom,
const float zoomed_height = draw_tile.params.size.y * zoom.y;
if (texture.width != draw_tile.params.size.x || texture.height != draw_tile.params.size.y) {
/* Resolution divider is different from 1, force nearest interpolation. */
GPU_texture_bind_ex(texture.gpu_texture, GPU_SAMPLER_DEFAULT, 0);
GPU_texture_bind_ex(texture.gpu_texture, GPUSamplerState::default_sampler(), 0);
}
else if (zoomed_width - draw_tile.params.size.x > 0.5f ||
zoomed_height - draw_tile.params.size.y > 0.5f) {
GPU_texture_bind_ex(texture.gpu_texture, GPU_SAMPLER_DEFAULT, 0);
GPU_texture_bind_ex(texture.gpu_texture, GPUSamplerState::default_sampler(), 0);
}
else {
GPU_texture_bind_ex(texture.gpu_texture, GPU_SAMPLER_FILTER, 0);
GPU_texture_bind_ex(texture.gpu_texture, {GPU_SAMPLER_FILTERING_LINEAR}, 0);
}
/* Draw at the parameters for which the texture has been updated for. This allows to always draw

View File

@ -117,7 +117,7 @@ ccl_device_noinline bool light_sample(KernelGlobals kg,
if (kernel_data.integrator.use_light_tree) {
ccl_global const KernelLightTreeEmitter *kemitter = &kernel_data_fetch(light_tree_emitters,
emitter_index);
prim = kemitter->prim;
prim = kemitter->prim_id;
mesh_light = kemitter->mesh_light;
}
else

View File

@ -184,7 +184,7 @@ ccl_device bool compute_emitter_centroid_and_dir(KernelGlobals kg,
ccl_private float3 &centroid,
ccl_private packed_float3 &dir)
{
const int prim_id = kemitter->prim;
const int prim_id = kemitter->prim_id;
if (prim_id < 0) {
const ccl_global KernelLight *klight = &kernel_data_fetch(lights, ~prim_id);
centroid = klight->co;
@ -264,7 +264,7 @@ ccl_device void light_tree_emitter_importance(KernelGlobals kg,
return;
}
const int prim_id = kemitter->prim;
const int prim_id = kemitter->prim_id;
if (in_volume_segment) {
const float3 D = N_or_D;
@ -344,12 +344,12 @@ ccl_device void light_tree_node_importance(KernelGlobals kg,
{
max_importance = 0.0f;
min_importance = 0.0f;
if (knode->num_prims == 1) {
if (knode->num_emitters == 1) {
/* At a leaf node with only one emitter. */
light_tree_emitter_importance<in_volume_segment>(
kg, P, N_or_D, t, has_transmission, -knode->child_index, max_importance, min_importance);
}
else if (knode->num_prims != 0) {
else if (knode->num_emitters != 0) {
const BoundingCone bcone = knode->bcone;
const BoundingBox bbox = knode->bbox;
@ -452,13 +452,13 @@ ccl_device int light_tree_cluster_select_emitter(KernelGlobals kg,
int selected_index = -1;
/* Mark emitters with zero importance. Used for reservoir when total minimum importance = 0. */
kernel_assert(knode->num_prims <= sizeof(uint) * 8);
kernel_assert(knode->num_emitters <= sizeof(uint) * 8);
uint has_importance = 0;
const bool sample_max = (rand > 0.5f); /* Sampling using the maximum importance. */
rand = rand * 2.0f - float(sample_max);
for (int i = 0; i < knode->num_prims; i++) {
for (int i = 0; i < knode->num_emitters; i++) {
int current_index = -knode->child_index + i;
/* maximum importance = importance[0], minimum importance = importance[1] */
float importance[2];
@ -491,7 +491,7 @@ ccl_device int light_tree_cluster_select_emitter(KernelGlobals kg,
}
else {
selected_index = -1;
for (int i = 0; i < knode->num_prims; i++) {
for (int i = 0; i < knode->num_emitters; i++) {
int current_index = -knode->child_index + i;
sample_resevoir(current_index,
float(has_importance & 1),
@ -615,12 +615,12 @@ ccl_device_noinline bool light_tree_sample(KernelGlobals kg,
/* We need to be able to find the probability of selecting a given light for MIS. */
ccl_device float light_tree_pdf(
KernelGlobals kg, const float3 P, const float3 N, const int path_flag, const int prim)
KernelGlobals kg, const float3 P, const float3 N, const int path_flag, const int emitter)
{
const bool has_transmission = (path_flag & PATH_RAY_MIS_HAD_TRANSMISSION);
/* Target emitter info. */
const int target_emitter = (prim >= 0) ? kernel_data_fetch(triangle_to_tree, prim) :
kernel_data_fetch(light_to_tree, ~prim);
const int target_emitter = (emitter >= 0) ? kernel_data_fetch(triangle_to_tree, emitter) :
kernel_data_fetch(light_to_tree, ~emitter);
ccl_global const KernelLightTreeEmitter *kemitter = &kernel_data_fetch(light_tree_emitters,
target_emitter);
const int target_leaf = kemitter->parent_index;
@ -667,7 +667,7 @@ ccl_device float light_tree_pdf(
float total_max_importance = 0.0f;
float total_min_importance = 0.0f;
int num_has_importance = 0;
for (int i = 0; i < kleaf->num_prims; i++) {
for (int i = 0; i < kleaf->num_emitters; i++) {
const int emitter = -kleaf->child_index + i;
float max_importance, min_importance;
light_tree_emitter_importance<false>(

View File

@ -306,7 +306,7 @@ ccl_device_forceinline bool triangle_light_tree_parameters(
const int object = kemitter->mesh_light.object_id;
float3 vertices[3];
triangle_world_space_vertices(kg, object, kemitter->prim, -1.0f, vertices);
triangle_world_space_vertices(kg, object, kemitter->prim_id, -1.0f, vertices);
bool shape_above_surface = false;
for (int i = 0; i < 3; i++) {

View File

@ -1379,7 +1379,7 @@ typedef struct KernelLightTreeNode {
* and the negative value indexes into the first child of the light array.
* Otherwise, it's an index to the node's second child. */
int child_index;
int num_prims; /* leaf nodes need to know the number of primitives stored. */
int num_emitters; /* leaf nodes need to know the number of emitters stored. */
/* Bit trail. */
uint bit_trail;
@ -1397,8 +1397,8 @@ typedef struct KernelLightTreeEmitter {
/* Energy. */
float energy;
/* prim_id denotes the location in the lights or triangles array. */
int prim;
/* The location in the lights or triangles array. */
int prim_id;
MeshLight mesh_light;
EmissionSampling emission_sampling;

View File

@ -445,91 +445,23 @@ void LightManager::device_update_tree(Device *,
/* Update light tree. */
progress.set_status("Updating Lights", "Computing tree");
/* Add both lights and emissive triangles to this vector for light tree construction. */
vector<LightTreePrimitive> light_prims;
light_prims.reserve(kintegrator->num_distribution);
vector<LightTreePrimitive> distant_lights;
distant_lights.reserve(kintegrator->num_distant_lights);
vector<uint> object_lookup_offsets(scene->objects.size());
/* When we keep track of the light index, only contributing lights will be added to the device.
* Therefore, we want to keep track of the light's index on the device.
* However, we also need the light's index in the scene when we're constructing the tree. */
int device_light_index = 0;
int scene_light_index = 0;
foreach (Light *light, scene->lights) {
if (light->is_enabled) {
if (light->light_type == LIGHT_BACKGROUND || light->light_type == LIGHT_DISTANT) {
distant_lights.emplace_back(scene, ~device_light_index, scene_light_index);
}
else {
light_prims.emplace_back(scene, ~device_light_index, scene_light_index);
}
device_light_index++;
}
scene_light_index++;
}
/* Similarly, we also want to keep track of the index of triangles that are emissive. */
size_t total_triangles = 0;
int object_id = 0;
foreach (Object *object, scene->objects) {
if (progress.get_cancel())
return;
if (!object->usable_as_light()) {
object_id++;
continue;
}
object_lookup_offsets[object_id] = total_triangles;
/* Count emissive triangles. */
Mesh *mesh = static_cast<Mesh *>(object->get_geometry());
size_t mesh_num_triangles = mesh->num_triangles();
for (size_t i = 0; i < mesh_num_triangles; i++) {
int shader_index = mesh->get_shader()[i];
Shader *shader = (shader_index < mesh->get_used_shaders().size()) ?
static_cast<Shader *>(mesh->get_used_shaders()[shader_index]) :
scene->default_surface;
if (shader->emission_sampling != EMISSION_SAMPLING_NONE) {
light_prims.emplace_back(scene, i, object_id);
}
}
total_triangles += mesh_num_triangles;
object_id++;
}
/* Append distant lights to the end of `light_prims` */
std::move(distant_lights.begin(), distant_lights.end(), std::back_inserter(light_prims));
/* Update integrator state. */
kintegrator->use_direct_light = !light_prims.empty();
/* TODO: For now, we'll start with a smaller number of max lights in a node.
* More benchmarking is needed to determine what number works best. */
LightTree light_tree(light_prims, kintegrator->num_distant_lights, 8);
LightTree light_tree(scene, dscene, progress, 8);
LightTreeNode *root = light_tree.build(scene, dscene);
/* We want to create separate arrays corresponding to triangles and lights,
* which will be used to index back into the light tree for PDF calculations. */
const size_t num_lights = kintegrator->num_lights;
uint *light_array = dscene->light_to_tree.alloc(num_lights);
uint *object_offsets = dscene->object_lookup_offset.alloc(object_lookup_offsets.size());
uint *triangle_array = dscene->triangle_to_tree.alloc(total_triangles);
for (int i = 0; i < object_lookup_offsets.size(); i++) {
object_offsets[i] = object_lookup_offsets[i];
}
uint *light_array = dscene->light_to_tree.alloc(kintegrator->num_lights);
uint *triangle_array = dscene->triangle_to_tree.alloc(light_tree.num_triangles);
/* First initialize the light tree's nodes. */
KernelLightTreeNode *light_tree_nodes = dscene->light_tree_nodes.alloc(light_tree.size());
KernelLightTreeEmitter *light_tree_emitters = dscene->light_tree_emitters.alloc(
light_prims.size());
const size_t num_emitters = light_tree.num_emitters();
KernelLightTreeNode *light_tree_nodes = dscene->light_tree_nodes.alloc(light_tree.num_nodes);
KernelLightTreeEmitter *light_tree_emitters = dscene->light_tree_emitters.alloc(num_emitters);
/* Update integrator state. */
kintegrator->use_direct_light = num_emitters > 0;
/* Copy the light tree nodes to an array in the device. */
/* The nodes are arranged in a depth-first order, meaning the left child of each inner node
@ -544,40 +476,40 @@ void LightManager::device_update_tree(Device *,
int left_index_stack[32]; /* `sizeof(bit_trail) * 8 == 32`. */
LightTreeNode *right_node_stack[32];
int stack_id = 0;
const LightTreeNode *node = light_tree.get_root();
for (int index = 0; index < light_tree.size(); index++) {
light_tree_nodes[index].energy = node->measure.energy;
const LightTreeNode *node = root;
for (int node_index = 0; node_index < light_tree.num_nodes; node_index++) {
light_tree_nodes[node_index].energy = node->measure.energy;
light_tree_nodes[index].bbox.min = node->measure.bbox.min;
light_tree_nodes[index].bbox.max = node->measure.bbox.max;
light_tree_nodes[node_index].bbox.min = node->measure.bbox.min;
light_tree_nodes[node_index].bbox.max = node->measure.bbox.max;
light_tree_nodes[index].bcone.axis = node->measure.bcone.axis;
light_tree_nodes[index].bcone.theta_o = node->measure.bcone.theta_o;
light_tree_nodes[index].bcone.theta_e = node->measure.bcone.theta_e;
light_tree_nodes[node_index].bcone.axis = node->measure.bcone.axis;
light_tree_nodes[node_index].bcone.theta_o = node->measure.bcone.theta_o;
light_tree_nodes[node_index].bcone.theta_e = node->measure.bcone.theta_e;
light_tree_nodes[index].bit_trail = node->bit_trail;
light_tree_nodes[index].num_prims = node->num_prims;
light_tree_nodes[node_index].bit_trail = node->bit_trail;
light_tree_nodes[node_index].num_emitters = node->num_emitters;
/* Here we need to make a distinction between interior and leaf nodes. */
if (node->is_leaf()) {
light_tree_nodes[index].child_index = -node->first_prim_index;
light_tree_nodes[node_index].child_index = -node->first_emitter_index;
for (int i = 0; i < node->num_prims; i++) {
int emitter_index = i + node->first_prim_index;
LightTreePrimitive &prim = light_prims[emitter_index];
for (int i = 0; i < node->num_emitters; i++) {
int emitter_index = i + node->first_emitter_index;
const LightTreeEmitter &emitter = light_tree.get_emitter(emitter_index);
light_tree_emitters[emitter_index].energy = prim.measure.energy;
light_tree_emitters[emitter_index].theta_o = prim.measure.bcone.theta_o;
light_tree_emitters[emitter_index].theta_e = prim.measure.bcone.theta_e;
light_tree_emitters[emitter_index].energy = emitter.measure.energy;
light_tree_emitters[emitter_index].theta_o = emitter.measure.bcone.theta_o;
light_tree_emitters[emitter_index].theta_e = emitter.measure.bcone.theta_e;
if (prim.is_triangle()) {
light_tree_emitters[emitter_index].mesh_light.object_id = prim.object_id;
if (emitter.is_triangle()) {
light_tree_emitters[emitter_index].mesh_light.object_id = emitter.object_id;
int shader_flag = 0;
Object *object = scene->objects[prim.object_id];
Object *object = scene->objects[emitter.object_id];
Mesh *mesh = static_cast<Mesh *>(object->get_geometry());
Shader *shader = static_cast<Shader *>(
mesh->get_used_shaders()[mesh->get_shader()[prim.prim_id]]);
mesh->get_used_shaders()[mesh->get_shader()[emitter.prim_id]]);
if (!(object->get_visibility() & PATH_RAY_CAMERA)) {
shader_flag |= SHADER_EXCLUDE_CAMERA;
@ -598,19 +530,20 @@ void LightManager::device_update_tree(Device *,
shader_flag |= SHADER_EXCLUDE_SHADOW_CATCHER;
}
light_tree_emitters[emitter_index].prim = prim.prim_id + mesh->prim_offset;
light_tree_emitters[emitter_index].prim_id = emitter.prim_id + mesh->prim_offset;
light_tree_emitters[emitter_index].mesh_light.shader_flag = shader_flag;
light_tree_emitters[emitter_index].emission_sampling = shader->emission_sampling;
triangle_array[prim.prim_id + object_lookup_offsets[prim.object_id]] = emitter_index;
triangle_array[emitter.prim_id + dscene->object_lookup_offset[emitter.object_id]] =
emitter_index;
}
else {
light_tree_emitters[emitter_index].prim = prim.prim_id;
light_tree_emitters[emitter_index].prim_id = emitter.prim_id;
light_tree_emitters[emitter_index].mesh_light.shader_flag = 0;
light_tree_emitters[emitter_index].mesh_light.object_id = OBJECT_NONE;
light_tree_emitters[emitter_index].emission_sampling = EMISSION_SAMPLING_FRONT_BACK;
light_array[~prim.prim_id] = emitter_index;
light_array[~emitter.prim_id] = emitter_index;
}
light_tree_emitters[emitter_index].parent_index = index;
light_tree_emitters[emitter_index].parent_index = node_index;
}
/* Retrieve from the stacks. */
@ -618,12 +551,12 @@ void LightManager::device_update_tree(Device *,
break;
}
stack_id--;
light_tree_nodes[left_index_stack[stack_id]].child_index = index + 1;
light_tree_nodes[left_index_stack[stack_id]].child_index = node_index + 1;
node = right_node_stack[stack_id];
}
else {
/* Fill in the stacks. */
left_index_stack[stack_id] = index;
left_index_stack[stack_id] = node_index;
right_node_stack[stack_id] = node->children[LightTree::right].get();
node = node->children[LightTree::left].get();
stack_id++;
@ -634,7 +567,6 @@ void LightManager::device_update_tree(Device *,
dscene->light_tree_nodes.copy_to_device();
dscene->light_tree_emitters.copy_to_device();
dscene->light_to_tree.copy_to_device();
dscene->object_lookup_offset.copy_to_device();
dscene->triangle_to_tree.copy_to_device();
}

View File

@ -81,6 +81,7 @@ class Light : public Node {
bool has_contribution(Scene *scene);
friend class LightManager;
friend class LightTree;
};
class LightManager {

View File

@ -5,6 +5,8 @@
#include "scene/mesh.h"
#include "scene/object.h"
#include "util/progress.h"
CCL_NAMESPACE_BEGIN
float OrientationBounds::calculate_measure() const
@ -45,7 +47,7 @@ OrientationBounds merge(const OrientationBounds &cone_a, const OrientationBounds
/* Return axis and theta_o of a if it already contains b. */
/* This should also be called when b is empty. */
if (a->theta_o >= fminf(M_PI_F, theta_d + b->theta_o)) {
if (a->theta_o + 5e-4f >= fminf(M_PI_F, theta_d + b->theta_o)) {
return OrientationBounds({a->axis, a->theta_o, theta_e});
}
@ -65,14 +67,14 @@ OrientationBounds merge(const OrientationBounds &cone_a, const OrientationBounds
}
else {
float theta_r = theta_o - a->theta_o;
float3 ortho = normalize(b->axis - a->axis * cos_a_b);
float3 ortho = safe_normalize(b->axis - a->axis * cos_a_b);
new_axis = a->axis * cosf(theta_r) + ortho * sinf(theta_r);
}
return OrientationBounds({new_axis, theta_o, theta_e});
}
LightTreePrimitive::LightTreePrimitive(Scene *scene, int prim_id, int object_id)
LightTreeEmitter::LightTreeEmitter(Scene *scene, int prim_id, int object_id)
: prim_id(prim_id), object_id(object_id)
{
if (is_triangle()) {
@ -100,7 +102,7 @@ LightTreePrimitive::LightTreePrimitive(Scene *scene, int prim_id, int object_id)
float area = triangle_area(vertices[0], vertices[1], vertices[2]);
measure.energy = area * average(shader->emission_estimate);
/* NOTE: the original implementation used the bounding box centroid, but primitive centroid
/* NOTE: the original implementation used the bounding box centroid, but triangle centroid
* seems to work fine */
centroid = (vertices[0] + vertices[1] + vertices[2]) / 3.0f;
@ -208,80 +210,151 @@ LightTreePrimitive::LightTreePrimitive(Scene *scene, int prim_id, int object_id)
}
}
LightTree::LightTree(vector<LightTreePrimitive> &prims,
const int &num_distant_lights,
LightTree::LightTree(Scene *scene,
DeviceScene *dscene,
Progress &progress,
uint max_lights_in_leaf)
: progress_(progress), max_lights_in_leaf_(max_lights_in_leaf)
{
if (prims.empty()) {
return;
KernelIntegrator *kintegrator = &dscene->data.integrator;
/* Add both lights and emissive triangles to this vector for light tree construction. */
emitters_.reserve(kintegrator->num_distribution);
distant_lights_.reserve(kintegrator->num_distant_lights);
uint *object_offsets = dscene->object_lookup_offset.alloc(scene->objects.size());
/* When we keep track of the light index, only contributing lights will be added to the device.
* Therefore, we want to keep track of the light's index on the device.
* However, we also need the light's index in the scene when we're constructing the tree. */
int device_light_index = 0;
int scene_light_index = 0;
for (Light *light : scene->lights) {
if (light->is_enabled) {
if (light->light_type == LIGHT_BACKGROUND || light->light_type == LIGHT_DISTANT) {
distant_lights_.emplace_back(scene, ~device_light_index, scene_light_index);
}
else {
emitters_.emplace_back(scene, ~device_light_index, scene_light_index);
}
device_light_index++;
}
scene_light_index++;
}
max_lights_in_leaf_ = max_lights_in_leaf;
const int num_prims = prims.size();
const int num_local_lights = num_prims - num_distant_lights;
/* Similarly, we also want to keep track of the index of triangles that are emissive. */
int object_id = 0;
for (Object *object : scene->objects) {
if (progress_.get_cancel()) {
return;
}
root_ = create_node(LightTreePrimitivesMeasure::empty, 0);
if (!object->usable_as_light()) {
object_id++;
continue;
}
object_offsets[object_id] = num_triangles;
/* Count emissive triangles. */
Mesh *mesh = static_cast<Mesh *>(object->get_geometry());
size_t mesh_num_triangles = mesh->num_triangles();
for (size_t i = 0; i < mesh_num_triangles; i++) {
int shader_index = mesh->get_shader()[i];
Shader *shader = (shader_index < mesh->get_used_shaders().size()) ?
static_cast<Shader *>(mesh->get_used_shaders()[shader_index]) :
scene->default_surface;
if (shader->emission_sampling != EMISSION_SAMPLING_NONE) {
emitters_.emplace_back(scene, i, object_id);
}
}
num_triangles += mesh_num_triangles;
object_id++;
}
/* Copy array to device. */
dscene->object_lookup_offset.copy_to_device();
}
LightTreeNode *LightTree::build(Scene *scene, DeviceScene *dscene)
{
if (emitters_.empty() && distant_lights_.empty()) {
return nullptr;
}
/* At this stage `emitters_` only contains local lights, the distant lights will be merged
* into `emitters_` when Light Tree building is finished. */
const int num_local_lights = emitters_.size();
const int num_distant_lights = distant_lights_.size();
root_ = create_node(LightTreeMeasure::empty, 0);
/* All local lights are grouped to the left child as an inner node. */
recursive_build(left, root_.get(), 0, num_local_lights, &prims, 0, 1);
recursive_build(left, root_.get(), 0, num_local_lights, emitters_.data(), 0, 1);
task_pool.wait_work();
/* All distant lights are grouped to the right child as a leaf node. */
root_->children[right] = create_node(LightTreePrimitivesMeasure::empty, 1);
for (int i = num_local_lights; i < num_prims; i++) {
root_->children[right]->add(prims[i]);
root_->children[right] = create_node(LightTreeMeasure::empty, 1);
for (int i = 0; i < num_distant_lights; i++) {
root_->children[right]->add(distant_lights_[i]);
}
root_->children[right]->make_leaf(num_local_lights, num_distant_lights);
/* Append distant lights to the end of `light_prims` */
std::move(distant_lights_.begin(), distant_lights_.end(), std::back_inserter(emitters_));
return root_.get();
}
void LightTree::recursive_build(const Child child,
LightTreeNode *parent,
const int start,
const int end,
vector<LightTreePrimitive> *prims,
LightTreeEmitter *emitters,
const uint bit_trail,
const int depth)
{
BoundBox centroid_bounds = BoundBox::empty;
for (int i = start; i < end; i++) {
centroid_bounds.grow((*prims)[i].centroid);
if (progress_.get_cancel()) {
return;
}
parent->children[child] = create_node(LightTreePrimitivesMeasure::empty, bit_trail);
parent->children[child] = create_node(LightTreeMeasure::empty, bit_trail);
LightTreeNode *node = parent->children[child].get();
/* Find the best place to split the primitives into 2 nodes.
/* Find the best place to split the emitters into 2 nodes.
* If the best split cost is no better than making a leaf node, make a leaf instead. */
int split_dim = -1, middle;
if (should_split(*prims, start, middle, end, node->measure, centroid_bounds, split_dim)) {
if (should_split(emitters, start, middle, end, node->measure, split_dim)) {
if (split_dim != -1) {
/* Partition the primitives between start and end based on the centroids. */
std::nth_element(prims->begin() + start,
prims->begin() + middle,
prims->begin() + end,
[split_dim](const LightTreePrimitive &l, const LightTreePrimitive &r) {
/* Partition the emitters between start and end based on the centroids. */
std::nth_element(emitters + start,
emitters + middle,
emitters + end,
[split_dim](const LightTreeEmitter &l, const LightTreeEmitter &r) {
return l.centroid[split_dim] < r.centroid[split_dim];
});
}
/* Recursively build the left branch. */
if (middle - start > MIN_PRIMS_PER_THREAD) {
if (middle - start > MIN_EMITTERS_PER_THREAD) {
task_pool.push(
[=] { recursive_build(left, node, start, middle, prims, bit_trail, depth + 1); });
[=] { recursive_build(left, node, start, middle, emitters, bit_trail, depth + 1); });
}
else {
recursive_build(left, node, start, middle, prims, bit_trail, depth + 1);
recursive_build(left, node, start, middle, emitters, bit_trail, depth + 1);
}
/* Recursively build the right branch. */
if (end - middle > MIN_PRIMS_PER_THREAD) {
if (end - middle > MIN_EMITTERS_PER_THREAD) {
task_pool.push([=] {
recursive_build(right, node, middle, end, prims, bit_trail | (1u << depth), depth + 1);
recursive_build(right, node, middle, end, emitters, bit_trail | (1u << depth), depth + 1);
});
}
else {
recursive_build(right, node, middle, end, prims, bit_trail | (1u << depth), depth + 1);
recursive_build(right, node, middle, end, emitters, bit_trail | (1u << depth), depth + 1);
}
}
else {
@ -289,16 +362,27 @@ void LightTree::recursive_build(const Child child,
}
}
bool LightTree::should_split(const vector<LightTreePrimitive> &prims,
bool LightTree::should_split(LightTreeEmitter *emitters,
const int start,
int &middle,
const int end,
LightTreePrimitivesMeasure &measure,
const BoundBox &centroid_bbox,
LightTreeMeasure &measure,
int &split_dim)
{
const int num_emitters = end - start;
if (num_emitters == 1) {
/* Do not try to split if there is only one emitter. */
measure = (emitters + start)->measure;
return false;
}
middle = (start + end) / 2;
const int num_prims = end - start;
BoundBox centroid_bbox = BoundBox::empty;
for (int i = start; i < end; i++) {
centroid_bbox.grow((emitters + i)->centroid);
}
const float3 extent = centroid_bbox.size();
const float max_extent = max4(extent.x, extent.y, extent.z, 0.0f);
@ -313,18 +397,18 @@ bool LightTree::should_split(const vector<LightTreePrimitive> &prims,
const float inv_extent = 1 / (centroid_bbox.size()[dim]);
/* Fill in buckets with primitives. */
/* Fill in buckets with emitters. */
std::array<LightTreeBucket, LightTreeBucket::num_buckets> buckets;
for (int i = start; i < end; i++) {
const LightTreePrimitive &prim = prims[i];
const LightTreeEmitter *emitter = emitters + i;
/* Place primitive into the appropriate bucket, where the centroid box is split into equal
/* Place emitter into the appropriate bucket, where the centroid box is split into equal
* partitions. */
int bucket_idx = LightTreeBucket::num_buckets *
(prim.centroid[dim] - centroid_bbox.min[dim]) * inv_extent;
(emitter->centroid[dim] - centroid_bbox.min[dim]) * inv_extent;
bucket_idx = clamp(bucket_idx, 0, LightTreeBucket::num_buckets - 1);
buckets[bucket_idx].add(prim);
buckets[bucket_idx].add(*emitter);
}
/* Precompute the left bucket measure cumulatively. */
@ -338,12 +422,7 @@ bool LightTree::should_split(const vector<LightTreePrimitive> &prims,
/* Calculate node measure by summing up the bucket measure. */
measure = left_buckets.back().measure + buckets.back().measure;
/* Do not try to split if there are only one primitive. */
if (num_prims < 2) {
return false;
}
/* Degenerate case with co-located primitives. */
/* Degenerate case with co-located emitters. */
if (is_zero(centroid_bbox.size())) {
break;
}
@ -375,13 +454,12 @@ bool LightTree::should_split(const vector<LightTreePrimitive> &prims,
}
}
}
return min_cost < total_cost || num_prims > max_lights_in_leaf_;
return min_cost < total_cost || num_emitters > max_lights_in_leaf_;
}
__forceinline LightTreePrimitivesMeasure operator+(const LightTreePrimitivesMeasure &a,
const LightTreePrimitivesMeasure &b)
__forceinline LightTreeMeasure operator+(const LightTreeMeasure &a, const LightTreeMeasure &b)
{
LightTreePrimitivesMeasure c(a);
LightTreeMeasure c(a);
c.add(b);
return c;
}

View File

@ -58,25 +58,25 @@ OrientationBounds merge(const OrientationBounds &cone_a, const OrientationBounds
/* Light Tree uses the bounding box, the orientation bounding cone, and the energy of a cluster to
* compute the Surface Area Orientation Heuristic (SAOH). */
struct LightTreePrimitivesMeasure {
struct LightTreeMeasure {
BoundBox bbox = BoundBox::empty;
OrientationBounds bcone = OrientationBounds::empty;
float energy = 0.0f;
enum empty_t { empty = 0 };
__forceinline LightTreePrimitivesMeasure() = default;
__forceinline LightTreeMeasure() = default;
__forceinline LightTreePrimitivesMeasure(empty_t) {}
__forceinline LightTreeMeasure(empty_t) {}
__forceinline LightTreePrimitivesMeasure(const BoundBox &bbox,
const OrientationBounds &bcone,
const float &energy)
__forceinline LightTreeMeasure(const BoundBox &bbox,
const OrientationBounds &bcone,
const float &energy)
: bbox(bbox), bcone(bcone), energy(energy)
{
}
__forceinline LightTreePrimitivesMeasure(const LightTreePrimitivesMeasure &other)
__forceinline LightTreeMeasure(const LightTreeMeasure &other)
: bbox(other.bbox), bcone(other.bcone), energy(other.energy)
{
}
@ -86,7 +86,7 @@ struct LightTreePrimitivesMeasure {
return energy == 0;
}
__forceinline void add(const LightTreePrimitivesMeasure &measure)
__forceinline void add(const LightTreeMeasure &measure)
{
if (!measure.is_zero()) {
bbox.grow(measure.bbox);
@ -104,21 +104,20 @@ struct LightTreePrimitivesMeasure {
}
};
LightTreePrimitivesMeasure operator+(const LightTreePrimitivesMeasure &a,
const LightTreePrimitivesMeasure &b);
LightTreeMeasure operator+(const LightTreeMeasure &a, const LightTreeMeasure &b);
/* Light Tree Primitive
/* Light Tree Emitter
* Struct that indexes into the scene's triangle and light arrays. */
struct LightTreePrimitive {
struct LightTreeEmitter {
/* `prim_id >= 0` is an index into an object's local triangle index,
* otherwise `-prim_id-1`(`~prim`) is an index into device lights array. */
int prim_id;
int object_id;
float3 centroid;
LightTreePrimitivesMeasure measure;
LightTreeMeasure measure;
LightTreePrimitive(Scene *scene, int prim_id, int object_id);
LightTreeEmitter(Scene *scene, int prim_id, int object_id);
__forceinline bool is_triangle() const
{
@ -129,20 +128,20 @@ struct LightTreePrimitive {
/* Light Tree Bucket
* Struct used to determine splitting costs in the light BVH. */
struct LightTreeBucket {
LightTreePrimitivesMeasure measure;
LightTreeMeasure measure;
int count = 0;
static const int num_buckets = 12;
LightTreeBucket() = default;
LightTreeBucket(const LightTreePrimitivesMeasure &measure, const int &count)
LightTreeBucket(const LightTreeMeasure &measure, const int &count)
: measure(measure), count(count)
{
}
void add(const LightTreePrimitive &prim)
void add(const LightTreeEmitter &emitter)
{
measure.add(prim.measure);
measure.add(emitter.measure);
count++;
}
};
@ -151,34 +150,34 @@ LightTreeBucket operator+(const LightTreeBucket &a, const LightTreeBucket &b);
/* Light Tree Node */
struct LightTreeNode {
LightTreePrimitivesMeasure measure;
LightTreeMeasure measure;
uint bit_trail;
int num_prims = -1; /* The number of primitives a leaf node stores. A negative
number indicates it is an inner node. */
int first_prim_index; /* Leaf nodes contain an index to first primitive. */
int num_emitters = -1; /* The number of emitters a leaf node stores. A negative number indicates
it is an inner node. */
int first_emitter_index; /* Leaf nodes contain an index to first emitter. */
unique_ptr<LightTreeNode> children[2]; /* Inner node has two children. */
LightTreeNode() = default;
LightTreeNode(const LightTreePrimitivesMeasure &measure, const uint &bit_trial)
LightTreeNode(const LightTreeMeasure &measure, const uint &bit_trial)
: measure(measure), bit_trail(bit_trial)
{
}
__forceinline void add(const LightTreePrimitive &prim)
__forceinline void add(const LightTreeEmitter &emitter)
{
measure.add(prim.measure);
measure.add(emitter.measure);
}
void make_leaf(const int &first_prim_index, const int &num_prims)
void make_leaf(const int &first_emitter_index, const int &num_emitters)
{
this->first_prim_index = first_prim_index;
this->num_prims = num_prims;
this->first_emitter_index = first_emitter_index;
this->num_emitters = num_emitters;
}
__forceinline bool is_leaf() const
{
return num_prims >= 0;
return num_emitters >= 0;
}
};
@ -188,58 +187,65 @@ struct LightTreeNode {
* and considers additional orientation and energy information */
class LightTree {
unique_ptr<LightTreeNode> root_;
std::atomic<int> num_nodes_ = 0;
vector<LightTreeEmitter> emitters_;
vector<LightTreeEmitter> distant_lights_;
Progress &progress_;
uint max_lights_in_leaf_;
public:
std::atomic<int> num_nodes = 0;
size_t num_triangles = 0;
/* Left or right child of an inner node. */
enum Child {
left = 0,
right = 1,
};
LightTree(vector<LightTreePrimitive> &prims,
const int &num_distant_lights,
uint max_lights_in_leaf);
LightTree(Scene *scene, DeviceScene *dscene, Progress &progress, uint max_lights_in_leaf);
int size() const
{
return num_nodes_;
};
LightTreeNode *get_root() const
{
return root_.get();
};
/* Returns a pointer to the root node. */
LightTreeNode *build(Scene *scene, DeviceScene *dscene);
/* NOTE: Always use this function to create a new node so the number of nodes is in sync. */
unique_ptr<LightTreeNode> create_node(const LightTreePrimitivesMeasure &measure,
const uint &bit_trial)
unique_ptr<LightTreeNode> create_node(const LightTreeMeasure &measure, const uint &bit_trial)
{
num_nodes_++;
num_nodes++;
return make_unique<LightTreeNode>(measure, bit_trial);
}
size_t num_emitters()
{
return emitters_.size();
}
const LightTreeEmitter &get_emitter(int index) const
{
return emitters_.at(index);
}
private:
/* Thread. */
TaskPool task_pool;
/* Do not spawn a thread if less than this amount of primitives are to be processed. */
enum { MIN_PRIMS_PER_THREAD = 4096 };
/* Do not spawn a thread if less than this amount of emitters are to be processed. */
enum { MIN_EMITTERS_PER_THREAD = 4096 };
void recursive_build(Child child,
LightTreeNode *parent,
int start,
int end,
vector<LightTreePrimitive> *prims,
LightTreeEmitter *emitters,
uint bit_trail,
int depth);
bool should_split(const vector<LightTreePrimitive> &prims,
bool should_split(LightTreeEmitter *emitters,
const int start,
int &middle,
const int end,
LightTreePrimitivesMeasure &measure,
const BoundBox &centroid_bbox,
LightTreeMeasure &measure,
int &split_dim);
};

View File

@ -378,15 +378,6 @@ elseif(WITH_GHOST_X11 OR WITH_GHOST_WAYLAND)
unset(_name)
endmacro()
macro(generate_protocol_bindings_when_found PROT_DEF FOUND_VAR)
if(EXISTS ${PROT_DEF})
generate_protocol_bindings(${PROT_DEF})
set(${FOUND_VAR} TRUE)
else()
set(${FOUND_VAR} FALSE)
endif()
endmacro()
list(APPEND INC_SYS
${INC_DST}
)
@ -411,16 +402,13 @@ elseif(WITH_GHOST_X11 OR WITH_GHOST_WAYLAND)
"${WAYLAND_PROTOCOLS_DIR}/staging/xdg-activation/xdg-activation-v1.xml"
)
# Fractional scale.
generate_protocol_bindings_when_found(
generate_protocol_bindings(
"${WAYLAND_PROTOCOLS_DIR}/staging/fractional-scale/fractional-scale-v1.xml"
_has_fractional_scale
)
if(_has_fractional_scale)
# Viewport (only required when fractional scale is in use).
generate_protocol_bindings(
"${WAYLAND_PROTOCOLS_DIR}/stable/viewporter/viewporter.xml"
)
endif()
# Viewport (only required when fractional scale is in use).
generate_protocol_bindings(
"${WAYLAND_PROTOCOLS_DIR}/stable/viewporter/viewporter.xml"
)
# Pointer-constraints.
generate_protocol_bindings(
"${WAYLAND_PROTOCOLS_DIR}/unstable/pointer-constraints/pointer-constraints-unstable-v1.xml"
@ -445,12 +433,6 @@ elseif(WITH_GHOST_X11 OR WITH_GHOST_WAYLAND)
unset(INC_DST)
add_definitions(-DWITH_GHOST_WAYLAND)
if(_has_fractional_scale)
add_definitions(-DWITH_GHOST_WAYLAND_FRACTIONAL_SCALE)
endif()
unset(_has_fractional_scale)
if(NOT WITH_GHOST_WAYLAND_APP_ID STREQUAL "")
add_definitions(-DWITH_GHOST_WAYLAND_APP_ID=${WITH_GHOST_WAYLAND_APP_ID})
endif()

View File

@ -54,17 +54,15 @@
#include <xkbcommon/xkbcommon.h>
/* Generated by `wayland-scanner`. */
#include <fractional-scale-v1-client-protocol.h>
#include <pointer-constraints-unstable-v1-client-protocol.h>
#include <pointer-gestures-unstable-v1-client-protocol.h>
#include <primary-selection-unstable-v1-client-protocol.h>
#include <relative-pointer-unstable-v1-client-protocol.h>
#include <tablet-unstable-v2-client-protocol.h>
#include <viewporter-client-protocol.h>
#include <xdg-activation-v1-client-protocol.h>
#include <xdg-output-unstable-v1-client-protocol.h>
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
# include <fractional-scale-v1-client-protocol.h>
# include <viewporter-client-protocol.h>
#endif
/* Decorations `xdg_decor`. */
#include <xdg-decoration-unstable-v1-client-protocol.h>
@ -948,11 +946,8 @@ struct GWL_Display {
struct zwp_relative_pointer_manager_v1 *wp_relative_pointer_manager = nullptr;
struct zwp_primary_selection_device_manager_v1 *wp_primary_selection_device_manager = nullptr;
struct xdg_activation_v1 *xdg_activation_manager = nullptr;
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
struct wp_fractional_scale_manager_v1 *wp_fractional_scale_manager = nullptr;
struct wp_viewporter *wp_viewporter = nullptr;
#endif
struct zwp_pointer_constraints_v1 *wp_pointer_constraints = nullptr;
struct zwp_pointer_gestures_v1 *wp_pointer_gestures = nullptr;
@ -5184,8 +5179,6 @@ static void gwl_registry_xdg_activation_remove(GWL_Display *display,
/* #GWL_Display.wp_fractional_scale_manger */
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
static void gwl_registry_wp_fractional_scale_manager_add(GWL_Display *display,
const GWL_RegisteryAdd_Params *params)
{
@ -5221,8 +5214,6 @@ static void gwl_registry_wp_viewporter_remove(GWL_Display *display,
*value_p = nullptr;
}
#endif /* WITH_GHOST_WAYLAND_FRACTIONAL_SCALE */
/* #GWL_Display.wp_primary_selection_device_manager */
static void gwl_registry_wp_primary_selection_device_manager_add(
@ -5332,7 +5323,6 @@ static const GWL_RegistryHandler gwl_registry_handlers[] = {
/*update_fn*/ nullptr,
/*remove_fn*/ gwl_registry_xdg_activation_remove,
},
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
{
/*interface_p*/ &wp_fractional_scale_manager_v1_interface.name,
/*add_fn*/ gwl_registry_wp_fractional_scale_manager_add,
@ -5345,7 +5335,6 @@ static const GWL_RegistryHandler gwl_registry_handlers[] = {
/*update_fn*/ nullptr,
/*remove_fn*/ gwl_registry_wp_viewporter_remove,
},
#endif /* WITH_GHOST_WAYLAND_FRACTIONAL_SCALE */
/* Display outputs. */
{
/*interface_p*/ &wl_output_interface.name,
@ -6910,7 +6899,6 @@ struct xdg_activation_v1 *GHOST_SystemWayland::xdg_activation_manager()
return display_->xdg_activation_manager;
}
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
struct wp_fractional_scale_manager_v1 *GHOST_SystemWayland::wp_fractional_scale_manager()
{
return display_->wp_fractional_scale_manager;
@ -6919,7 +6907,6 @@ struct wp_viewporter *GHOST_SystemWayland::wp_viewporter()
{
return display_->wp_viewporter;
}
#endif /* WITH_GHOST_WAYLAND_FRACTIONAL_SCALE */
struct zwp_pointer_gestures_v1 *GHOST_SystemWayland::wp_pointer_gestures()
{

View File

@ -201,10 +201,8 @@ class GHOST_SystemWayland : public GHOST_System {
struct zwp_primary_selection_device_manager_v1 *wp_primary_selection_manager();
struct xdg_activation_v1 *xdg_activation_manager();
struct zwp_pointer_gestures_v1 *wp_pointer_gestures();
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
struct wp_fractional_scale_manager_v1 *wp_fractional_scale_manager();
struct wp_viewporter *wp_viewporter();
#endif
#ifdef WITH_GHOST_WAYLAND_LIBDECOR
libdecor *libdecor_context();

View File

@ -35,14 +35,12 @@
#endif
/* Generated by `wayland-scanner`. */
#include <fractional-scale-v1-client-protocol.h>
#include <viewporter-client-protocol.h>
#include <xdg-activation-v1-client-protocol.h>
#include <xdg-decoration-unstable-v1-client-protocol.h>
#include <xdg-shell-client-protocol.h>
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
# include <fractional-scale-v1-client-protocol.h>
# include <viewporter-client-protocol.h>
# define FRACTIONAL_DENOMINATOR 120
#endif
#define FRACTIONAL_DENOMINATOR 120
#include <atomic>
@ -105,21 +103,17 @@ struct GWL_WindowScaleParams {
wl_fixed_t gwl_window_scale_wl_fixed_to(const GWL_WindowScaleParams &scale_params,
wl_fixed_t value)
{
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
if (scale_params.is_fractional) {
return (value * scale_params.scale) / FRACTIONAL_DENOMINATOR;
}
#endif
return value * scale_params.scale;
}
wl_fixed_t gwl_window_scale_wl_fixed_from(const GWL_WindowScaleParams &scale_params,
wl_fixed_t value)
{
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
if (scale_params.is_fractional) {
return (value * FRACTIONAL_DENOMINATOR) / scale_params.scale;
}
#endif
return value / scale_params.scale;
}
@ -156,8 +150,15 @@ enum eGWL_PendingWindowActions {
* this window is visible on may have changed. Recalculate the windows scale.
*/
PENDING_OUTPUT_SCALE_UPDATE,
/**
* The surface needs a commit to run.
* Use this to avoid committing immediately which can cause flickering when other operations
* have not yet been performed - such as refreshing the window size.
*/
PENDING_WINDOW_SURFACE_COMMIT,
};
# define PENDING_NUM (PENDING_OUTPUT_SCALE_UPDATE + 1)
# define PENDING_NUM (PENDING_WINDOW_SURFACE_COMMIT + 1)
#endif /* USE_EVENT_BACKGROUND_THREAD */
@ -174,6 +175,8 @@ struct GWL_WindowFrame {
bool is_active = false;
/** Disable when the fractional scale is a whole number. */
int fractional_scale = 0;
/** The scale passed to #wl_surface_set_buffer_scale. */
int buffer_scale = 0;
};
struct GWL_Window {
@ -188,22 +191,15 @@ struct GWL_Window {
*/
std::vector<GWL_Output *> outputs;
/** The scale value written to #wl_surface_set_buffer_scale. */
int scale = 0;
/**
* The scale value to be used in the case fractional scale is disable.
* In general this should only be used when changing states
* (when disabling fractional scale).
*/
int scale_on_output = 0;
/** A temporary token used for the window to be notified of it's activation. */
struct xdg_activation_token_v1 *xdg_activation_token = nullptr;
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
struct wp_viewport *viewport = nullptr;
/**
* When set, only respond to the #wp_fractional_scale_v1_listener::preferred_scale callback
* and ignore updated scale based on #wl_surface_listener::enter & exit events.
*/
struct wp_fractional_scale_v1 *fractional_scale_handle = nullptr;
#endif
#ifdef WITH_GHOST_WAYLAND_LIBDECOR
WGL_LibDecor_Window *libdecor = nullptr;
@ -385,8 +381,6 @@ static bool gwl_window_state_set(GWL_Window *win, const GHOST_TWindowState state
* as the outputs scale may change at runtime, support creating & clearing the viewport as needed.
* \{ */
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
/**
* Scale a value from a viewport value to Wayland windowing.
* Scale down or not at all.
@ -438,9 +432,9 @@ static bool gwl_window_viewport_set(GWL_Window *win, bool *r_surface_needs_commi
}
/* Set the buffer scale to 1 since a viewport will be used. */
if (win->scale != 1) {
win->scale = 1;
wl_surface_set_buffer_scale(win->wl_surface, win->scale);
if (win->frame.buffer_scale != 1) {
win->frame.buffer_scale = 1;
wl_surface_set_buffer_scale(win->wl_surface, win->frame.buffer_scale);
if (r_surface_needs_commit) {
*r_surface_needs_commit = true;
}
@ -461,10 +455,10 @@ static bool gwl_window_viewport_unset(GWL_Window *win, bool *r_surface_needs_com
wp_viewport_destroy(win->viewport);
win->viewport = nullptr;
GHOST_ASSERT(win->scale == 1, "Unexpected scale!");
if (win->scale != win->scale_on_output) {
win->scale = win->scale_on_output;
wl_surface_set_buffer_scale(win->wl_surface, win->scale);
GHOST_ASSERT(win->frame.buffer_scale == 1, "Unexpected scale!");
if (win->frame_pending.buffer_scale != win->frame.buffer_scale) {
win->frame.buffer_scale = win->frame_pending.buffer_scale;
wl_surface_set_buffer_scale(win->wl_surface, win->frame.buffer_scale);
if (r_surface_needs_commit) {
*r_surface_needs_commit = true;
}
@ -492,8 +486,6 @@ static bool gwl_window_viewport_size_update(GWL_Window *win)
return true;
}
#endif /* WITH_GHOST_WAYLAND_FRACTIONAL_SCALE */
/** \} */
/* -------------------------------------------------------------------- */
@ -555,24 +547,35 @@ static void gwl_window_activate(GWL_Window *win)
/** \name Internal #GWL_Window Pending Actions
* \{ */
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
static void gwl_window_frame_pending_fractional_scale_set(GWL_Window *win,
bool *r_surface_needs_commit)
{
if (win->frame_pending.fractional_scale == win->frame.fractional_scale) {
if (win->frame_pending.fractional_scale == win->frame.fractional_scale &&
win->frame_pending.buffer_scale == win->frame.buffer_scale) {
return;
}
win->frame.fractional_scale = win->frame_pending.fractional_scale;
if (win->frame_pending.fractional_scale) {
win->frame.fractional_scale = win->frame_pending.fractional_scale;
gwl_window_viewport_set(win, r_surface_needs_commit);
gwl_window_viewport_size_update(win);
}
else {
gwl_window_viewport_unset(win, r_surface_needs_commit);
if (win->viewport) {
gwl_window_viewport_unset(win, r_surface_needs_commit);
}
else {
win->frame.buffer_scale = win->frame_pending.buffer_scale;
wl_surface_set_buffer_scale(win->wl_surface, win->frame.buffer_scale);
if (r_surface_needs_commit) {
*r_surface_needs_commit = true;
}
else {
wl_surface_commit(win->wl_surface);
}
}
}
}
#endif /* WITH_GHOST_WAYLAND_FRACTIONAL_SCALE */
static void gwl_window_frame_pending_size_set(GWL_Window *win, bool *r_surface_needs_commit)
{
@ -583,16 +586,13 @@ static void gwl_window_frame_pending_size_set(GWL_Window *win, bool *r_surface_n
win->frame.size[0] = win->frame_pending.size[0];
win->frame.size[1] = win->frame_pending.size[1];
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
if (win->frame_pending.fractional_scale != win->frame.fractional_scale) {
if (win->frame_pending.fractional_scale != win->frame.fractional_scale ||
win->frame_pending.buffer_scale != win->frame.buffer_scale) {
gwl_window_frame_pending_fractional_scale_set(win, r_surface_needs_commit);
}
else {
gwl_window_viewport_size_update(win);
}
#else
(void)r_surface_needs_commit;
#endif
wl_egl_window_resize(win->egl_window, UNPACK2(win->frame.size), 0, 0);
@ -614,23 +614,34 @@ static void gwl_window_pending_actions_tag(GWL_Window *win, enum eGWL_PendingWin
static void gwl_window_pending_actions_handle(GWL_Window *win)
{
if (win->pending_actions[PENDING_WINDOW_FRAME_CONFIGURE].exchange(false)) {
/* Ensure pending actions always use the state when the function starts
* because one actions may trigger other pending actions an in that case
* exact behavior depends on the order functions are called here.
* Without this, configuring the frame will trigger the surface
* commit immediately instead of the next time pending actions are handled. */
bool actions[PENDING_NUM];
for (size_t i = 0; i < ARRAY_SIZE(actions); i++) {
actions[i] = win->pending_actions[i].exchange(false);
}
if (actions[PENDING_WINDOW_FRAME_CONFIGURE]) {
gwl_window_frame_update_from_pending(win);
}
if (win->pending_actions[PENDING_EGL_WINDOW_RESIZE].exchange(false)) {
# ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
if (actions[PENDING_EGL_WINDOW_RESIZE]) {
gwl_window_viewport_size_update(win);
# endif
wl_egl_window_resize(win->egl_window, UNPACK2(win->frame.size), 0, 0);
}
# ifdef GHOST_OPENGL_ALPHA
if (win->pending_actions[PENDING_OPAQUE_SET].exchange(false)) {
if (actions[PENDING_OPAQUE_SET]) {
win->ghost_window->setOpaque();
}
# endif
if (win->pending_actions[PENDING_OUTPUT_SCALE_UPDATE].exchange(false)) {
if (actions[PENDING_OUTPUT_SCALE_UPDATE]) {
win->ghost_window->outputs_changed_update_scale();
}
if (actions[PENDING_WINDOW_SURFACE_COMMIT]) {
wl_surface_commit(win->wl_surface);
}
}
#endif /* USE_EVENT_BACKGROUND_THREAD */
@ -657,14 +668,17 @@ static void gwl_window_frame_update_from_pending_no_lock(GWL_Window *win)
}
}
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
if (win->frame_pending.fractional_scale != win->frame.fractional_scale) {
if (win->fractional_scale_handle) {
gwl_window_frame_pending_fractional_scale_set(win, &surface_needs_commit);
}
#endif
if (surface_needs_commit) {
#ifdef USE_EVENT_BACKGROUND_THREAD
/* Postponing the commit avoids flickering when moving between monitors of different scale. */
gwl_window_pending_actions_tag(win, PENDING_WINDOW_SURFACE_COMMIT);
#else
wl_surface_commit(win->wl_surface);
#endif
}
if (dpi_changed) {
@ -759,16 +773,13 @@ static void xdg_toplevel_handle_configure(void *data,
std::lock_guard lock_frame_guard{win->frame_pending_mutex};
#endif
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
if (win->frame.fractional_scale) {
win->frame_pending.size[0] = gwl_window_fractional_to_viewport_round(win->frame, width);
win->frame_pending.size[1] = gwl_window_fractional_to_viewport_round(win->frame, height);
}
else
#endif /* WITH_GHOST_WAYLAND_FRACTIONAL_SCALE */
{
win->frame_pending.size[0] = width * win->scale;
win->frame_pending.size[1] = height * win->scale;
else {
win->frame_pending.size[0] = width * win->frame.buffer_scale;
win->frame_pending.size[1] = height * win->frame.buffer_scale;
}
win->frame_pending.is_maximised = false;
@ -850,44 +861,50 @@ static const struct xdg_activation_token_v1_listener *xdg_activation_listener_ge
* Used by #gwl_window_activate.
* \{ */
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
static CLG_LogRef LOG_WL_FRACTIONAL_SCALE = {"ghost.wl.handle.fractional_scale"};
# define LOG (&LOG_WL_FRACTIONAL_SCALE)
#define LOG (&LOG_WL_FRACTIONAL_SCALE)
static void wp_fractional_scale_handle_preferred_scale(
void *data, struct wp_fractional_scale_v1 * /*wp_fractional_scale_v1*/, uint preferred_scale)
{
#ifdef USE_EVENT_BACKGROUND_THREAD
std::lock_guard lock_frame_guard{static_cast<GWL_Window *>(data)->frame_pending_mutex};
#endif
CLOG_INFO(LOG,
2,
"preferred_scale (preferred_scale=%.6f)",
double(preferred_scale) / FRACTIONAL_DENOMINATOR);
GWL_Window *win = static_cast<GWL_Window *>(data);
const bool is_fractional = (preferred_scale % FRACTIONAL_DENOMINATOR) != 0;
/* When non-fractional, never use fractional scaling! */
win->frame_pending.fractional_scale = is_fractional ? preferred_scale : 0;
win->frame_pending.buffer_scale = is_fractional ? 1 : preferred_scale / FRACTIONAL_DENOMINATOR;
if (win->frame.fractional_scale != win->frame_pending.fractional_scale) {
const int scale_prev = win->frame.fractional_scale ?
win->frame.fractional_scale :
win->frame.buffer_scale * FRACTIONAL_DENOMINATOR;
const int scale_next = preferred_scale;
if (scale_prev != scale_next) {
/* Resize the window failing to do so results in severe flickering with a
* multi-monitor setup when multiple monitors have different scales.
*
* NOTE: some flickering is still possible even when resizing this
* happens when dragging the right hand side of the title-bar in KDE
* as expanding changed the size on the RHS, this may be up to the compositor to fix. */
const int scale_prev = win->frame.fractional_scale ?
win->frame.fractional_scale :
win->scale_on_output * FRACTIONAL_DENOMINATOR;
const int scale_next = win->frame_pending.fractional_scale ?
win->frame_pending.fractional_scale :
win->scale_on_output * FRACTIONAL_DENOMINATOR;
for (size_t i = 0; i < ARRAY_SIZE(win->frame_pending.size); i++) {
const int value = win->frame_pending.size[i] ? win->frame_pending.size[i] :
win->frame.size[i];
win->frame_pending.size[i] = lroundf(value * (double(scale_next) / double(scale_prev)));
}
#ifdef USE_EVENT_BACKGROUND_THREAD
gwl_window_pending_actions_tag(win, PENDING_WINDOW_FRAME_CONFIGURE);
#else
gwl_window_frame_update_from_pending(win);
#endif
}
}
@ -895,9 +912,7 @@ static const struct wp_fractional_scale_v1_listener wp_fractional_scale_listener
/*preferred_scale*/ wp_fractional_scale_handle_preferred_scale,
};
# undef LOG
#endif /* WITH_GHOST_WAYLAND_FRACTIONAL_SCALE */
#undef LOG
/** \} */
@ -926,23 +941,20 @@ static void frame_handle_configure(struct libdecor_frame *frame,
int size_next[2];
{
GWL_Window *win = static_cast<GWL_Window *>(data);
const int scale = win->scale;
const int scale = win->frame.buffer_scale;
if (!libdecor_configuration_get_content_size(
configuration, frame, &size_next[0], &size_next[1])) {
size_next[0] = win->frame.size[0] / scale;
size_next[1] = win->frame.size[1] / scale;
}
# ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
if (win->frame.fractional_scale) {
win->frame_pending.size[0] = gwl_window_fractional_to_viewport_round(win->frame,
size_next[0]);
win->frame_pending.size[1] = gwl_window_fractional_to_viewport_round(win->frame,
size_next[1]);
}
else
# endif /* WITH_GHOST_WAYLAND_FRACTIONAL_SCALE */
{
else {
frame_pending->size[0] = size_next[0] * scale;
frame_pending->size[1] = size_next[1] * scale;
}
@ -1185,16 +1197,18 @@ GHOST_WindowWayland::GHOST_WindowWayland(GHOST_SystemWayland *system,
*
* Using the maximum scale is best as it results in the window first being smaller,
* avoiding a large window flashing before it's made smaller. */
window_->scale = outputs_max_scale_or_default(system_->outputs(), 1);
window_->scale_on_output = window_->scale;
window_->frame.buffer_scale = outputs_max_scale_or_default(system_->outputs(), 1);
window_->frame_pending.buffer_scale = window_->frame.buffer_scale;
window_->frame.size[0] = int32_t(width);
window_->frame.size[1] = int32_t(height);
/* The window surface must be rounded to the scale,
* failing to do so causes the WAYLAND-server to close the window immediately. */
window_->frame.size[0] = (window_->frame.size[0] / window_->scale) * window_->scale;
window_->frame.size[1] = (window_->frame.size[1] / window_->scale) * window_->scale;
window_->frame.size[0] = (window_->frame.size[0] / window_->frame.buffer_scale) *
window_->frame.buffer_scale;
window_->frame.size[1] = (window_->frame.size[1] / window_->frame.buffer_scale) *
window_->frame.buffer_scale;
window_->is_dialog = is_dialog;
@ -1202,14 +1216,13 @@ GHOST_WindowWayland::GHOST_WindowWayland(GHOST_SystemWayland *system,
window_->wl_surface = wl_compositor_create_surface(system_->wl_compositor());
ghost_wl_surface_tag(window_->wl_surface);
wl_surface_set_buffer_scale(window_->wl_surface, window_->scale);
wl_surface_set_buffer_scale(window_->wl_surface, window_->frame.buffer_scale);
wl_surface_add_listener(window_->wl_surface, &wl_surface_listener, window_);
window_->egl_window = wl_egl_window_create(
window_->wl_surface, int(window_->frame.size[0]), int(window_->frame.size[1]));
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
struct wp_fractional_scale_manager_v1 *fractional_scale_manager =
system->wp_fractional_scale_manager();
if (fractional_scale_manager) {
@ -1218,12 +1231,11 @@ GHOST_WindowWayland::GHOST_WindowWayland(GHOST_SystemWayland *system,
wp_fractional_scale_v1_add_listener(
window_->fractional_scale_handle, &wp_fractional_scale_listener, window_);
}
#endif /* WITH_GHOST_WAYLAND_FRACTIONAL_SCALE */
/* NOTE: The limit is in points (not pixels) so Hi-DPI will limit to larger number of pixels.
* This has the advantage that the size limit is the same when moving the window between monitors
* with different scales set. If it was important to limit in pixels it could be re-calculated
* when the `window_->scale` changed. */
* when the `window_->frame.buffer_scale` changed. */
const int32_t size_min[2] = {320, 240};
const char *xdg_app_id = GHOST_SystemWayland::xdg_app_id();
@ -1469,7 +1481,6 @@ GHOST_WindowWayland::~GHOST_WindowWayland()
window_->xdg_activation_token = nullptr;
}
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
if (window_->fractional_scale_handle) {
wp_fractional_scale_v1_destroy(window_->fractional_scale_handle);
window_->fractional_scale_handle = nullptr;
@ -1479,7 +1490,6 @@ GHOST_WindowWayland::~GHOST_WindowWayland()
wp_viewport_destroy(window_->viewport);
window_->viewport = nullptr;
}
#endif /* WITH_GHOST_WAYLAND_FRACTIONAL_SCALE */
#ifdef WITH_GHOST_WAYLAND_LIBDECOR
if (use_libdecor) {
@ -1510,13 +1520,11 @@ uint16_t GHOST_WindowWayland::getDPIHint()
/* No need to lock `server_mutex`
* (`outputs_changed_update_scale` never changes values in a non-main thread). */
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
if (window_->frame.fractional_scale) {
return gwl_window_fractional_to_viewport(window_->frame, base_dpi);
}
#endif /* WITH_GHOST_WAYLAND_FRACTIONAL_SCALE */
return window_->scale * base_dpi;
return window_->frame.buffer_scale * base_dpi;
}
GHOST_TSuccess GHOST_WindowWayland::setWindowCursorVisibility(bool visible)
@ -1686,7 +1694,7 @@ GHOST_Context *GHOST_WindowWayland::newDrawingContext(GHOST_TDrawingContextType
int GHOST_WindowWayland::scale() const
{
return window_->scale;
return window_->frame.buffer_scale;
}
const struct GWL_WindowScaleParams &GHOST_WindowWayland::scale_params() const
@ -1696,28 +1704,24 @@ const struct GWL_WindowScaleParams &GHOST_WindowWayland::scale_params() const
GWL_WindowScaleParams *scale_params = &window_->scale_params;
scale_params->is_fractional = (window_->frame.fractional_scale != 0);
scale_params->scale = scale_params->is_fractional ? window_->frame.fractional_scale :
window_->scale;
window_->frame.buffer_scale;
return *scale_params;
}
wl_fixed_t GHOST_WindowWayland::wl_fixed_from_window(wl_fixed_t value) const
{
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
if (window_->frame.fractional_scale) {
return gwl_window_fractional_from_viewport(window_->frame, value);
}
#endif
return value / window_->scale;
return value / window_->frame.buffer_scale;
}
wl_fixed_t GHOST_WindowWayland::wl_fixed_to_window(wl_fixed_t value) const
{
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
if (window_->frame.fractional_scale) {
return gwl_window_fractional_to_viewport(window_->frame, value);
}
#endif
return value * window_->scale;
return value * window_->frame.buffer_scale;
}
wl_surface *GHOST_WindowWayland::wl_surface() const
@ -1835,24 +1839,21 @@ bool GHOST_WindowWayland::outputs_changed_update_scale()
}
#endif
if (window_->fractional_scale_handle) {
/* Let the #wp_fractional_scale_v1_listener::preferred_scale callback handle
* changes to the windows scale. */
return false;
}
const int scale_next = outputs_max_scale_or_default(outputs(), 0);
if (UNLIKELY(scale_next == 0)) {
return false;
}
#ifdef WITH_GHOST_WAYLAND_FRACTIONAL_SCALE
if (window_->frame.fractional_scale) {
GHOST_ASSERT(window_->scale == 1, "Scale is expected to be 1 for fractional scaling!");
window_->scale_on_output = scale_next;
return false;
}
#endif /* WITH_GHOST_WAYLAND_FRACTIONAL_SCALE */
const int scale_curr = window_->scale;
const int scale_curr = window_->frame.buffer_scale;
bool changed = false;
if (scale_next != scale_curr) {
window_->scale = scale_next;
window_->frame.buffer_scale = scale_next;
wl_surface_set_buffer_scale(window_->wl_surface, scale_next);
#ifdef USE_EVENT_BACKGROUND_THREAD
@ -1881,9 +1882,6 @@ bool GHOST_WindowWayland::outputs_changed_update_scale()
changed = true;
}
/* Ensure both are always set. */
window_->scale_on_output = window_->scale;
return changed;
}

View File

@ -349,7 +349,7 @@ static bool addGPULut1D2D(OCIO_GPUTextures &textures,
}
GPU_texture_filter_mode(lut.texture, interpolation != INTERP_NEAREST);
GPU_texture_wrap_mode(lut.texture, false, true);
GPU_texture_extend_mode(lut.texture, GPU_SAMPLER_EXTEND_MODE_EXTEND);
lut.sampler_name = sampler_name;
@ -387,7 +387,7 @@ static bool addGPULut3D(OCIO_GPUTextures &textures,
}
GPU_texture_filter_mode(lut.texture, interpolation != INTERP_NEAREST);
GPU_texture_wrap_mode(lut.texture, false, true);
GPU_texture_extend_mode(lut.texture, GPU_SAMPLER_EXTEND_MODE_EXTEND);
lut.sampler_name = sampler_name;
@ -453,7 +453,7 @@ static bool createGPUCurveMapping(OCIO_GPUCurveMappping &curvemap,
curvemap.texture = GPU_texture_create_1d(
"OCIOCurveMap", lut_size, 1, GPU_RGBA16F, GPU_TEXTURE_USAGE_SHADER_READ, nullptr);
GPU_texture_filter_mode(curvemap.texture, false);
GPU_texture_wrap_mode(curvemap.texture, false, true);
GPU_texture_extend_mode(curvemap.texture, GPU_SAMPLER_EXTEND_MODE_EXTEND);
curvemap.buffer = GPU_uniformbuf_create(sizeof(OCIO_GPUCurveMappingParameters));

View File

@ -61,17 +61,6 @@ void Renderdoc::load()
RENDERDOC_GetAPI(eRENDERDOC_API_Version_1_1_2, (void **)&renderdoc_api_);
}
#endif
if (renderdoc_api_ != nullptr) {
int major;
int minor;
int patch;
renderdoc_api_->GetAPIVersion(&major, &minor, &patch);
std::cout << "Found renderdoc API [" << major << "." << minor << "." << patch << "]";
}
else {
std::cerr << "Unable to load renderdoc API.\n";
}
}
} // namespace renderdoc::api

View File

@ -753,7 +753,8 @@ def dump_py_messages(msgs, reports, addons, settings, addons_only=False):
return []
if os.path.isdir(path):
return [os.path.join(dpath, fn) for dpath, _, fnames in os.walk(path) for fn in fnames
if not fn.startswith("_") and fn.endswith(".py")]
if fn.endswith(".py") and (fn == "__init__.py"
or not fn.startswith("_"))]
return [path]
files = []

View File

@ -4,7 +4,7 @@ from bpy.types import Menu, Panel, UIList
from rna_prop_ui import PropertyPanel
from bpy.app.translations import (
pgettext_tip as iface_,
pgettext_iface as iface_,
pgettext_tip as tip_,
)

View File

@ -201,11 +201,12 @@ enum {
G_DEBUG_IO = (1 << 17), /* IO Debugging (for Collada, ...). */
G_DEBUG_GPU_FORCE_WORKAROUNDS = (1 << 18), /* force gpu workarounds bypassing detections. */
G_DEBUG_GPU_FORCE_DISABLE_SSBO = (1 << 19), /* force disabling usage of SSBO's */
G_DEBUG_XR = (1 << 20), /* XR/OpenXR messages */
G_DEBUG_XR_TIME = (1 << 21), /* XR/OpenXR timing messages */
G_DEBUG_GPU_RENDERDOC = (1 << 20), /* Enable Renderdoc integration. */
G_DEBUG_XR = (1 << 21), /* XR/OpenXR messages */
G_DEBUG_XR_TIME = (1 << 22), /* XR/OpenXR timing messages */
G_DEBUG_GHOST = (1 << 22), /* Debug GHOST module. */
G_DEBUG_WINTAB = (1 << 23), /* Debug Wintab. */
G_DEBUG_GHOST = (1 << 23), /* Debug GHOST module. */
G_DEBUG_WINTAB = (1 << 24), /* Debug Wintab. */
};
#define G_DEBUG_ALL \

View File

@ -437,7 +437,7 @@ static GPUTexture *image_get_gpu_texture(Image *ima,
ima->id.name + 2, ibuf_intern, use_high_bitdepth, store_premultiplied);
if (*tex) {
GPU_texture_wrap_mode(*tex, true, false);
GPU_texture_extend_mode(*tex, GPU_SAMPLER_EXTEND_MODE_REPEAT);
if (GPU_mipmap_enabled()) {
GPU_texture_update_mipmap_chain(*tex);

View File

@ -821,7 +821,7 @@ bool id_single_user(bContext *C, ID *id, PointerRNA *ptr, PropertyRNA *prop)
ID *newid = NULL;
PointerRNA idptr;
if (id) {
if (id && (ID_REAL_USERS(id) > 1)) {
/* If property isn't editable,
* we're going to have an extra block hanging around until we save. */
if (RNA_property_editable(ptr, prop)) {

View File

@ -2477,11 +2477,21 @@ void node_socket_move_default_value(Main &bmain,
}
break;
}
case SOCK_CUSTOM:
case SOCK_SHADER:
case SOCK_GEOMETRY: {
/* Unmovable types. */
return;
}
default: {
break;
}
}
if (dst_values.is_empty() || src_socket_value == nullptr) {
return;
}
for (ID **dst_value : dst_values) {
*dst_value = *src_socket_value;
id_us_plus(*dst_value);

View File

@ -487,7 +487,7 @@ static void studiolight_create_equirect_radiance_gputexture(StudioLight *sl)
ibuf->rect_float);
GPUTexture *tex = sl->equirect_radiance_gputexture;
GPU_texture_filter_mode(tex, true);
GPU_texture_wrap_mode(tex, true, true);
GPU_texture_extend_mode(tex, GPU_SAMPLER_EXTEND_MODE_REPEAT);
}
sl->flag |= STUDIOLIGHT_EQUIRECT_RADIANCE_GPUTEXTURE;
}
@ -548,7 +548,7 @@ static void studiolight_create_equirect_irradiance_gputexture(StudioLight *sl)
ibuf->rect_float);
GPUTexture *tex = sl->equirect_irradiance_gputexture;
GPU_texture_filter_mode(tex, true);
GPU_texture_wrap_mode(tex, true, true);
GPU_texture_extend_mode(tex, GPU_SAMPLER_EXTEND_MODE_REPEAT);
}
sl->flag |= STUDIOLIGHT_EQUIRECT_IRRADIANCE_GPUTEXTURE;
}

View File

@ -849,7 +849,6 @@ void blo_do_versions_pre250(FileData *fd, Library *lib, Main *bmain)
if (bmain->versionfile <= 224) {
bSound *sound;
Scene *sce;
Mesh *me;
bScreen *screen;
@ -869,10 +868,6 @@ void blo_do_versions_pre250(FileData *fd, Library *lib, Main *bmain)
}
}
for (sce = bmain->scenes.first; sce; sce = sce->id.next) {
sce->r.stereomode = 1; /* no stereo */
}
/* some oldfile patch, moved from set_func_space */
for (screen = bmain->screens.first; screen; screen = screen->id.next) {
ScrArea *area;

View File

@ -58,11 +58,16 @@ void RealizeOnDomainOperation::execute()
Interpolation::Bicubic);
GPU_texture_filter_mode(input.texture(), use_bilinear);
/* Make out-of-bound texture access return zero by clamping to border color. And make texture
* wrap appropriately if the input repeats. */
const bool repeats = input.get_realization_options().repeat_x ||
input.get_realization_options().repeat_y;
GPU_texture_wrap_mode(input.texture(), repeats, false);
/* If the input repeats, set a repeating wrap mode for out-of-bound texture access. Otherwise,
* make out-of-bound texture access return zero by setting a clamp to border extend mode. */
GPU_texture_extend_mode_x(input.texture(),
input.get_realization_options().repeat_x ?
GPU_SAMPLER_EXTEND_MODE_REPEAT :
GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER);
GPU_texture_extend_mode_y(input.texture(),
input.get_realization_options().repeat_y ?
GPU_SAMPLER_EXTEND_MODE_REPEAT :
GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER);
input.bind_as_texture(shader, "input_tx");
result.bind_as_image(shader, "domain_img");

View File

@ -329,8 +329,10 @@ int EEVEE_depth_of_field_init(EEVEE_ViewLayerData *UNUSED(sldata),
return 0;
}
#define WITH_FILTERING (GPU_SAMPLER_MIPMAP | GPU_SAMPLER_FILTER)
#define NO_FILTERING GPU_SAMPLER_MIPMAP
const static GPUSamplerState WITH_FILTERING = {GPU_SAMPLER_FILTERING_MIPMAP |
GPU_SAMPLER_FILTERING_LINEAR};
const static GPUSamplerState NO_FILTERING = {GPU_SAMPLER_FILTERING_MIPMAP};
#define COLOR_FORMAT fx->dof_color_format
#define FG_TILE_FORMAT GPU_RGBA16F
#define BG_TILE_FORMAT GPU_R11F_G11F_B10F

View File

@ -245,7 +245,8 @@ void EEVEE_effects_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *vedata)
DRW_PASS_CREATE(psl->color_downsample_ps, DRW_STATE_WRITE_COLOR);
grp = DRW_shgroup_create(EEVEE_shaders_effect_downsample_sh_get(), psl->color_downsample_ps);
DRW_shgroup_uniform_texture_ex(grp, "source", txl->filtered_radiance, GPU_SAMPLER_FILTER);
const GPUSamplerState sampler_state = {GPU_SAMPLER_FILTERING_LINEAR};
DRW_shgroup_uniform_texture_ex(grp, "source", txl->filtered_radiance, sampler_state);
DRW_shgroup_uniform_vec2(grp, "texelSize", e_data.texel_size, 1);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
}

View File

@ -227,7 +227,9 @@ void EEVEE_lookdev_cache_init(EEVEE_Data *vedata,
if (probe_render) {
/* Avoid artifact with equirectangular mapping. */
eGPUSamplerState state = (GPU_SAMPLER_FILTER | GPU_SAMPLER_REPEAT_S);
GPUSamplerState state = {GPU_SAMPLER_FILTERING_LINEAR,
GPU_SAMPLER_EXTEND_MODE_REPEAT,
GPU_SAMPLER_EXTEND_MODE_EXTEND};
DRW_shgroup_uniform_float_copy(grp, "studioLightIntensity", shading->studiolight_intensity);
BKE_studiolight_ensure_flag(sl, STUDIOLIGHT_EQUIRECT_RADIANCE_GPUTEXTURE);
DRW_shgroup_uniform_texture_ex(grp, "studioLight", sl->equirect_radiance_gputexture, state);

View File

@ -162,7 +162,7 @@ void EEVEE_motion_blur_cache_init(EEVEE_ViewLayerData *UNUSED(sldata), EEVEE_Dat
}
{
DRW_PASS_CREATE(psl->motion_blur, DRW_STATE_WRITE_COLOR);
eGPUSamplerState state = 0;
const GPUSamplerState state = GPU_SAMPLER_DEFAULT;
int expand_steps = 1 + (max_ii(0, effects->motion_blur_max - 1) / EEVEE_VELOCITY_TILE_SIZE);
GPUTexture *tile_tx = (expand_steps & 1) ? effects->velocity_tiles_x_tx :
effects->velocity_tiles_tx;

View File

@ -150,7 +150,7 @@ void EEVEE_screen_raytrace_cache_init(EEVEE_ViewLayerData *sldata, EEVEE_Data *v
grp, "randomScale", effects->reflection_trace_full ? 0.0f : 0.5f);
DRW_shgroup_call_procedural_triangles(grp, NULL, 1);
eGPUSamplerState no_filter = GPU_SAMPLER_DEFAULT;
GPUSamplerState no_filter = GPU_SAMPLER_DEFAULT;
if (effects->use_split_ssr_pass) {
/* Prepare passes for split reflections resolve variant. */

View File

@ -191,7 +191,7 @@ void EEVEE_subsurface_add_pass(EEVEE_ViewLayerData *sldata,
DRW_shgroup_stencil_mask(shgrp, sss_id);
{
eGPUSamplerState state = GPU_SAMPLER_DEFAULT;
GPUSamplerState state = GPU_SAMPLER_DEFAULT;
DRWShadingGroup *grp = DRW_shgroup_create(EEVEE_shaders_subsurface_first_pass_sh_get(),
psl->sss_blur_ps);

View File

@ -446,7 +446,7 @@ void DepthOfField::hole_fill_pass_sync()
void DepthOfField::resolve_pass_sync()
{
eGPUSamplerState with_filter = GPU_SAMPLER_FILTER;
GPUSamplerState with_filter = {GPU_SAMPLER_FILTERING_LINEAR};
RenderBuffers &render_buffers = inst_.render_buffers;
eShaderType sh_type = use_bokeh_lut_ ? DOF_RESOLVE_LUT : DOF_RESOLVE;

View File

@ -45,8 +45,9 @@ class DepthOfField {
class Instance &inst_;
/** Samplers */
static constexpr eGPUSamplerState gather_bilinear = GPU_SAMPLER_MIPMAP | GPU_SAMPLER_FILTER;
static constexpr eGPUSamplerState gather_nearest = GPU_SAMPLER_MIPMAP;
static constexpr GPUSamplerState gather_bilinear = {GPU_SAMPLER_FILTERING_MIPMAP |
GPU_SAMPLER_FILTERING_LINEAR};
static constexpr GPUSamplerState gather_nearest = {GPU_SAMPLER_FILTERING_MIPMAP};
/** Input/Output texture references. */
GPUTexture *input_color_tx_ = nullptr;

View File

@ -416,7 +416,7 @@ void Film::sync()
RenderBuffers &rbuffers = inst_.render_buffers;
VelocityModule &velocity = inst_.velocity;
eGPUSamplerState filter = GPU_SAMPLER_FILTER;
GPUSamplerState filter = {GPU_SAMPLER_FILTERING_LINEAR};
/* For viewport, only previous motion is supported.
* Still bind previous step to avoid undefined behavior. */

View File

@ -132,7 +132,7 @@ void MotionBlurModule::sync()
return;
}
eGPUSamplerState no_filter = GPU_SAMPLER_DEFAULT;
GPUSamplerState no_filter = GPUSamplerState::default_sampler();
RenderBuffers &render_buffers = inst_.render_buffers;
motion_blur_ps_.init();

View File

@ -26,8 +26,8 @@ class ShadowPunctual;
using namespace draw;
constexpr eGPUSamplerState no_filter = GPU_SAMPLER_DEFAULT;
constexpr eGPUSamplerState with_filter = GPU_SAMPLER_FILTER;
constexpr GPUSamplerState no_filter = GPUSamplerState::default_sampler();
constexpr GPUSamplerState with_filter = {GPU_SAMPLER_FILTERING_LINEAR};
#endif

View File

@ -198,7 +198,8 @@ template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractD
for (const TextureInfo &info : instance_data->texture_infos) {
DRWShadingGroup *shgrp_sub = DRW_shgroup_create_sub(shgrp);
DRW_shgroup_uniform_ivec2_copy(shgrp_sub, "offset", info.offset());
DRW_shgroup_uniform_texture_ex(shgrp_sub, "imageTexture", info.texture, GPU_SAMPLER_DEFAULT);
DRW_shgroup_uniform_texture_ex(
shgrp_sub, "imageTexture", info.texture, GPUSamplerState::default_sampler());
DRW_shgroup_call_obmat(shgrp_sub, info.batch, image_mat);
}
}

View File

@ -77,7 +77,8 @@ GPU_SHADER_CREATE_INFO(workbench_next_resolve_curvature)
GPU_SHADER_CREATE_INFO(workbench_next_resolve_cavity)
.define("WORKBENCH_CAVITY")
/* TODO(@pragma37): GPU_SAMPLER_REPEAT is set in CavityEffect, it doesn't work here? */
/* TODO(@pragma37): GPU_SAMPLER_EXTEND_MODE_REPEAT is set in CavityEffect, it doesn't work
here? */
.sampler(8, ImageType::FLOAT_2D, "jitter_tx")
.uniform_buf(5, "vec4", "cavity_samples[512]");

View File

@ -79,7 +79,11 @@ void CavityEffect::setup_resolve_pass(PassSimple &pass, SceneResources &resource
{
if (cavity_enabled_) {
pass.bind_ubo("cavity_samples", samples_buf);
pass.bind_texture("jitter_tx", &resources.jitter_tx, eGPUSamplerState::GPU_SAMPLER_REPEAT);
pass.bind_texture("jitter_tx",
&resources.jitter_tx,
{GPU_SAMPLER_FILTERING_DEFAULT,
GPU_SAMPLER_EXTEND_MODE_REPEAT,
GPU_SAMPLER_EXTEND_MODE_REPEAT});
}
if (curvature_enabled_) {
pass.bind_texture("object_id_tx", &resources.object_id_tx);

View File

@ -173,7 +173,7 @@ void DofPass::sync(SceneResources &resources)
return;
}
eGPUSamplerState sampler_state = GPU_SAMPLER_FILTER | GPU_SAMPLER_MIPMAP;
GPUSamplerState sampler_state = {GPU_SAMPLER_FILTERING_LINEAR | GPU_SAMPLER_FILTERING_MIPMAP};
down_ps_.init();
down_ps_.state_set(DRW_STATE_WRITE_COLOR);

View File

@ -144,8 +144,12 @@ static void workbench_cache_texpaint_populate(WORKBENCH_PrivateData *wpd, Object
struct GPUBatch *geom = DRW_cache_mesh_surface_texpaint_single_get(ob);
if (geom) {
Image *ima = imapaint->canvas;
eGPUSamplerState state = GPU_SAMPLER_REPEAT;
SET_FLAG_FROM_TEST(state, imapaint->interp == IMAGEPAINT_INTERP_LINEAR, GPU_SAMPLER_FILTER);
const GPUSamplerFiltering filtering = imapaint->interp == IMAGEPAINT_INTERP_LINEAR ?
GPU_SAMPLER_FILTERING_LINEAR :
GPU_SAMPLER_FILTERING_DEFAULT;
GPUSamplerState state = {
filtering, GPU_SAMPLER_EXTEND_MODE_REPEAT, GPU_SAMPLER_EXTEND_MODE_REPEAT};
DRWShadingGroup *grp = workbench_image_setup(wpd, ob, 0, ima, NULL, state);
workbench_object_drawcall(grp, geom, ob);
@ -159,7 +163,8 @@ static void workbench_cache_texpaint_populate(WORKBENCH_PrivateData *wpd, Object
if (geoms[i] == NULL) {
continue;
}
DRWShadingGroup *grp = workbench_image_setup(wpd, ob, i + 1, NULL, NULL, 0);
DRWShadingGroup *grp = workbench_image_setup(
wpd, ob, i + 1, NULL, NULL, GPU_SAMPLER_DEFAULT);
workbench_object_drawcall(grp, geoms[i], ob);
}
}
@ -224,8 +229,9 @@ static void workbench_cache_hair_populate(WORKBENCH_PrivateData *wpd,
const ImagePaintSettings *imapaint = use_texpaint_mode ? &scene->toolsettings->imapaint : NULL;
Image *ima = (imapaint && imapaint->mode == IMAGEPAINT_MODE_IMAGE) ? imapaint->canvas : NULL;
eGPUSamplerState state = 0;
state |= (imapaint && imapaint->interp == IMAGEPAINT_INTERP_LINEAR) ? GPU_SAMPLER_FILTER : 0;
GPUSamplerState state = {imapaint && imapaint->interp == IMAGEPAINT_INTERP_LINEAR ?
GPU_SAMPLER_FILTERING_LINEAR :
GPU_SAMPLER_FILTERING_DEFAULT};
DRWShadingGroup *grp = (use_texpaint_mode) ?
workbench_image_hair_setup(wpd, ob, matnr, ima, NULL, state) :
workbench_material_hair_setup(wpd, ob, matnr, color_type);

View File

@ -217,7 +217,7 @@ class Instance {
::Image *image = nullptr;
ImageUser *iuser = nullptr;
eGPUSamplerState sampler_state = eGPUSamplerState::GPU_SAMPLER_DEFAULT;
GPUSamplerState sampler_state = GPUSamplerState::default_sampler();
if (object_state.color_type == V3D_SHADING_TEXTURE_COLOR) {
get_material_image(ob_ref.object, i + 1, image, iuser, sampler_state);
}
@ -284,7 +284,7 @@ class Instance {
GPUBatch *batch,
ResourceHandle handle,
::Image *image = nullptr,
eGPUSamplerState sampler_state = GPU_SAMPLER_DEFAULT,
GPUSamplerState sampler_state = GPUSamplerState::default_sampler(),
ImageUser *iuser = nullptr)
{
const bool in_front = (ob_ref.object->dtx & OB_DRAW_IN_FRONT) != 0;

View File

@ -86,10 +86,10 @@ BLI_INLINE Material *workbench_object_material_get(Object *ob, int mat_nr)
}
BLI_INLINE void workbench_material_get_image(
Object *ob, int mat_nr, Image **r_image, ImageUser **r_iuser, eGPUSamplerState *r_sampler)
Object *ob, int mat_nr, Image **r_image, ImageUser **r_iuser, GPUSamplerState *r_sampler)
{
const bNode *node;
*r_sampler = eGPUSamplerState(0);
*r_sampler = GPUSamplerState::default_sampler();
ED_object_get_active_image(ob, mat_nr, r_image, r_iuser, &node, nullptr);
if (node && *r_image) {
@ -97,19 +97,32 @@ BLI_INLINE void workbench_material_get_image(
case SH_NODE_TEX_IMAGE: {
const NodeTexImage *storage = static_cast<NodeTexImage *>(node->storage);
const bool use_filter = (storage->interpolation != SHD_INTERP_CLOSEST);
const bool use_mirror = (storage->extension == SHD_IMAGE_EXTENSION_MIRROR);
const bool use_repeat = use_mirror || (storage->extension == SHD_IMAGE_EXTENSION_REPEAT);
const bool use_clip = (storage->extension == SHD_IMAGE_EXTENSION_CLIP);
SET_FLAG_FROM_TEST(*r_sampler, use_filter, GPU_SAMPLER_FILTER);
SET_FLAG_FROM_TEST(*r_sampler, use_repeat, GPU_SAMPLER_REPEAT);
SET_FLAG_FROM_TEST(*r_sampler, use_clip, GPU_SAMPLER_CLAMP_BORDER);
SET_FLAG_FROM_TEST(*r_sampler, use_mirror, GPU_SAMPLER_MIRROR_REPEAT);
r_sampler->set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_LINEAR, use_filter);
switch (storage->extension) {
case SHD_IMAGE_EXTENSION_EXTEND:
default:
r_sampler->extend_x = GPU_SAMPLER_EXTEND_MODE_EXTEND;
r_sampler->extend_yz = GPU_SAMPLER_EXTEND_MODE_EXTEND;
break;
case SHD_IMAGE_EXTENSION_REPEAT:
r_sampler->extend_x = GPU_SAMPLER_EXTEND_MODE_REPEAT;
r_sampler->extend_yz = GPU_SAMPLER_EXTEND_MODE_REPEAT;
break;
case SHD_IMAGE_EXTENSION_MIRROR:
r_sampler->extend_x = GPU_SAMPLER_EXTEND_MODE_MIRRORED_REPEAT;
r_sampler->extend_yz = GPU_SAMPLER_EXTEND_MODE_MIRRORED_REPEAT;
break;
case SHD_IMAGE_EXTENSION_CLIP:
r_sampler->extend_x = GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER;
r_sampler->extend_yz = GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER;
break;
}
break;
}
case SH_NODE_TEX_ENVIRONMENT: {
const NodeTexEnvironment *storage = static_cast<NodeTexEnvironment *>(node->storage);
const bool use_filter = (storage->interpolation != SHD_INTERP_CLOSEST);
SET_FLAG_FROM_TEST(*r_sampler, use_filter, GPU_SAMPLER_FILTER);
r_sampler->set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_LINEAR, use_filter);
break;
}
default:
@ -157,7 +170,7 @@ DRWShadingGroup *workbench_material_setup_ex(WORKBENCH_PrivateData *wpd,
{
Image *ima = nullptr;
ImageUser *iuser = nullptr;
eGPUSamplerState sampler;
GPUSamplerState sampler;
const bool infront = (ob->dtx & OB_DRAW_IN_FRONT) != 0;
if (color_type == V3D_SHADING_TEXTURE_COLOR) {
@ -240,7 +253,7 @@ DRWShadingGroup *workbench_image_setup_ex(WORKBENCH_PrivateData *wpd,
int mat_nr,
Image *ima,
ImageUser *iuser,
eGPUSamplerState sampler,
GPUSamplerState sampler,
eWORKBENCH_DataType datatype)
{
GPUTexture *tex = nullptr, *tex_tile_data = nullptr;

View File

@ -61,7 +61,7 @@ void get_material_image(Object *ob,
int material_index,
::Image *&image,
ImageUser *&iuser,
eGPUSamplerState &sampler_state)
GPUSamplerState &sampler_state)
{
const ::bNode *node = nullptr;
@ -71,19 +71,32 @@ void get_material_image(Object *ob,
case SH_NODE_TEX_IMAGE: {
const NodeTexImage *storage = static_cast<NodeTexImage *>(node->storage);
const bool use_filter = (storage->interpolation != SHD_INTERP_CLOSEST);
const bool use_mirror = (storage->extension == SHD_IMAGE_EXTENSION_MIRROR);
const bool use_repeat = use_mirror || (storage->extension == SHD_IMAGE_EXTENSION_REPEAT);
const bool use_clip = (storage->extension == SHD_IMAGE_EXTENSION_CLIP);
SET_FLAG_FROM_TEST(sampler_state, use_filter, GPU_SAMPLER_FILTER);
SET_FLAG_FROM_TEST(sampler_state, use_repeat, GPU_SAMPLER_REPEAT);
SET_FLAG_FROM_TEST(sampler_state, use_clip, GPU_SAMPLER_CLAMP_BORDER);
SET_FLAG_FROM_TEST(sampler_state, use_mirror, GPU_SAMPLER_MIRROR_REPEAT);
sampler_state.set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_LINEAR, use_filter);
switch (storage->extension) {
case SHD_IMAGE_EXTENSION_EXTEND:
default:
sampler_state.extend_x = GPU_SAMPLER_EXTEND_MODE_EXTEND;
sampler_state.extend_yz = GPU_SAMPLER_EXTEND_MODE_EXTEND;
break;
case SHD_IMAGE_EXTENSION_REPEAT:
sampler_state.extend_x = GPU_SAMPLER_EXTEND_MODE_REPEAT;
sampler_state.extend_yz = GPU_SAMPLER_EXTEND_MODE_REPEAT;
break;
case SHD_IMAGE_EXTENSION_MIRROR:
sampler_state.extend_x = GPU_SAMPLER_EXTEND_MODE_MIRRORED_REPEAT;
sampler_state.extend_yz = GPU_SAMPLER_EXTEND_MODE_MIRRORED_REPEAT;
break;
case SHD_IMAGE_EXTENSION_CLIP:
sampler_state.extend_x = GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER;
sampler_state.extend_yz = GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER;
break;
}
break;
}
case SH_NODE_TEX_ENVIRONMENT: {
const NodeTexEnvironment *storage = static_cast<NodeTexEnvironment *>(node->storage);
const bool use_filter = (storage->interpolation != SHD_INTERP_CLOSEST);
SET_FLAG_FROM_TEST(sampler_state, use_filter, GPU_SAMPLER_FILTER);
sampler_state.set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_LINEAR, use_filter);
break;
}
default:

View File

@ -60,7 +60,7 @@ void MeshPass::draw(ObjectRef &ref,
ResourceHandle handle,
uint material_index,
::Image *image /* = nullptr */,
eGPUSamplerState sampler_state /* = GPU_SAMPLER_DEFAULT */,
GPUSamplerState sampler_state /* = GPUSamplerState::default_sampler() */,
ImageUser *iuser /* = nullptr */)
{
is_empty_ = false;

View File

@ -499,7 +499,7 @@ DRWShadingGroup *workbench_image_setup_ex(WORKBENCH_PrivateData *wpd,
int mat_nr,
Image *ima,
ImageUser *iuser,
eGPUSamplerState sampler,
GPUSamplerState sampler,
eWORKBENCH_DataType datatype);
#define WORKBENCH_OBJECT_DATATYPE(ob) \

View File

@ -57,7 +57,7 @@ void get_material_image(Object *ob,
int material_index,
::Image *&image,
ImageUser *&iuser,
eGPUSamplerState &sampler_state);
GPUSamplerState &sampler_state);
struct SceneState {
Scene *scene = nullptr;
@ -106,7 +106,7 @@ struct ObjectState {
bool sculpt_pbvh = false;
bool texture_paint_mode = false;
::Image *image_paint_override = nullptr;
eGPUSamplerState override_sampler_state = GPU_SAMPLER_DEFAULT;
GPUSamplerState override_sampler_state = GPUSamplerState::default_sampler();
bool draw_shadow = false;
bool use_per_material_batches = false;
@ -186,7 +186,7 @@ class MeshPass : public PassMain {
ResourceHandle handle,
uint material_index,
::Image *image = nullptr,
eGPUSamplerState sampler_state = eGPUSamplerState::GPU_SAMPLER_DEFAULT,
GPUSamplerState sampler_state = GPUSamplerState::default_sampler(),
ImageUser *iuser = nullptr);
};

View File

@ -199,7 +199,7 @@ ObjectState::ObjectState(const SceneState &scene_state, Object *ob)
sculpt_pbvh = false;
texture_paint_mode = false;
image_paint_override = nullptr;
override_sampler_state = GPU_SAMPLER_DEFAULT;
override_sampler_state = GPUSamplerState::default_sampler();
draw_shadow = false;
const DRWContextState *draw_ctx = DRW_context_state_get();
@ -283,10 +283,11 @@ ObjectState::ObjectState(const SceneState &scene_state, Object *ob)
const ImagePaintSettings *imapaint = &scene_state.scene->toolsettings->imapaint;
if (imapaint->mode == IMAGEPAINT_MODE_IMAGE) {
image_paint_override = imapaint->canvas;
override_sampler_state = GPU_SAMPLER_REPEAT;
SET_FLAG_FROM_TEST(override_sampler_state,
imapaint->interp == IMAGEPAINT_INTERP_LINEAR,
GPU_SAMPLER_FILTER);
override_sampler_state.extend_x = GPU_SAMPLER_EXTEND_MODE_REPEAT;
override_sampler_state.extend_yz = GPU_SAMPLER_EXTEND_MODE_REPEAT;
const bool use_linear_filter = imapaint->interp == IMAGEPAINT_INTERP_LINEAR;
override_sampler_state.set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_LINEAR,
use_linear_filter);
}
}
}

View File

@ -534,11 +534,11 @@ void DRW_shgroup_clear_framebuffer(DRWShadingGroup *shgroup,
void DRW_shgroup_uniform_texture_ex(DRWShadingGroup *shgroup,
const char *name,
const struct GPUTexture *tex,
eGPUSamplerState sampler_state);
GPUSamplerState sampler_state);
void DRW_shgroup_uniform_texture_ref_ex(DRWShadingGroup *shgroup,
const char *name,
GPUTexture **tex,
eGPUSamplerState sampler_state);
GPUSamplerState sampler_state);
void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup,
const char *name,
const struct GPUTexture *tex);

View File

@ -318,7 +318,7 @@ static DRWVolumeGrid *volume_grid_cache_get(const Volume *volume,
* GL_MAX_3D_TEXTURE_SIZE. */
if (cache_grid->texture != nullptr) {
GPU_texture_swizzle_set(cache_grid->texture, (channels == 3) ? "rgb1" : "rrr1");
GPU_texture_wrap_mode(cache_grid->texture, false, false);
GPU_texture_extend_mode(cache_grid->texture, GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER);
BKE_volume_dense_float_grid_clear(&dense_grid);
}
else {

View File

@ -262,8 +262,7 @@ std::string ResourceBind::serialize() const
switch (type) {
case Type::Sampler:
return std::string(".bind_texture") + (is_reference ? "_ref" : "") + "(" +
std::to_string(slot) +
(sampler != GPU_SAMPLER_MAX ? ", sampler=" + std::to_string(sampler) : "") + ")";
std::to_string(slot) + ", sampler=" + sampler.to_string() + ")";
case Type::BufferSampler:
return std::string(".bind_vertbuf_as_texture") + (is_reference ? "_ref" : "") + "(" +
std::to_string(slot) + ")";

View File

@ -134,7 +134,7 @@ struct FramebufferBind {
};
struct ResourceBind {
eGPUSamplerState sampler;
GPUSamplerState sampler;
int slot;
bool is_reference;
@ -191,9 +191,9 @@ struct ResourceBind {
: slot(slot_), is_reference(false), type(Type::Image), texture(draw::as_texture(res)){};
ResourceBind(int slot_, draw::Image **res)
: slot(slot_), is_reference(true), type(Type::Image), texture_ref(draw::as_texture(res)){};
ResourceBind(int slot_, GPUTexture *res, eGPUSamplerState state)
ResourceBind(int slot_, GPUTexture *res, GPUSamplerState state)
: sampler(state), slot(slot_), is_reference(false), type(Type::Sampler), texture(res){};
ResourceBind(int slot_, GPUTexture **res, eGPUSamplerState state)
ResourceBind(int slot_, GPUTexture **res, GPUSamplerState state)
: sampler(state), slot(slot_), is_reference(true), type(Type::Sampler), texture_ref(res){};
ResourceBind(int slot_, GPUVertBuf *res)
: slot(slot_), is_reference(false), type(Type::BufferSampler), vertex_buf(res){};

View File

@ -362,7 +362,7 @@ struct DRWUniform {
GPUTexture *texture;
GPUTexture **texture_ref;
};
eGPUSamplerState sampler_state;
GPUSamplerState sampler_state;
};
/* DRW_UNIFORM_BLOCK */
union {

View File

@ -169,7 +169,7 @@ static void drw_shgroup_uniform_create_ex(DRWShadingGroup *shgroup,
int loc,
DRWUniformType type,
const void *value,
eGPUSamplerState sampler_state,
GPUSamplerState sampler_state,
int length,
int arraysize)
{
@ -247,13 +247,13 @@ static void drw_shgroup_uniform(DRWShadingGroup *shgroup,
DRW_UNIFORM_TEXTURE_REF));
int location = GPU_shader_get_uniform(shgroup->shader, name);
drw_shgroup_uniform_create_ex(
shgroup, location, type, value, GPU_SAMPLER_DEFAULT, length, arraysize);
shgroup, location, type, value, GPUSamplerState::default_sampler(), length, arraysize);
}
void DRW_shgroup_uniform_texture_ex(DRWShadingGroup *shgroup,
const char *name,
const GPUTexture *tex,
eGPUSamplerState sampler_state)
GPUSamplerState sampler_state)
{
BLI_assert(tex != nullptr);
int loc = GPU_shader_get_sampler_binding(shgroup->shader, name);
@ -262,13 +262,13 @@ void DRW_shgroup_uniform_texture_ex(DRWShadingGroup *shgroup,
void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
DRW_shgroup_uniform_texture_ex(shgroup, name, tex, GPU_SAMPLER_MAX);
DRW_shgroup_uniform_texture_ex(shgroup, name, tex, GPUSamplerState::internal_sampler());
}
void DRW_shgroup_uniform_texture_ref_ex(DRWShadingGroup *shgroup,
const char *name,
GPUTexture **tex,
eGPUSamplerState sampler_state)
GPUSamplerState sampler_state)
{
BLI_assert(tex != nullptr);
int loc = GPU_shader_get_sampler_binding(shgroup->shader, name);
@ -277,14 +277,15 @@ void DRW_shgroup_uniform_texture_ref_ex(DRWShadingGroup *shgroup,
void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
{
DRW_shgroup_uniform_texture_ref_ex(shgroup, name, tex, GPU_SAMPLER_MAX);
DRW_shgroup_uniform_texture_ref_ex(shgroup, name, tex, GPUSamplerState::internal_sampler());
}
void DRW_shgroup_uniform_image(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
{
BLI_assert(tex != nullptr);
int loc = GPU_shader_get_sampler_binding(shgroup->shader, name);
drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_IMAGE, tex, GPU_SAMPLER_DEFAULT, 0, 1);
drw_shgroup_uniform_create_ex(
shgroup, loc, DRW_UNIFORM_IMAGE, tex, GPUSamplerState::default_sampler(), 0, 1);
}
void DRW_shgroup_uniform_image_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
@ -292,7 +293,7 @@ void DRW_shgroup_uniform_image_ref(DRWShadingGroup *shgroup, const char *name, G
BLI_assert(tex != nullptr);
int loc = GPU_shader_get_sampler_binding(shgroup->shader, name);
drw_shgroup_uniform_create_ex(
shgroup, loc, DRW_UNIFORM_IMAGE_REF, tex, GPU_SAMPLER_DEFAULT, 0, 1);
shgroup, loc, DRW_UNIFORM_IMAGE_REF, tex, GPUSamplerState::default_sampler(), 0, 1);
}
void DRW_shgroup_uniform_block_ex(DRWShadingGroup *shgroup,
@ -313,7 +314,8 @@ void DRW_shgroup_uniform_block_ex(DRWShadingGroup *shgroup,
#endif
return;
}
drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK, ubo, GPU_SAMPLER_DEFAULT, 0, 1);
drw_shgroup_uniform_create_ex(
shgroup, loc, DRW_UNIFORM_BLOCK, ubo, GPUSamplerState::default_sampler(), 0, 1);
}
void DRW_shgroup_uniform_block_ref_ex(DRWShadingGroup *shgroup,
@ -335,7 +337,7 @@ void DRW_shgroup_uniform_block_ref_ex(DRWShadingGroup *shgroup,
return;
}
drw_shgroup_uniform_create_ex(
shgroup, loc, DRW_UNIFORM_BLOCK_REF, ubo, GPU_SAMPLER_DEFAULT, 0, 1);
shgroup, loc, DRW_UNIFORM_BLOCK_REF, ubo, GPUSamplerState::default_sampler(), 0, 1);
}
void DRW_shgroup_storage_block_ex(DRWShadingGroup *shgroup,
@ -358,7 +360,7 @@ void DRW_shgroup_storage_block_ex(DRWShadingGroup *shgroup,
return;
}
drw_shgroup_uniform_create_ex(
shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK, ssbo, GPU_SAMPLER_DEFAULT, 0, 1);
shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK, ssbo, GPUSamplerState::default_sampler(), 0, 1);
}
void DRW_shgroup_storage_block_ref_ex(DRWShadingGroup *shgroup,
@ -381,7 +383,7 @@ void DRW_shgroup_storage_block_ref_ex(DRWShadingGroup *shgroup,
return;
}
drw_shgroup_uniform_create_ex(
shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK_REF, ssbo, GPU_SAMPLER_DEFAULT, 0, 1);
shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK_REF, ssbo, GPUSamplerState::default_sampler(), 0, 1);
}
void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup,
@ -530,8 +532,13 @@ void DRW_shgroup_uniform_mat4_copy(DRWShadingGroup *shgroup,
* and array-size used to determine the number of elements
* copied in draw_update_uniforms. */
for (int i = 0; i < 4; i++) {
drw_shgroup_uniform_create_ex(
shgroup, location, DRW_UNIFORM_FLOAT_COPY, &value[i], GPU_SAMPLER_DEFAULT, 4, 4);
drw_shgroup_uniform_create_ex(shgroup,
location,
DRW_UNIFORM_FLOAT_COPY,
&value[i],
GPUSamplerState::default_sampler(),
4,
4);
}
}
@ -555,7 +562,7 @@ void DRW_shgroup_vertex_buffer_ex(DRWShadingGroup *shgroup,
location,
DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE,
vertex_buffer,
GPU_SAMPLER_DEFAULT,
GPUSamplerState::default_sampler(),
0,
1);
}
@ -580,7 +587,7 @@ void DRW_shgroup_vertex_buffer_ref_ex(DRWShadingGroup *shgroup,
location,
DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF,
vertex_buffer,
GPU_SAMPLER_DEFAULT,
GPUSamplerState::default_sampler(),
0,
1);
}
@ -597,7 +604,7 @@ void DRW_shgroup_buffer_texture(DRWShadingGroup *shgroup,
location,
DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE,
vertex_buffer,
GPU_SAMPLER_DEFAULT,
GPUSamplerState::default_sampler(),
0,
1);
}
@ -614,7 +621,7 @@ void DRW_shgroup_buffer_texture_ref(DRWShadingGroup *shgroup,
location,
DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE_REF,
vertex_buffer,
GPU_SAMPLER_DEFAULT,
GPUSamplerState::default_sampler(),
0,
1);
}
@ -698,7 +705,7 @@ static void drw_call_obinfos_init(DRWObjectInfos *ob_infos, Object *ob)
drw_call_calc_orco(ob, ob_infos->orcotexfac);
/* Random float value. */
uint random = (DST.dupli_source) ?
DST.dupli_source->random_id :
DST.dupli_source->random_id :
/* TODO(fclem): this is rather costly to do at runtime. Maybe we can
* put it in ob->runtime and make depsgraph ensure it is up to date. */
BLI_hash_int_2d(BLI_hash_string(ob->id.name + 2), 0);
@ -1653,23 +1660,43 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
}
if (chunkid_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, chunkid_location, DRW_UNIFORM_RESOURCE_CHUNK, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
drw_shgroup_uniform_create_ex(shgroup,
chunkid_location,
DRW_UNIFORM_RESOURCE_CHUNK,
nullptr,
GPUSamplerState::default_sampler(),
0,
1);
}
if (resourceid_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, resourceid_location, DRW_UNIFORM_RESOURCE_ID, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
drw_shgroup_uniform_create_ex(shgroup,
resourceid_location,
DRW_UNIFORM_RESOURCE_ID,
nullptr,
GPUSamplerState::default_sampler(),
0,
1);
}
if (baseinst_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, baseinst_location, DRW_UNIFORM_BASE_INSTANCE, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
drw_shgroup_uniform_create_ex(shgroup,
baseinst_location,
DRW_UNIFORM_BASE_INSTANCE,
nullptr,
GPUSamplerState::default_sampler(),
0,
1);
}
if (model_ubo_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, model_ubo_location, DRW_UNIFORM_BLOCK_OBMATS, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
drw_shgroup_uniform_create_ex(shgroup,
model_ubo_location,
DRW_UNIFORM_BLOCK_OBMATS,
nullptr,
GPUSamplerState::default_sampler(),
0,
1);
}
else {
/* NOTE: This is only here to support old hardware fallback where uniform buffer is still
@ -1677,23 +1704,33 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
int model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL);
int modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV);
if (model != -1) {
drw_shgroup_uniform_create_ex(
shgroup, model, DRW_UNIFORM_MODEL_MATRIX, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
drw_shgroup_uniform_create_ex(shgroup,
model,
DRW_UNIFORM_MODEL_MATRIX,
nullptr,
GPUSamplerState::default_sampler(),
0,
1);
}
if (modelinverse != -1) {
drw_shgroup_uniform_create_ex(shgroup,
modelinverse,
DRW_UNIFORM_MODEL_MATRIX_INVERSE,
nullptr,
GPU_SAMPLER_DEFAULT,
GPUSamplerState::default_sampler(),
0,
1);
}
}
if (info_ubo_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, info_ubo_location, DRW_UNIFORM_BLOCK_OBINFOS, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
drw_shgroup_uniform_create_ex(shgroup,
info_ubo_location,
DRW_UNIFORM_BLOCK_OBINFOS,
nullptr,
GPUSamplerState::default_sampler(),
0,
1);
/* Abusing this loc to tell shgroup we need the obinfos. */
shgroup->objectinfo = 1;
@ -1703,8 +1740,13 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
}
if (view_ubo_location != -1) {
drw_shgroup_uniform_create_ex(
shgroup, view_ubo_location, DRW_UNIFORM_BLOCK, G_draw.view_ubo, GPU_SAMPLER_DEFAULT, 0, 1);
drw_shgroup_uniform_create_ex(shgroup,
view_ubo_location,
DRW_UNIFORM_BLOCK,
G_draw.view_ubo,
GPUSamplerState::default_sampler(),
0,
1);
}
if (clipping_ubo_location != -1) {
@ -1712,7 +1754,7 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
clipping_ubo_location,
DRW_UNIFORM_BLOCK,
G_draw.clipping_ubo,
GPU_SAMPLER_DEFAULT,
GPUSamplerState::default_sampler(),
0,
1);
}
@ -1767,7 +1809,7 @@ static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass
static void drw_shgroup_material_texture(DRWShadingGroup *grp,
GPUTexture *gputex,
const char *name,
eGPUSamplerState state)
GPUSamplerState state)
{
DRW_shgroup_uniform_texture_ex(grp, name, gputex, state);
@ -1788,16 +1830,13 @@ void DRW_shgroup_add_material_resources(DRWShadingGroup *grp, GPUMaterial *mater
ImageUser *iuser = tex->iuser_available ? &tex->iuser : nullptr;
if (tex->tiled_mapping_name[0]) {
gputex = BKE_image_get_gpu_tiles(tex->ima, iuser, nullptr);
drw_shgroup_material_texture(
grp, gputex, tex->sampler_name, eGPUSamplerState(tex->sampler_state));
drw_shgroup_material_texture(grp, gputex, tex->sampler_name, tex->sampler_state);
gputex = BKE_image_get_gpu_tilemap(tex->ima, iuser, nullptr);
drw_shgroup_material_texture(
grp, gputex, tex->tiled_mapping_name, eGPUSamplerState(tex->sampler_state));
drw_shgroup_material_texture(grp, gputex, tex->tiled_mapping_name, tex->sampler_state);
}
else {
gputex = BKE_image_get_gpu_texture(tex->ima, iuser, nullptr);
drw_shgroup_material_texture(
grp, gputex, tex->sampler_name, eGPUSamplerState(tex->sampler_state));
drw_shgroup_material_texture(grp, gputex, tex->sampler_name, tex->sampler_state);
}
}
else if (tex->colorband) {
@ -1806,8 +1845,7 @@ void DRW_shgroup_add_material_resources(DRWShadingGroup *grp, GPUMaterial *mater
}
else if (tex->sky) {
/* Sky */
DRW_shgroup_uniform_texture_ex(
grp, tex->sampler_name, *tex->sky, eGPUSamplerState(tex->sampler_state));
DRW_shgroup_uniform_texture_ex(grp, tex->sampler_name, *tex->sky, tex->sampler_state);
}
}
@ -1820,14 +1858,14 @@ void DRW_shgroup_add_material_resources(DRWShadingGroup *grp, GPUMaterial *mater
if (uattrs != nullptr) {
int loc = GPU_shader_get_ubo_binding(grp->shader, GPU_ATTRIBUTE_UBO_BLOCK_NAME);
drw_shgroup_uniform_create_ex(
grp, loc, DRW_UNIFORM_BLOCK_OBATTRS, uattrs, GPU_SAMPLER_DEFAULT, 0, 1);
grp, loc, DRW_UNIFORM_BLOCK_OBATTRS, uattrs, GPUSamplerState::default_sampler(), 0, 1);
grp->uniform_attrs = uattrs;
}
if (GPU_material_layer_attributes(material) != nullptr) {
int loc = GPU_shader_get_ubo_binding(grp->shader, GPU_LAYER_ATTRIBUTE_UBO_BLOCK_NAME);
drw_shgroup_uniform_create_ex(
grp, loc, DRW_UNIFORM_BLOCK_VLATTRS, nullptr, GPU_SAMPLER_DEFAULT, 0, 1);
grp, loc, DRW_UNIFORM_BLOCK_VLATTRS, nullptr, GPUSamplerState::default_sampler(), 0, 1);
}
}
@ -1872,8 +1910,13 @@ DRWShadingGroup *DRW_shgroup_transform_feedback_create(GPUShader *shader,
BLI_assert(tf_target != nullptr);
DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass);
drw_shgroup_init(shgroup, shader);
drw_shgroup_uniform_create_ex(
shgroup, 0, DRW_UNIFORM_TFEEDBACK_TARGET, tf_target, GPU_SAMPLER_DEFAULT, 0, 1);
drw_shgroup_uniform_create_ex(shgroup,
0,
DRW_UNIFORM_TFEEDBACK_TARGET,
tf_target,
GPUSamplerState::default_sampler(),
0,
1);
return shgroup;
}

View File

@ -60,7 +60,8 @@ void drw_texture_set_parameters(GPUTexture *tex, DRWTextureFlag flags)
GPU_texture_filter_mode(tex, flags & DRW_TEX_FILTER);
}
GPU_texture_anisotropic_filter(tex, false);
GPU_texture_wrap_mode(tex, flags & DRW_TEX_WRAP, true);
GPU_texture_extend_mode(
tex, flags & DRW_TEX_WRAP ? GPU_SAMPLER_EXTEND_MODE_REPEAT : GPU_SAMPLER_EXTEND_MODE_EXTEND);
GPU_texture_compare_mode(tex, flags & DRW_TEX_COMPARE);
}

View File

@ -117,8 +117,8 @@ class PassBase {
friend Manager;
friend DrawCommandBuf;
/** Will use texture own sampler state. */
static constexpr eGPUSamplerState sampler_auto = GPU_SAMPLER_MAX;
/** Will use texture own internal sampler state. */
static constexpr GPUSamplerState sampler_auto = GPUSamplerState::internal_sampler();
protected:
/** Highest level of the command stream. Split command stream in different command types. */
@ -287,12 +287,12 @@ class PassBase {
void bind_image(const char *name, GPUTexture **image);
void bind_image(int slot, GPUTexture *image);
void bind_image(int slot, GPUTexture **image);
void bind_texture(const char *name, GPUTexture *texture, eGPUSamplerState state = sampler_auto);
void bind_texture(const char *name, GPUTexture **texture, eGPUSamplerState state = sampler_auto);
void bind_texture(const char *name, GPUTexture *texture, GPUSamplerState state = sampler_auto);
void bind_texture(const char *name, GPUTexture **texture, GPUSamplerState state = sampler_auto);
void bind_texture(const char *name, GPUVertBuf *buffer);
void bind_texture(const char *name, GPUVertBuf **buffer);
void bind_texture(int slot, GPUTexture *texture, eGPUSamplerState state = sampler_auto);
void bind_texture(int slot, GPUTexture **texture, eGPUSamplerState state = sampler_auto);
void bind_texture(int slot, GPUTexture *texture, GPUSamplerState state = sampler_auto);
void bind_texture(int slot, GPUTexture **texture, GPUSamplerState state = sampler_auto);
void bind_texture(int slot, GPUVertBuf *buffer);
void bind_texture(int slot, GPUVertBuf **buffer);
void bind_ssbo(const char *name, GPUStorageBuf *buffer);
@ -836,16 +836,16 @@ template<class T> inline void PassBase<T>::material_set(Manager &manager, GPUMat
if (tex->tiled_mapping_name[0]) {
GPUTexture *tiles = BKE_image_get_gpu_tiles(tex->ima, iuser, nullptr);
manager.acquire_texture(tiles);
bind_texture(tex->sampler_name, tiles, (eGPUSamplerState)tex->sampler_state);
bind_texture(tex->sampler_name, tiles, tex->sampler_state);
GPUTexture *tile_map = BKE_image_get_gpu_tilemap(tex->ima, iuser, nullptr);
manager.acquire_texture(tile_map);
bind_texture(tex->tiled_mapping_name, tile_map, (eGPUSamplerState)tex->sampler_state);
bind_texture(tex->tiled_mapping_name, tile_map, tex->sampler_state);
}
else {
GPUTexture *texture = BKE_image_get_gpu_texture(tex->ima, iuser, nullptr);
manager.acquire_texture(texture);
bind_texture(tex->sampler_name, texture, (eGPUSamplerState)tex->sampler_state);
bind_texture(tex->sampler_name, texture, tex->sampler_state);
}
}
else if (tex->colorband) {
@ -912,9 +912,7 @@ template<class T> inline void PassBase<T>::bind_ubo(const char *name, GPUUniform
}
template<class T>
inline void PassBase<T>::bind_texture(const char *name,
GPUTexture *texture,
eGPUSamplerState state)
inline void PassBase<T>::bind_texture(const char *name, GPUTexture *texture, GPUSamplerState state)
{
this->bind_texture(GPU_shader_get_sampler_binding(shader_, name), texture, state);
}
@ -981,7 +979,7 @@ template<class T> inline void PassBase<T>::bind_ubo(int slot, GPUUniformBuf *buf
}
template<class T>
inline void PassBase<T>::bind_texture(int slot, GPUTexture *texture, eGPUSamplerState state)
inline void PassBase<T>::bind_texture(int slot, GPUTexture *texture, GPUSamplerState state)
{
create_command(Type::ResourceBind).resource_bind = {slot, texture, state};
}
@ -1014,7 +1012,7 @@ template<class T> inline void PassBase<T>::bind_ubo(const char *name, GPUUniform
template<class T>
inline void PassBase<T>::bind_texture(const char *name,
GPUTexture **texture,
eGPUSamplerState state)
GPUSamplerState state)
{
this->bind_texture(GPU_shader_get_sampler_binding(shader_, name), texture, state);
}
@ -1036,7 +1034,7 @@ template<class T> inline void PassBase<T>::bind_ubo(int slot, GPUUniformBuf **bu
}
template<class T>
inline void PassBase<T>::bind_texture(int slot, GPUTexture **texture, eGPUSamplerState state)
inline void PassBase<T>::bind_texture(int slot, GPUTexture **texture, GPUSamplerState state)
{
create_command(Type::ResourceBind).resource_bind = {slot, texture, state};
}

View File

@ -74,8 +74,8 @@ static void drw_volume_globals_init()
"dummy_zero", 1, 1, 1, 1, GPU_RGBA8, GPU_TEXTURE_USAGE_SHADER_READ, zero);
g_data.dummy_one = GPU_texture_create_3d(
"dummy_one", 1, 1, 1, 1, GPU_RGBA8, GPU_TEXTURE_USAGE_SHADER_READ, one);
GPU_texture_wrap_mode(g_data.dummy_zero, true, true);
GPU_texture_wrap_mode(g_data.dummy_one, true, true);
GPU_texture_extend_mode(g_data.dummy_zero, GPU_SAMPLER_EXTEND_MODE_REPEAT);
GPU_texture_extend_mode(g_data.dummy_one, GPU_SAMPLER_EXTEND_MODE_REPEAT);
memset(g_data.dummy_grid_mat, 0, sizeof(g_data.dummy_grid_mat));
}

View File

@ -8,7 +8,7 @@ set(INC_SYS
)
# blender and player
# Part of the `blender` binary (sources for data-files are appended).
set(SRC
)
@ -16,7 +16,7 @@ set(SRC
set(LIB
)
# Order matches "UI_icons.h", final name will be formatted: "icons{size}_{name}.dat"
# Order matches `UI_icons.h`, final name will be formatted: `icons{size}_{name}.dat`.
set(ICON_NAMES
question
error
@ -696,6 +696,7 @@ set(ICON_NAMES
)
# This section is maintained by the updating script, keep BEGIN/END comments.
# See: `make icons_geom` and the script `./release/datafiles/blender_icons_geom_update.py`.
set_property(GLOBAL PROPERTY ICON_GEOM_NAMES
# BEGIN ICON_GEOM_NAMES
brush.gpencil_draw.draw
@ -890,16 +891,16 @@ set_property(GLOBAL PROPERTY ICON_GEOM_NAMES
data_to_c_simple(../../../../release/datafiles/bfont.pfb SRC)
if(WITH_BLENDER)
# blender only (not player)
# Blender only (not Cycles stand-alone).
if(NOT WITH_HEADLESS)
# blender UI only
# Blender UI only.
# blends
# Blend files.
data_to_c_simple(../../../../release/datafiles/preview.blend SRC)
data_to_c_simple(../../../../release/datafiles/preview_grease_pencil.blend SRC)
# images
# Images.
data_to_c_simple(../../../../release/datafiles/splash.png SRC)
data_to_c_simple(../../../../release/datafiles/alert_icons.png SRC)
data_to_c_simple(../../../../release/datafiles/blender_logo.png SRC)
@ -920,7 +921,7 @@ if(WITH_BLENDER)
# 90 SRC)
data_to_c_simple(../../../../release/datafiles/prvicons.png SRC)
# brushes
# Brushes.
data_to_c_simple(../../../../release/datafiles/brushicons/blob.png SRC)
data_to_c_simple(../../../../release/datafiles/brushicons/blur.png SRC)
data_to_c_simple(../../../../release/datafiles/brushicons/clay.png SRC)
@ -949,7 +950,7 @@ if(WITH_BLENDER)
data_to_c_simple(../../../../release/datafiles/brushicons/thumb.png SRC)
data_to_c_simple(../../../../release/datafiles/brushicons/twist.png SRC)
# grease pencil sculpt
# Grease pencil sculpt.
data_to_c_simple(../../../../release/datafiles/brushicons/gp_brush_smooth.png SRC)
data_to_c_simple(../../../../release/datafiles/brushicons/gp_brush_thickness.png SRC)
data_to_c_simple(../../../../release/datafiles/brushicons/gp_brush_strength.png SRC)
@ -974,7 +975,7 @@ if(WITH_BLENDER)
data_to_c_simple(../../../../release/datafiles/brushicons/gp_brush_erase_hard.png SRC)
data_to_c_simple(../../../../release/datafiles/brushicons/gp_brush_erase_stroke.png SRC)
# curve sculpt
# Curve sculpt.
data_to_c_simple(../../../../release/datafiles/brushicons/curves_sculpt_add.png SRC)
data_to_c_simple(../../../../release/datafiles/brushicons/curves_sculpt_comb.png SRC)
data_to_c_simple(../../../../release/datafiles/brushicons/curves_sculpt_cut.png SRC)

View File

@ -1624,7 +1624,7 @@ static void icon_draw_cache_texture_flush_ex(GPUTexture *texture,
GPU_uniformbuf_bind(ubo, data_binding);
const int img_binding = GPU_shader_get_sampler_binding(shader, "image");
GPU_texture_bind_ex(texture, GPU_SAMPLER_ICON, img_binding);
GPU_texture_bind_ex(texture, GPUSamplerState::icon_sampler(), img_binding);
GPUBatch *quad = GPU_batch_preset_quad();
GPU_batch_set_shader(quad, shader);
@ -1816,7 +1816,7 @@ static void icon_draw_texture(float x,
GPU_shader_uniform_float_ex(shader, rect_geom_loc, 4, 1, geom_color);
GPU_shader_uniform_1f(shader, "text_width", text_width);
GPU_texture_bind_ex(texture, GPU_SAMPLER_ICON, img_binding);
GPU_texture_bind_ex(texture, GPUSamplerState::icon_sampler(), img_binding);
GPUBatch *quad = GPU_batch_preset_quad();
GPU_batch_set_shader(quad, shader);

View File

@ -974,7 +974,7 @@ UvElementMap *BM_uv_element_map_create(BMesh *bm,
const bool use_seams,
const bool do_islands)
{
/* In uv sync selection, all UVs are visible. */
/* In uv sync selection, all UVs (from unhidden geometry) are visible. */
const bool face_selected = !(scene->toolsettings->uv_flag & UV_SYNC_SELECTION);
BMVert *ev;

View File

@ -92,7 +92,7 @@ void immDrawPixelsTexScaledFullSize(const IMMDrawPixelsTexState *state,
GPU_texture_update_mipmap_chain(tex);
GPU_texture_mipmap_mode(tex, true, true);
}
GPU_texture_wrap_mode(tex, false, true);
GPU_texture_extend_mode(tex, GPU_SAMPLER_EXTEND_MODE_EXTEND);
GPU_texture_bind(tex, 0);
@ -186,7 +186,7 @@ void immDrawPixelsTexTiled_scaling_clipping(IMMDrawPixelsTexState *state,
"immDrawPixels", tex_w, tex_h, 1, gpu_format, GPU_TEXTURE_USAGE_SHADER_READ, NULL);
GPU_texture_filter_mode(tex, use_filter);
GPU_texture_wrap_mode(tex, false, true);
GPU_texture_extend_mode(tex, GPU_SAMPLER_EXTEND_MODE_EXTEND);
GPU_texture_bind(tex, 0);

View File

@ -655,10 +655,11 @@ static bool paint_draw_tex_overlay(UnifiedPaintSettings *ups,
GPUTexture *texture = (primary) ? primary_snap.overlay_texture :
secondary_snap.overlay_texture;
eGPUSamplerState state = GPU_SAMPLER_FILTER;
state |= (mtex->brush_map_mode == MTEX_MAP_MODE_VIEW) ? GPU_SAMPLER_CLAMP_BORDER :
GPU_SAMPLER_REPEAT;
immBindTextureSampler("image", texture, state);
GPUSamplerExtendMode extend_mode = (mtex->brush_map_mode == MTEX_MAP_MODE_VIEW) ?
GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER :
GPU_SAMPLER_EXTEND_MODE_REPEAT;
immBindTextureSampler(
"image", texture, {GPU_SAMPLER_FILTERING_LINEAR, extend_mode, extend_mode});
/* Draw textured quad. */
immBegin(GPU_PRIM_TRI_FAN, 4);
@ -742,8 +743,11 @@ static bool paint_draw_cursor_overlay(
immUniformColor4fv(final_color);
/* Draw textured quad. */
immBindTextureSampler(
"image", cursor_snap.overlay_texture, GPU_SAMPLER_FILTER | GPU_SAMPLER_CLAMP_BORDER);
immBindTextureSampler("image",
cursor_snap.overlay_texture,
{GPU_SAMPLER_FILTERING_LINEAR,
GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER,
GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER});
immBegin(GPU_PRIM_TRI_FAN, 4);
immAttr2f(texCoord, 0.0f, 0.0f);

View File

@ -3275,7 +3275,9 @@ static eSnapMode transform_snap_context_project_view3d_mixed_impl(SnapObjectCont
}
}
if (snap_to_flag & SCE_SNAP_MODE_FACE_RAYCAST) {
bool use_occlusion_test = params->use_occlusion_test && !XRAY_ENABLED(v3d);
if ((snap_to_flag & SCE_SNAP_MODE_FACE_RAYCAST) || use_occlusion_test) {
float ray_start[3], ray_normal[3];
if (!ED_view3d_win_to_ray_clipped_ex(
depsgraph, region, v3d, mval, nullptr, ray_normal, ray_start, true)) {

View File

@ -1889,9 +1889,12 @@ static StitchState *stitch_init(bContext *C,
counter = 0;
/* Now, on to generate our uv connectivity data */
const bool face_selected = !(ts->uv_flag & UV_SYNC_SELECTION);
BM_ITER_MESH (efa, &iter, em->bm, BM_FACES_OF_MESH) {
if (!(ts->uv_flag & UV_SYNC_SELECTION) &&
(BM_elem_flag_test(efa, BM_ELEM_HIDDEN) || !BM_elem_flag_test(efa, BM_ELEM_SELECT))) {
if (BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
continue;
}
if (face_selected && !BM_elem_flag_test(efa, BM_ELEM_SELECT)) {
continue;
}

View File

@ -105,7 +105,7 @@ void immUniformArray4fv(const char *bare_name, const float *data, int count);
void immUniformMatrix4fv(const char *name, const float data[4][4]);
void immBindTexture(const char *name, GPUTexture *tex);
void immBindTextureSampler(const char *name, GPUTexture *tex, eGPUSamplerState state);
void immBindTextureSampler(const char *name, GPUTexture *tex, GPUSamplerState state);
void immBindUniformBuf(const char *name, GPUUniformBuf *ubo);
/* Convenience functions for setting "uniform vec4 color". */

View File

@ -14,7 +14,7 @@
#include "BLI_sys_types.h" /* for bool */
#include "GPU_shader.h" /* for GPUShaderCreateInfo */
#include "GPU_texture.h" /* for eGPUSamplerState */
#include "GPU_texture.h" /* for GPUSamplerState */
#ifdef __cplusplus
extern "C" {
@ -166,11 +166,11 @@ GPUNodeLink *GPU_layer_attribute(GPUMaterial *mat, const char *name);
GPUNodeLink *GPU_image(GPUMaterial *mat,
struct Image *ima,
struct ImageUser *iuser,
eGPUSamplerState sampler_state);
GPUSamplerState sampler_state);
void GPU_image_tiled(GPUMaterial *mat,
struct Image *ima,
struct ImageUser *iuser,
eGPUSamplerState sampler_state,
GPUSamplerState sampler_state,
GPUNodeLink **r_image_tiled_link,
GPUNodeLink **r_image_tiled_mapping_link);
GPUNodeLink *GPU_image_sky(GPUMaterial *mat,
@ -178,7 +178,7 @@ GPUNodeLink *GPU_image_sky(GPUMaterial *mat,
int height,
const float *pixels,
float *layer,
eGPUSamplerState sampler_state);
GPUSamplerState sampler_state);
GPUNodeLink *GPU_color_band(GPUMaterial *mat, int size, float *pixels, float *row);
/**
@ -355,7 +355,7 @@ typedef struct GPUMaterialTexture {
char sampler_name[32]; /* Name of sampler in GLSL. */
char tiled_mapping_name[32]; /* Name of tile mapping sampler in GLSL. */
int users;
int sampler_state; /* eGPUSamplerState */
GPUSamplerState sampler_state;
} GPUMaterialTexture;
ListBase GPU_material_attributes(GPUMaterial *material);

View File

@ -11,6 +11,11 @@
#pragma once
#ifdef __cplusplus
# include <string>
#endif
#include "BLI_assert.h"
#include "BLI_utildefines.h"
#include "GPU_state.h"
@ -22,84 +27,385 @@ extern "C" {
#endif
/* -------------------------------------------------------------------- */
/** \name Enums
/** \name Sampler State
* \{ */
/**
* A `eGPUSamplerState` specify the sampler state to bind a texture with.
* One is stored inside `GPUTexture` for default parameters.
*
* Some sampler states commonly set:
* - BORDER_COLOR is set to {0, 0, 0, 0}.
* - MIN_LOD is set to -1000.
* - MAX_LOD is set to 1000.
* - LOD_BIAS is set to 0.0.
* The `GPUSamplerFiltering` bit flag specifies the enabled filtering options of a texture
* sampler.
*/
/**
* TODO(fclem): this enum needs to be split into multiple states. One for filtering. One for
* extension / wrap mode etc...
*/
typedef enum eGPUSamplerState {
typedef enum GPUSamplerFiltering {
/**
* Default sampler state with all options off.
* It means no filtering, no mipmap, clamp to edge texel, no compare.
* Default sampler filtering with all options off.
* It means no linear filtering, no mipmapping, and no anisotropic filtering.
*/
GPU_SAMPLER_DEFAULT = 0,
GPU_SAMPLER_FILTERING_DEFAULT = 0,
/**
* Enables hardware linear filtering.
* Enables linear interpolation between MIPS if GPU_SAMPLER_MIPMAP is also set.
* Also enables linear interpolation between MIPS if GPU_SAMPLER_FILTERING_MIPMAP is set.
*/
GPU_SAMPLER_FILTER = (1 << 0),
GPU_SAMPLER_FILTERING_LINEAR = (1 << 0),
/**
* Enables mipmap access through shader samplers.
* Enables linear interpolation between mips if GPU_SAMPLER_FILTER is also set, otherwise the mip
* Also enables linear interpolation between mips if GPU_SAMPLER_FILTER is set, otherwise the mip
* interpolation will be set to nearest.
*
* The following parameters are always left to their default values and can't be changed:
* - TEXTURE_MIN_LOD is -1000.
* - TEXTURE_MAX_LOD is 1000.
* - TEXTURE_LOD_BIAS is 0.0f.
*/
GPU_SAMPLER_MIPMAP = (1 << 1),
GPU_SAMPLER_FILTERING_MIPMAP = (1 << 1),
/**
* Sets texture coordinate extension to repeat in X, Y and Z direction.
* If not set for some direction, either clamp to edge (texel) or border color (0,0,0,0) if
* `GPU_SAMPLER_CLAMP_BORDER` is set.
* If `GPU_SAMPLER_MIRROR_REPEAT` is set, any direction using `GPU_SAMPLER_REPEAT_*` will use a
* mirrored repeat coordinate extension.
*/
GPU_SAMPLER_REPEAT_S = (1 << 2),
GPU_SAMPLER_REPEAT_T = (1 << 3),
GPU_SAMPLER_REPEAT_R = (1 << 4),
GPU_SAMPLER_REPEAT = (GPU_SAMPLER_REPEAT_S | GPU_SAMPLER_REPEAT_T | GPU_SAMPLER_REPEAT_R),
/**
* Clamp to border color instead of border texel.
* Used for directions not using `GPU_SAMPLER_REPEAT_*`.
*/
GPU_SAMPLER_CLAMP_BORDER = (1 << 5),
/**
* Enable compare mode for depth texture. The depth texture must then be bound to a shadow
* sampler.
*/
GPU_SAMPLER_COMPARE = (1 << 6),
/** Enable Anisotropic filtering. This only has effect if `GPU_SAMPLER_MIPMAP` is set.
* Enable Anisotropic filtering. This only has effect if `GPU_SAMPLER_FILTERING_MIPMAP` is set.
* The filtered result is implementation dependent.
* The maximum amount of samples is set
*
* The maximum amount of samples is always set to its maximum possible value and can't be
* changed, except by the user through the user preferences, see the use of U.anisotropic_filter.
*/
GPU_SAMPLER_ANISO = (1 << 7),
/** Enable mirror repeat extension mode for directions using the `GPU_SAMPLER_REPEAT_*` flag. */
GPU_SAMPLER_MIRROR_REPEAT = (1 << 8),
GPU_SAMPLER_FILTERING_ANISOTROPIC = (1 << 2),
} GPUSamplerFiltering;
/** Special icon sampler with custom LOD bias and interpolation mode. */
GPU_SAMPLER_ICON = (1 << 9),
} eGPUSamplerState;
ENUM_OPERATORS(GPUSamplerFiltering, GPU_SAMPLER_FILTERING_ANISOTROPIC)
/** The number of every possible filtering configuration. */
static const int GPU_SAMPLER_FILTERING_TYPES_COUNT = (GPU_SAMPLER_FILTERING_LINEAR |
GPU_SAMPLER_FILTERING_MIPMAP |
GPU_SAMPLER_FILTERING_ANISOTROPIC) +
1;
/**
* #GPU_SAMPLER_MAX is not a valid enum value, but only a limit.
* It also creates a bad mask for the `NOT` operator in #ENUM_OPERATORS.
* The `GPUSamplerExtendMode` specifies how the texture will be extrapolated for out-of-bound
* texture sampling.
*/
typedef enum GPUSamplerExtendMode {
/**
* Extrapolate by extending the edge pixels of the texture, in other words, the texture
* coordinates are clamped.
*/
GPU_SAMPLER_EXTEND_MODE_EXTEND = 0,
/** Extrapolate by repeating the texture. */
GPU_SAMPLER_EXTEND_MODE_REPEAT,
/** Extrapolate by repeating the texture with mirroring in a ping-pong fashion. */
GPU_SAMPLER_EXTEND_MODE_MIRRORED_REPEAT,
/**
* Extrapolate using the value of TEXTURE_BORDER_COLOR, which is always set to a transparent
* black color (0, 0, 0, 0) and can't be changed.
*/
GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER,
} GPUSamplerExtendMode;
#define GPU_SAMPLER_EXTEND_MODES_COUNT (GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER + 1)
/**
* The `GPUSamplerCustomType` specifies pre-defined sampler configurations with parameters that
* are not controllable using the GPUSamplerFiltering and GPUSamplerExtendMode options. Hence, the
* use of a custom sampler type is mutually exclusive with the use of the aforementioned enums.
*
* The parameters that needs to be set for those custom samplers are not added as yet another
* option inside the GPUSamplerState structure because every possible configuration of sampler
* states are generated, setup, and cached at startup, so adding yet another axis of variation will
* multiply the number of configurations that needs to be cached, which is not worth it due to the
* limited use of the parameters needed to setup those custom samplers.
*/
typedef enum GPUSamplerCustomType {
/**
* Enable compare mode for depth texture. The depth texture must then be bound to a shadow
* sampler. This is equivalent to:
*
* - GPU_SAMPLER_FILTERING_LINEAR.
* - GPU_SAMPLER_EXTEND_MODE_EXTEND.
*
* And sets:
*
* - TEXTURE_COMPARE_MODE -> COMPARE_REF_TO_TEXTURE.
* - TEXTURE_COMPARE_FUNC -> LEQUAL.
*/
GPU_SAMPLER_CUSTOM_COMPARE = 0,
/**
* Special icon sampler with custom LOD bias and interpolation mode. This sets:
*
* - TEXTURE_MAG_FILTER -> LINEAR.
* - TEXTURE_MIN_FILTER -> LINEAR_MIPMAP_NEAREST.
* - TEXTURE_LOD_BIAS -> -0.5.
*/
GPU_SAMPLER_CUSTOM_ICON,
} GPUSamplerCustomType;
#define GPU_SAMPLER_CUSTOM_TYPES_COUNT (GPU_SAMPLER_CUSTOM_ICON + 1)
/**
* The `GPUSamplerStateType` specifies how the GPUSamplerState structure should be interpreted
* when passed around due to it being an overloaded type, see the documentation of each of the
* types for more information.
*/
typedef enum GPUSamplerStateType {
/**
* The filtering, extend_x, and extend_yz members of the GPUSamplerState structure will be used
* in setting up the sampler state for the texture. The custom_type member will be ignored in
* that case.
*/
GPU_SAMPLER_STATE_TYPE_PARAMETERS = 0,
/**
* The filtering, extend_x, and extend_yz members of the GPUSamplerState structure will be
* ignored, and the predefined custom parameters outlined in the documentation of
* GPUSamplerCustomType will be used in setting up the sampler state for the texture.
*/
GPU_SAMPLER_STATE_TYPE_CUSTOM,
/**
* The members of the GPUSamplerState structure will be ignored and the internal sampler state of
* the texture will be used. In other words, this is a signal value and stores no useful or
* actual data.
*/
GPU_SAMPLER_STATE_TYPE_INTERNAL,
} GPUSamplerStateType;
/**
* The `GPUSamplerState` specifies the sampler state to bind a texture with.
*
* When the state type is set to GPU_SAMPLER_STATE_TYPE_CUSTOM or GPU_SAMPLER_STATE_TYPE_INTERNAL,
* the rest of the members of the structure will be ignored. However, we can't turn this structure
* into a union, because various functions merely temporally change the state type and expect the
* rest of the members' values to be retained when the state type is changed back to
* GPU_SAMPLER_STATE_TYPE_PARAMETERS. For the instance, a function might do the following and
* expect the original sampler state of the texture to be retained after disabling comparison mode:
*
* GPU_texture_compare_mode(texture, true);
* // Use the texture ...
* GPU_texture_compare_mode(texture, false);
*
*/
typedef struct GPUSamplerState {
/** Specifies the enabled filtering options for the sampler. */
GPUSamplerFiltering filtering : 8;
/**
* Specifies how the texture will be extrapolated for out-of-bound texture sampling along the x
* axis.
*/
GPUSamplerExtendMode extend_x : 4;
/**
* Specifies how the texture will be extrapolated for out-of-bound texture sampling along both
* the y and z axis. There is no individual control for the z axis because 3D textures have
* limited use, and when used, their extend mode is typically the same for all axis.
*/
GPUSamplerExtendMode extend_yz : 4;
/** Specifies the type of sampler if the state type is GPU_SAMPLER_STATE_TYPE_CUSTOM. */
GPUSamplerCustomType custom_type : 8;
/** Specifies how the GPUSamplerState structure should be interpreted when passed around. */
GPUSamplerStateType type : 8;
#ifdef __cplusplus
static constexpr eGPUSamplerState GPU_SAMPLER_MAX = eGPUSamplerState(GPU_SAMPLER_ICON + 1);
#else
static const int GPU_SAMPLER_MAX = (GPU_SAMPLER_ICON + 1);
/**
* Constructs a sampler state with default filtering and extended extend in both x and y axis.
* See the documentation on GPU_SAMPLER_FILTERING_DEFAULT and GPU_SAMPLER_EXTEND_MODE_EXTEND for
* more information.
*
* GPU_SAMPLER_STATE_TYPE_PARAMETERS is set in order to utilize the aforementioned parameters, so
* GPU_SAMPLER_CUSTOM_COMPARE is arbitrary, ignored, and irrelevant.
*/
static constexpr GPUSamplerState default_sampler()
{
return {GPU_SAMPLER_FILTERING_DEFAULT,
GPU_SAMPLER_EXTEND_MODE_EXTEND,
GPU_SAMPLER_EXTEND_MODE_EXTEND,
GPU_SAMPLER_CUSTOM_COMPARE,
GPU_SAMPLER_STATE_TYPE_PARAMETERS};
}
/**
* Constructs a sampler state that can be used to signal that the internal sampler of the texture
* should be used instead. See the documentation on GPU_SAMPLER_STATE_TYPE_INTERNAL for more
* information.
*
* GPU_SAMPLER_STATE_TYPE_INTERNAL is set in order to signal the use of the internal sampler of
* the texture, so the rest of the options before it are arbitrary, ignored, and irrelevant.
*/
static constexpr GPUSamplerState internal_sampler()
{
return {GPU_SAMPLER_FILTERING_DEFAULT,
GPU_SAMPLER_EXTEND_MODE_EXTEND,
GPU_SAMPLER_EXTEND_MODE_EXTEND,
GPU_SAMPLER_CUSTOM_COMPARE,
GPU_SAMPLER_STATE_TYPE_INTERNAL};
}
/**
* Constructs a special sampler state that can be used sampler icons. See the documentation on
* GPU_SAMPLER_CUSTOM_ICON for more information.
*
* GPU_SAMPLER_STATE_TYPE_CUSTOM is set in order to specify a custom sampler type, so the rest of
* the options before it are arbitrary, ignored, and irrelevant.
*/
static constexpr GPUSamplerState icon_sampler()
{
return {GPU_SAMPLER_FILTERING_DEFAULT,
GPU_SAMPLER_EXTEND_MODE_EXTEND,
GPU_SAMPLER_EXTEND_MODE_EXTEND,
GPU_SAMPLER_CUSTOM_ICON,
GPU_SAMPLER_STATE_TYPE_CUSTOM};
}
/**
* Constructs a special sampler state for depth comparison. See the documentation on
* GPU_SAMPLER_CUSTOM_COMPARE for more information.
*
* GPU_SAMPLER_STATE_TYPE_CUSTOM is set in order to specify a custom sampler type, so the rest of
* the options before it are ignored and irrelevant, but they are set to sensible defaults in
* case comparison mode is turned off, in which case, the sampler state will become equivalent to
* GPUSamplerState::default_sampler().
*/
static constexpr GPUSamplerState compare_sampler()
{
return {GPU_SAMPLER_FILTERING_DEFAULT,
GPU_SAMPLER_EXTEND_MODE_EXTEND,
GPU_SAMPLER_EXTEND_MODE_EXTEND,
GPU_SAMPLER_CUSTOM_COMPARE,
GPU_SAMPLER_STATE_TYPE_CUSTOM};
}
/**
* Enables the given filtering flags.
*/
void enable_filtering_flag(GPUSamplerFiltering filtering_flags)
{
this->filtering = this->filtering | filtering_flags;
}
/**
* Disables the given filtering flags.
*/
void disable_filtering_flag(GPUSamplerFiltering filtering_flags)
{
this->filtering = this->filtering & ~filtering_flags;
}
/**
* Enables the given filtering flags if the given test is true, otherwise, disables the given
* filtering flags.
*/
void set_filtering_flag_from_test(GPUSamplerFiltering filtering_flags, bool test)
{
if (test) {
this->enable_filtering_flag(filtering_flags);
}
else {
this->disable_filtering_flag(filtering_flags);
}
}
std::string to_string() const
{
if (this->type == GPU_SAMPLER_STATE_TYPE_INTERNAL) {
return "internal";
}
if (this->type == GPU_SAMPLER_STATE_TYPE_CUSTOM) {
switch (this->custom_type) {
case GPU_SAMPLER_CUSTOM_COMPARE:
return "compare";
break;
case GPU_SAMPLER_CUSTOM_ICON:
return "icon";
break;
default:
BLI_assert_unreachable();
return "";
}
}
/* The sampler state is of type PARAMETERS, so serialize the parameters. */
BLI_assert(this->type == GPU_SAMPLER_STATE_TYPE_PARAMETERS);
std::string serialized_paramaters;
if (this->filtering & GPU_SAMPLER_FILTERING_LINEAR) {
serialized_paramaters += "linear-filter_";
}
if (this->filtering & GPU_SAMPLER_FILTERING_MIPMAP) {
serialized_paramaters += "mipmap_";
}
if (this->filtering & GPU_SAMPLER_FILTERING_ANISOTROPIC) {
serialized_paramaters += "anisotropic_";
}
switch (this->extend_x) {
case GPU_SAMPLER_EXTEND_MODE_EXTEND:
serialized_paramaters += "extend-x_";
break;
case GPU_SAMPLER_EXTEND_MODE_REPEAT:
serialized_paramaters += "repeat-x_";
break;
case GPU_SAMPLER_EXTEND_MODE_MIRRORED_REPEAT:
serialized_paramaters += "mirrored-repeat-x_";
break;
case GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER:
serialized_paramaters += "clamp-to-border-x_";
break;
default:
BLI_assert_unreachable();
}
switch (this->extend_yz) {
case GPU_SAMPLER_EXTEND_MODE_EXTEND:
serialized_paramaters += "extend-y_";
break;
case GPU_SAMPLER_EXTEND_MODE_REPEAT:
serialized_paramaters += "repeat-y_";
break;
case GPU_SAMPLER_EXTEND_MODE_MIRRORED_REPEAT:
serialized_paramaters += "mirrored-repeat-y_";
break;
case GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER:
serialized_paramaters += "clamp-to-border-y_";
break;
default:
BLI_assert_unreachable();
}
switch (this->extend_yz) {
case GPU_SAMPLER_EXTEND_MODE_EXTEND:
serialized_paramaters += "extend-z";
break;
case GPU_SAMPLER_EXTEND_MODE_REPEAT:
serialized_paramaters += "repeat-z";
break;
case GPU_SAMPLER_EXTEND_MODE_MIRRORED_REPEAT:
serialized_paramaters += "mirrored-repeat-z";
break;
case GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER:
serialized_paramaters += "clamp-to-border-z";
break;
default:
BLI_assert_unreachable();
}
return serialized_paramaters;
}
bool operator==(GPUSamplerState const &rhs) const
{
return this->filtering == rhs.filtering && this->extend_x == rhs.extend_x &&
this->extend_yz == rhs.extend_yz && this->custom_type == rhs.custom_type &&
this->type == rhs.type;
}
#endif
} GPUSamplerState;
#ifndef __cplusplus
/** Identical to GPUSamplerState::default_sampler for non C++ users. */
const static GPUSamplerState GPU_SAMPLER_DEFAULT = {GPU_SAMPLER_FILTERING_DEFAULT,
GPU_SAMPLER_EXTEND_MODE_EXTEND,
GPU_SAMPLER_EXTEND_MODE_EXTEND,
GPU_SAMPLER_CUSTOM_COMPARE,
GPU_SAMPLER_STATE_TYPE_PARAMETERS};
#endif
ENUM_OPERATORS(eGPUSamplerState, GPU_SAMPLER_ICON)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Enums
* \{ */
/**
* Types of texture internal storage. Defines how the data is stored inside the video memory.
@ -505,7 +811,7 @@ void GPU_texture_bind(GPUTexture *texture, int unit);
/**
* Bind a texture to a texture sampling image units using the explicit sampler state.
*/
void GPU_texture_bind_ex(GPUTexture *texture, eGPUSamplerState state, int unit);
void GPU_texture_bind_ex(GPUTexture *texture, GPUSamplerState state, int unit);
/**
* Unbind \a tex from a texture sampling image unit.
* \note this isn't strictly required but it is better for debugging purpose.
@ -538,12 +844,6 @@ void GPU_texture_image_unbind_all(void);
/** \name State API
* \{ */
/**
* Set anisotropic filter usage. Filter sample count is determined globally by
* `U.anisotropic_filter` and updated when `GPU_samplers_update` is called.
*/
void GPU_texture_anisotropic_filter(GPUTexture *texture, bool use_aniso);
/**
* Set \a tex texture depth comparison mode. Only works on depth format.
*/
@ -567,15 +867,28 @@ void GPU_texture_filter_mode(GPUTexture *texture, bool use_filter);
void GPU_texture_mipmap_mode(GPUTexture *texture, bool use_mipmap, bool use_filter);
/**
* Set \a tex texture sampling method for coordinates outside of the [0..1] uv range.
*
* If \a use_repeat is true, sampling the texture outside of the [0..1] uv range will repeat to
* border color instead of the border texel value.
*
* If \a use_clamp is true, sampling the texture outside of the [0..1] uv range will clamp to the
* closest border texel value. If set to false, it will use the values (0, 0, 0, 0) instead.
* Set anisotropic filter usage. Filter sample count is determined globally by
* `U.anisotropic_filter` and updated when `GPU_samplers_update` is called.
*/
void GPU_texture_wrap_mode(GPUTexture *texture, bool use_repeat, bool use_clamp);
void GPU_texture_anisotropic_filter(GPUTexture *texture, bool use_aniso);
/**
* Set \a tex texture sampling method for coordinates outside of the [0..1] uv range along the x
* axis. See GPUSamplerExtendMode for the available and meaning of different extend modes.
*/
void GPU_texture_extend_mode_x(GPUTexture *texture, GPUSamplerExtendMode extend_mode);
/**
* Set \a tex texture sampling method for coordinates outside of the [0..1] uv range along the y
 * axis. See GPUSamplerExtendMode for the available extend modes and their meanings.
*/
void GPU_texture_extend_mode_y(GPUTexture *texture, GPUSamplerExtendMode extend_mode);
/**
* Set \a tex texture sampling method for coordinates outside of the [0..1] uv range along both the
 * x and y axes. See GPUSamplerExtendMode for the available extend modes and their meanings.
*/
void GPU_texture_extend_mode(GPUTexture *texture, GPUSamplerExtendMode extend_mode);
/**
* Set \a tex texture swizzle state for swizzling sample components.

View File

@ -606,7 +606,7 @@ void immBindTexture(const char *name, GPUTexture *tex)
GPU_texture_bind(tex, binding);
}
void immBindTextureSampler(const char *name, GPUTexture *tex, eGPUSamplerState state)
void immBindTextureSampler(const char *name, GPUTexture *tex, GPUSamplerState state)
{
int binding = GPU_shader_get_sampler_binding(imm->shader, name);
GPU_texture_bind_ex(tex, state, binding);

View File

@ -477,7 +477,7 @@ static GPUMaterialTexture *gpu_node_graph_add_texture(GPUNodeGraph *graph,
struct GPUTexture **colorband,
struct GPUTexture **sky,
bool is_tiled,
eGPUSamplerState sampler_state)
GPUSamplerState sampler_state)
{
/* Find existing texture. */
int num_textures = 0;
@ -625,7 +625,7 @@ GPUNodeLink *GPU_differentiate_float_function(const char *function_name)
GPUNodeLink *GPU_image(GPUMaterial *mat,
Image *ima,
ImageUser *iuser,
eGPUSamplerState sampler_state)
GPUSamplerState sampler_state)
{
GPUNodeGraph *graph = gpu_material_node_graph(mat);
GPUNodeLink *link = gpu_node_link_create();
@ -640,7 +640,7 @@ GPUNodeLink *GPU_image_sky(GPUMaterial *mat,
int height,
const float *pixels,
float *layer,
eGPUSamplerState sampler_state)
GPUSamplerState sampler_state)
{
struct GPUTexture **sky = gpu_material_sky_texture_layer_set(mat, width, height, pixels, layer);
@ -655,7 +655,7 @@ GPUNodeLink *GPU_image_sky(GPUMaterial *mat,
void GPU_image_tiled(GPUMaterial *mat,
struct Image *ima,
struct ImageUser *iuser,
eGPUSamplerState sampler_state,
GPUSamplerState sampler_state,
GPUNodeLink **r_image_tiled_link,
GPUNodeLink **r_image_tiled_mapping_link)
{
@ -681,7 +681,7 @@ GPUNodeLink *GPU_color_band(GPUMaterial *mat, int size, float *pixels, float *ro
GPUNodeLink *link = gpu_node_link_create();
link->link_type = GPU_NODE_LINK_COLORBAND;
link->texture = gpu_node_graph_add_texture(
graph, nullptr, nullptr, colorband, nullptr, false, GPU_SAMPLER_MAX);
graph, nullptr, nullptr, colorband, nullptr, false, GPUSamplerState::internal_sampler());
return link;
}

View File

@ -444,7 +444,7 @@ struct ShaderCreateInfo {
struct Sampler {
ImageType type;
eGPUSamplerState sampler;
GPUSamplerState sampler;
StringRefNull name;
};
@ -689,7 +689,7 @@ struct ShaderCreateInfo {
ImageType type,
StringRefNull name,
Frequency freq = Frequency::PASS,
eGPUSamplerState sampler = (eGPUSamplerState)-1)
GPUSamplerState sampler = GPUSamplerState::internal_sampler())
{
Resource res(Resource::BindType::SAMPLER, slot);
res.sampler.type = type;

View File

@ -149,7 +149,7 @@ class StateManager {
virtual void issue_barrier(eGPUBarrier barrier_bits) = 0;
virtual void texture_bind(Texture *tex, eGPUSamplerState sampler, int unit) = 0;
virtual void texture_bind(Texture *tex, GPUSamplerState sampler, int unit) = 0;
virtual void texture_unbind(Texture *tex) = 0;
virtual void texture_unbind_all() = 0;

View File

@ -64,7 +64,7 @@ bool Texture::init_1D(int w, int layers, int mip_len, eGPUTextureFormat format)
format_flag_ = to_format_flag(format);
type_ = (layers > 0) ? GPU_TEXTURE_1D_ARRAY : GPU_TEXTURE_1D;
if ((format_flag_ & (GPU_FORMAT_DEPTH_STENCIL | GPU_FORMAT_INTEGER)) == 0) {
sampler_state = GPU_SAMPLER_FILTER;
sampler_state.filtering = GPU_SAMPLER_FILTERING_LINEAR;
}
return this->init_internal();
}
@ -80,7 +80,7 @@ bool Texture::init_2D(int w, int h, int layers, int mip_len, eGPUTextureFormat f
format_flag_ = to_format_flag(format);
type_ = (layers > 0) ? GPU_TEXTURE_2D_ARRAY : GPU_TEXTURE_2D;
if ((format_flag_ & (GPU_FORMAT_DEPTH_STENCIL | GPU_FORMAT_INTEGER)) == 0) {
sampler_state = GPU_SAMPLER_FILTER;
sampler_state.filtering = GPU_SAMPLER_FILTERING_LINEAR;
}
return this->init_internal();
}
@ -96,7 +96,7 @@ bool Texture::init_3D(int w, int h, int d, int mip_len, eGPUTextureFormat format
format_flag_ = to_format_flag(format);
type_ = GPU_TEXTURE_3D;
if ((format_flag_ & (GPU_FORMAT_DEPTH_STENCIL | GPU_FORMAT_INTEGER)) == 0) {
sampler_state = GPU_SAMPLER_FILTER;
sampler_state.filtering = GPU_SAMPLER_FILTERING_LINEAR;
}
return this->init_internal();
}
@ -112,7 +112,7 @@ bool Texture::init_cubemap(int w, int layers, int mip_len, eGPUTextureFormat for
format_flag_ = to_format_flag(format);
type_ = (layers > 0) ? GPU_TEXTURE_CUBE_ARRAY : GPU_TEXTURE_CUBE;
if ((format_flag_ & (GPU_FORMAT_DEPTH_STENCIL | GPU_FORMAT_INTEGER)) == 0) {
sampler_state = GPU_SAMPLER_FILTER;
sampler_state.filtering = GPU_SAMPLER_FILTERING_LINEAR;
}
return this->init_internal();
}
@ -548,10 +548,10 @@ void GPU_unpack_row_length_set(uint len)
/* ------ Binding ------ */
void GPU_texture_bind_ex(GPUTexture *tex_, eGPUSamplerState state, int unit)
void GPU_texture_bind_ex(GPUTexture *tex_, GPUSamplerState state, int unit)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
state = (state >= GPU_SAMPLER_MAX) ? tex->sampler_state : state;
state = (state.type == GPU_SAMPLER_STATE_TYPE_INTERNAL) ? tex->sampler_state : state;
Context::get()->state_manager->texture_bind(tex, state, unit);
}
@ -604,7 +604,10 @@ void GPU_texture_compare_mode(GPUTexture *tex_, bool use_compare)
Texture *tex = reinterpret_cast<Texture *>(tex_);
/* Only depth formats does support compare mode. */
BLI_assert(!(use_compare) || (tex->format_flag_get() & GPU_FORMAT_DEPTH));
SET_FLAG_FROM_TEST(tex->sampler_state, use_compare, GPU_SAMPLER_COMPARE);
tex->sampler_state.type = use_compare ? GPU_SAMPLER_STATE_TYPE_CUSTOM :
GPU_SAMPLER_STATE_TYPE_PARAMETERS;
tex->sampler_state.custom_type = GPU_SAMPLER_CUSTOM_COMPARE;
}
void GPU_texture_filter_mode(GPUTexture *tex_, bool use_filter)
@ -613,7 +616,7 @@ void GPU_texture_filter_mode(GPUTexture *tex_, bool use_filter)
/* Stencil and integer format does not support filtering. */
BLI_assert(!(use_filter) ||
!(tex->format_flag_get() & (GPU_FORMAT_STENCIL | GPU_FORMAT_INTEGER)));
SET_FLAG_FROM_TEST(tex->sampler_state, use_filter, GPU_SAMPLER_FILTER);
tex->sampler_state.set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_LINEAR, use_filter);
}
void GPU_texture_mipmap_mode(GPUTexture *tex_, bool use_mipmap, bool use_filter)
@ -622,8 +625,8 @@ void GPU_texture_mipmap_mode(GPUTexture *tex_, bool use_mipmap, bool use_filter)
/* Stencil and integer format does not support filtering. */
BLI_assert(!(use_filter || use_mipmap) ||
!(tex->format_flag_get() & (GPU_FORMAT_STENCIL | GPU_FORMAT_INTEGER)));
SET_FLAG_FROM_TEST(tex->sampler_state, use_mipmap, GPU_SAMPLER_MIPMAP);
SET_FLAG_FROM_TEST(tex->sampler_state, use_filter, GPU_SAMPLER_FILTER);
tex->sampler_state.set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_MIPMAP, use_mipmap);
tex->sampler_state.set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_LINEAR, use_filter);
}
void GPU_texture_anisotropic_filter(GPUTexture *tex_, bool use_aniso)
@ -632,14 +635,26 @@ void GPU_texture_anisotropic_filter(GPUTexture *tex_, bool use_aniso)
/* Stencil and integer format does not support filtering. */
BLI_assert(!(use_aniso) ||
!(tex->format_flag_get() & (GPU_FORMAT_STENCIL | GPU_FORMAT_INTEGER)));
SET_FLAG_FROM_TEST(tex->sampler_state, use_aniso, GPU_SAMPLER_ANISO);
tex->sampler_state.set_filtering_flag_from_test(GPU_SAMPLER_FILTERING_ANISOTROPIC, use_aniso);
}
void GPU_texture_wrap_mode(GPUTexture *tex_, bool use_repeat, bool use_clamp)
void GPU_texture_extend_mode_x(GPUTexture *tex_, GPUSamplerExtendMode extend_mode)
{
Texture *tex = reinterpret_cast<Texture *>(tex_);
SET_FLAG_FROM_TEST(tex->sampler_state, use_repeat, GPU_SAMPLER_REPEAT);
SET_FLAG_FROM_TEST(tex->sampler_state, !use_clamp, GPU_SAMPLER_CLAMP_BORDER);
tex->sampler_state.extend_x = extend_mode;
}
/* NOTE: `extend_yz` stores the extend mode for both the y and z axes of the sampler state. */
void GPU_texture_extend_mode_y(GPUTexture *tex_, GPUSamplerExtendMode extend_mode)
{
  reinterpret_cast<Texture *>(tex_)->sampler_state.extend_yz = extend_mode;
}
void GPU_texture_extend_mode(GPUTexture *tex_, GPUSamplerExtendMode extend_mode)
{
  /* Apply the same extend mode on every axis of the texture's sampler state. */
  Texture &tex = *reinterpret_cast<Texture *>(tex_);
  tex.sampler_state.extend_x = extend_mode;
  tex.sampler_state.extend_yz = extend_mode;
}
void GPU_texture_swizzle_set(GPUTexture *tex, const char swizzle[4])

View File

@ -83,7 +83,7 @@ ENUM_OPERATORS(eGPUSamplerFormat, GPU_SAMPLER_TYPE_UINT)
class Texture {
public:
/** Internal Sampler state. */
eGPUSamplerState sampler_state = GPU_SAMPLER_DEFAULT;
GPUSamplerState sampler_state = GPUSamplerState::default_sampler();
/** Reference counter. */
int refcount = 1;
/** Width & Height (of source data), optional. */

View File

@ -685,8 +685,12 @@ class MTLContext : public Context {
MTLContextTextureUtils texture_utils_;
/* Texture Samplers. */
/* Cache of generated #MTLSamplerState objects based on permutations of `eGPUSamplerState`. */
id<MTLSamplerState> sampler_state_cache_[GPU_SAMPLER_MAX];
/* Cache of generated #MTLSamplerState objects based on permutations of the members of
* `GPUSamplerState`. */
id<MTLSamplerState> sampler_state_cache_[GPU_SAMPLER_EXTEND_MODES_COUNT]
[GPU_SAMPLER_EXTEND_MODES_COUNT]
[GPU_SAMPLER_FILTERING_TYPES_COUNT];
id<MTLSamplerState> custom_sampler_state_cache_[GPU_SAMPLER_CUSTOM_TYPES_COUNT];
id<MTLSamplerState> default_sampler_state_ = nil;
/* When texture sampler count exceeds the resource bind limit, an
@ -771,9 +775,8 @@ class MTLContext : public Context {
void sampler_bind(MTLSamplerState, uint sampler_unit);
void texture_unbind(gpu::MTLTexture *mtl_texture);
void texture_unbind_all();
void sampler_state_cache_init();
id<MTLSamplerState> get_sampler_from_state(MTLSamplerState state);
id<MTLSamplerState> generate_sampler_from_state(MTLSamplerState state);
id<MTLSamplerState> generate_icon_sampler();
id<MTLSamplerState> get_default_sampler_state();
/* Metal Context pipeline state. */

View File

@ -223,13 +223,7 @@ MTLContext::MTLContext(void *ghost_window, void *ghost_context)
}
/* Initialize samplers. */
for (uint i = 0; i < GPU_SAMPLER_ICON; i++) {
MTLSamplerState state;
state.state = static_cast<eGPUSamplerState>(i);
sampler_state_cache_[i] = this->generate_sampler_from_state(state);
}
/* Special sampler for icons. */
sampler_state_cache_[GPU_SAMPLER_ICON] = this->generate_icon_sampler();
this->sampler_state_cache_init();
}
MTLContext::~MTLContext()
@ -278,10 +272,22 @@ MTLContext::~MTLContext()
this->free_dummy_resources();
/* Release Sampler States. */
for (int i = 0; i < GPU_SAMPLER_MAX; i++) {
if (sampler_state_cache_[i] != nil) {
[sampler_state_cache_[i] release];
sampler_state_cache_[i] = nil;
for (int extend_yz_i = 0; extend_yz_i < GPU_SAMPLER_EXTEND_MODES_COUNT; extend_yz_i++) {
for (int extend_x_i = 0; extend_x_i < GPU_SAMPLER_EXTEND_MODES_COUNT; extend_x_i++) {
for (int filtering_i = 0; filtering_i < GPU_SAMPLER_FILTERING_TYPES_COUNT; filtering_i++) {
if (sampler_state_cache_[extend_yz_i][extend_x_i][filtering_i] != nil) {
[sampler_state_cache_[extend_yz_i][extend_x_i][filtering_i] release];
sampler_state_cache_[extend_yz_i][extend_x_i][filtering_i] = nil;
}
}
}
}
/* Release Custom Sampler States. */
for (int i = 0; i < GPU_SAMPLER_CUSTOM_TYPES_COUNT; i++) {
if (custom_sampler_state_cache_[i] != nil) {
[custom_sampler_state_cache_[i] release];
custom_sampler_state_cache_[i] = nil;
}
}
@ -2021,73 +2027,118 @@ void MTLContext::texture_unbind_all()
id<MTLSamplerState> MTLContext::get_sampler_from_state(MTLSamplerState sampler_state)
{
BLI_assert((uint)sampler_state >= 0 && ((uint)sampler_state) < GPU_SAMPLER_MAX);
return sampler_state_cache_[(uint)sampler_state];
/* Internal sampler states are signal values and do not correspond to actual samplers. */
BLI_assert(sampler_state.state.type != GPU_SAMPLER_STATE_TYPE_INTERNAL);
if (sampler_state.state.type == GPU_SAMPLER_STATE_TYPE_CUSTOM) {
return custom_sampler_state_cache_[sampler_state.state.custom_type];
}
return sampler_state_cache_[sampler_state.state.extend_yz][sampler_state.state.extend_x]
[sampler_state.state.filtering];
}
id<MTLSamplerState> MTLContext::generate_sampler_from_state(MTLSamplerState sampler_state)
/** A function that maps GPUSamplerExtendMode values to their Metal enum counterparts. */
static inline MTLSamplerAddressMode to_mtl_type(GPUSamplerExtendMode wrap_mode)
{
MTLSamplerDescriptor *descriptor = [[MTLSamplerDescriptor alloc] init];
descriptor.normalizedCoordinates = true;
MTLSamplerAddressMode clamp_type = (sampler_state.state & GPU_SAMPLER_CLAMP_BORDER) ?
MTLSamplerAddressModeClampToBorderColor :
MTLSamplerAddressModeClampToEdge;
MTLSamplerAddressMode repeat_type = (sampler_state.state & GPU_SAMPLER_MIRROR_REPEAT) ?
MTLSamplerAddressModeMirrorRepeat :
MTLSamplerAddressModeRepeat;
descriptor.rAddressMode = (sampler_state.state & GPU_SAMPLER_REPEAT_R) ? repeat_type :
clamp_type;
descriptor.sAddressMode = (sampler_state.state & GPU_SAMPLER_REPEAT_S) ? repeat_type :
clamp_type;
descriptor.tAddressMode = (sampler_state.state & GPU_SAMPLER_REPEAT_T) ? repeat_type :
clamp_type;
descriptor.borderColor = MTLSamplerBorderColorTransparentBlack;
descriptor.minFilter = (sampler_state.state & GPU_SAMPLER_FILTER) ?
MTLSamplerMinMagFilterLinear :
MTLSamplerMinMagFilterNearest;
descriptor.magFilter = (sampler_state.state & GPU_SAMPLER_FILTER) ?
MTLSamplerMinMagFilterLinear :
MTLSamplerMinMagFilterNearest;
descriptor.mipFilter = (sampler_state.state & GPU_SAMPLER_MIPMAP) ?
MTLSamplerMipFilterLinear :
MTLSamplerMipFilterNotMipmapped;
descriptor.lodMinClamp = -1000;
descriptor.lodMaxClamp = 1000;
float aniso_filter = max_ff(16, U.anisotropic_filter);
descriptor.maxAnisotropy = (sampler_state.state & GPU_SAMPLER_MIPMAP) ? aniso_filter : 1;
descriptor.compareFunction = (sampler_state.state & GPU_SAMPLER_COMPARE) ?
MTLCompareFunctionLessEqual :
MTLCompareFunctionAlways;
descriptor.supportArgumentBuffers = true;
id<MTLSamplerState> state = [this->device newSamplerStateWithDescriptor:descriptor];
sampler_state_cache_[(uint)sampler_state] = state;
BLI_assert(state != nil);
[descriptor autorelease];
return state;
switch (wrap_mode) {
case GPU_SAMPLER_EXTEND_MODE_EXTEND:
return MTLSamplerAddressModeClampToEdge;
case GPU_SAMPLER_EXTEND_MODE_REPEAT:
return MTLSamplerAddressModeRepeat;
case GPU_SAMPLER_EXTEND_MODE_MIRRORED_REPEAT:
return MTLSamplerAddressModeMirrorRepeat;
case GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER:
return MTLSamplerAddressModeClampToBorderColor;
default:
BLI_assert_unreachable();
return MTLSamplerAddressModeClampToEdge;
}
}
id<MTLSamplerState> MTLContext::generate_icon_sampler()
void MTLContext::sampler_state_cache_init()
{
MTLSamplerDescriptor *descriptor = [[MTLSamplerDescriptor alloc] init];
descriptor.minFilter = MTLSamplerMinMagFilterLinear;
descriptor.magFilter = MTLSamplerMinMagFilterLinear;
descriptor.mipFilter = MTLSamplerMipFilterNearest;
descriptor.lodMinClamp = 0;
descriptor.lodMaxClamp = 1;
for (int extend_yz_i = 0; extend_yz_i < GPU_SAMPLER_EXTEND_MODES_COUNT; extend_yz_i++) {
const GPUSamplerExtendMode extend_yz = static_cast<GPUSamplerExtendMode>(extend_yz_i);
const MTLSamplerAddressMode extend_t = to_mtl_type(extend_yz);
id<MTLSamplerState> icon_state = [this->device newSamplerStateWithDescriptor:descriptor];
BLI_assert(icon_state != nil);
[descriptor autorelease];
return icon_state;
for (int extend_x_i = 0; extend_x_i < GPU_SAMPLER_EXTEND_MODES_COUNT; extend_x_i++) {
const GPUSamplerExtendMode extend_x = static_cast<GPUSamplerExtendMode>(extend_x_i);
const MTLSamplerAddressMode extend_s = to_mtl_type(extend_x);
for (int filtering_i = 0; filtering_i < GPU_SAMPLER_FILTERING_TYPES_COUNT; filtering_i++) {
const GPUSamplerFiltering filtering = GPUSamplerFiltering(filtering_i);
MTLSamplerDescriptor *descriptor = [[MTLSamplerDescriptor alloc] init];
descriptor.normalizedCoordinates = true;
descriptor.sAddressMode = extend_s;
descriptor.tAddressMode = extend_t;
descriptor.rAddressMode = extend_t;
descriptor.borderColor = MTLSamplerBorderColorTransparentBlack;
descriptor.minFilter = (filtering & GPU_SAMPLER_FILTERING_LINEAR) ?
MTLSamplerMinMagFilterLinear :
MTLSamplerMinMagFilterNearest;
descriptor.magFilter = (filtering & GPU_SAMPLER_FILTERING_LINEAR) ?
MTLSamplerMinMagFilterLinear :
MTLSamplerMinMagFilterNearest;
descriptor.mipFilter = (filtering & GPU_SAMPLER_FILTERING_MIPMAP) ?
MTLSamplerMipFilterLinear :
MTLSamplerMipFilterNotMipmapped;
descriptor.lodMinClamp = -1000;
descriptor.lodMaxClamp = 1000;
float aniso_filter = max_ff(16, U.anisotropic_filter);
descriptor.maxAnisotropy = (filtering & GPU_SAMPLER_FILTERING_MIPMAP) ? aniso_filter : 1;
descriptor.compareFunction = MTLCompareFunctionAlways;
descriptor.supportArgumentBuffers = true;
id<MTLSamplerState> state = [this->device newSamplerStateWithDescriptor:descriptor];
sampler_state_cache_[extend_yz_i][extend_x_i][filtering_i] = state;
BLI_assert(state != nil);
[descriptor autorelease];
}
}
}
/* Compare sampler for depth textures. */
{
MTLSamplerDescriptor *descriptor = [[MTLSamplerDescriptor alloc] init];
descriptor.minFilter = MTLSamplerMinMagFilterLinear;
descriptor.magFilter = MTLSamplerMinMagFilterLinear;
descriptor.compareFunction = MTLCompareFunctionLessEqual;
descriptor.lodMinClamp = -1000;
descriptor.lodMaxClamp = 1000;
descriptor.supportArgumentBuffers = true;
id<MTLSamplerState> compare_state = [this->device newSamplerStateWithDescriptor:descriptor];
custom_sampler_state_cache_[GPU_SAMPLER_CUSTOM_COMPARE] = compare_state;
BLI_assert(compare_state != nil);
[descriptor autorelease];
}
/* Custom sampler for icons. The icon texture is sampled within the shader using a -0.5f LOD
* bias. */
{
MTLSamplerDescriptor *descriptor = [[MTLSamplerDescriptor alloc] init];
descriptor.minFilter = MTLSamplerMinMagFilterLinear;
descriptor.magFilter = MTLSamplerMinMagFilterLinear;
descriptor.mipFilter = MTLSamplerMipFilterNearest;
descriptor.lodMinClamp = 0;
descriptor.lodMaxClamp = 1;
id<MTLSamplerState> icon_state = [this->device newSamplerStateWithDescriptor:descriptor];
custom_sampler_state_cache_[GPU_SAMPLER_CUSTOM_ICON] = icon_state;
BLI_assert(icon_state != nil);
[descriptor autorelease];
}
}
id<MTLSamplerState> MTLContext::get_default_sampler_state()
{
if (default_sampler_state_ == nil) {
default_sampler_state_ = this->get_sampler_from_state(DEFAULT_SAMPLER_STATE);
default_sampler_state_ = this->get_sampler_from_state({GPUSamplerState::default_sampler()});
}
return default_sampler_state_;
}
@ -2231,4 +2282,4 @@ void present(MTLRenderPassDescriptor *blit_descriptor,
/** \} */
} // blender::gpu
} // namespace blender::gpu

View File

@ -43,7 +43,7 @@ class MTLStateManager : public StateManager {
void issue_barrier(eGPUBarrier barrier_bits) override;
void texture_bind(Texture *tex, eGPUSamplerState sampler, int unit) override;
void texture_bind(Texture *tex, GPUSamplerState sampler, int unit) override;
void texture_unbind(Texture *tex) override;
void texture_unbind_all() override;

View File

@ -629,7 +629,7 @@ void MTLStateManager::texture_unpack_row_length_set(uint len)
ctx->pipeline_state.unpack_row_length = len;
}
void MTLStateManager::texture_bind(Texture *tex_, eGPUSamplerState sampler_type, int unit)
void MTLStateManager::texture_bind(Texture *tex_, GPUSamplerState sampler_type, int unit)
{
BLI_assert(tex_);
gpu::MTLTexture *mtl_tex = static_cast<gpu::MTLTexture *>(tex_);
@ -674,7 +674,7 @@ void MTLStateManager::texture_unbind_all()
void MTLStateManager::image_bind(Texture *tex_, int unit)
{
this->texture_bind(tex_, GPU_SAMPLER_DEFAULT, unit);
this->texture_bind(tex_, GPUSamplerState::default_sampler(), unit);
}
void MTLStateManager::image_unbind(Texture *tex_)
@ -689,4 +689,4 @@ void MTLStateManager::image_unbind_all()
/** \} */
} // blender::gpu
} // namespace blender::gpu

View File

@ -126,7 +126,7 @@ static const int MTL_MAX_FBO_ATTACHED = 16;
/* Samplers */
struct MTLSamplerState {
eGPUSamplerState state;
GPUSamplerState state;
/* Mip min and mip max on sampler state always the same.
* Level range now controlled with textureView to be consistent with GL baseLevel. */
@ -138,16 +138,28 @@ struct MTLSamplerState {
operator uint() const
{
return uint(state);
uint integer_representation = 0;
integer_representation |= this->state.filtering;
integer_representation |= this->state.extend_x << 8;
integer_representation |= this->state.extend_yz << 12;
integer_representation |= this->state.custom_type << 16;
integer_representation |= this->state.type << 24;
return integer_representation;
}
operator uint64_t() const
{
return uint64_t(state);
uint64_t integer_representation = 0;
integer_representation |= this->state.filtering;
integer_representation |= this->state.extend_x << 8;
integer_representation |= this->state.extend_yz << 12;
integer_representation |= this->state.custom_type << 16;
integer_representation |= this->state.type << 24;
return integer_representation;
}
};
const MTLSamplerState DEFAULT_SAMPLER_STATE = {GPU_SAMPLER_DEFAULT /*, 0, 9999*/};
const MTLSamplerState DEFAULT_SAMPLER_STATE = {GPUSamplerState::default_sampler() /*, 0, 9999*/};
class MTLTexture : public Texture {
friend class MTLContext;

View File

@ -586,7 +586,7 @@ void gpu::MTLTexture::update_sub_depth_2d(
GPU_TEXTURE_USAGE_ATTACHMENT,
nullptr);
GPU_texture_filter_mode(r32_tex_tmp, false);
GPU_texture_wrap_mode(r32_tex_tmp, false, true);
GPU_texture_extend_mode(r32_tex_tmp, GPU_SAMPLER_EXTEND_MODE_EXTEND);
gpu::MTLTexture *mtl_tex = static_cast<gpu::MTLTexture *>(unwrap(r32_tex_tmp));
mtl_tex->update_sub(mip, offset, extent, type, data);

View File

@ -249,6 +249,12 @@ static void detect_workarounds()
return;
}
/* Only use main context when running inside renderdoc. Renderdoc requires that all calls are
 * from the same context. */
if (G.debug & G_DEBUG_GPU_RENDERDOC) {
GCaps.use_main_context_workaround = true;
}
/* Limit support for GL_ARB_base_instance to OpenGL 4.0 and higher. NVIDIA Quadro FX 4800
* (TeraScale) report that they support GL_ARB_base_instance, but the driver does not support
* GLEW_ARB_draw_indirect as it has an OpenGL3 context what also matches the minimum needed

View File

@ -389,10 +389,11 @@ bool GLContext::debug_capture_begin()
bool GLBackend::debug_capture_begin()
{
#ifdef WITH_RENDERDOC
return renderdoc_.start_frame_capture(nullptr, nullptr);
#else
return false;
if (G.debug & G_DEBUG_GPU_RENDERDOC) {
return renderdoc_.start_frame_capture(nullptr, nullptr);
}
#endif
return false;
}
void GLContext::debug_capture_end()
@ -403,7 +404,9 @@ void GLContext::debug_capture_end()
void GLBackend::debug_capture_end()
{
#ifdef WITH_RENDERDOC
renderdoc_.end_frame_capture(nullptr, nullptr);
if (G.debug & G_DEBUG_GPU_RENDERDOC) {
renderdoc_.end_frame_capture(nullptr, nullptr);
}
#endif
}

View File

@ -444,7 +444,7 @@ void GLStateManager::set_blend(const eGPUBlend value)
/** \name Texture State Management
* \{ */
void GLStateManager::texture_bind(Texture *tex_, eGPUSamplerState sampler_type, int unit)
void GLStateManager::texture_bind(Texture *tex_, GPUSamplerState sampler_state, int unit)
{
BLI_assert(unit < GPU_max_textures());
GLTexture *tex = static_cast<GLTexture *>(tex_);
@ -453,12 +453,12 @@ void GLStateManager::texture_bind(Texture *tex_, eGPUSamplerState sampler_type,
}
/* Eliminate redundant binds. */
if ((textures_[unit] == tex->tex_id_) &&
(samplers_[unit] == GLTexture::samplers_[sampler_type])) {
(samplers_[unit] == GLTexture::get_sampler(sampler_state))) {
return;
}
targets_[unit] = tex->target_;
textures_[unit] = tex->tex_id_;
samplers_[unit] = GLTexture::samplers_[sampler_type];
samplers_[unit] = GLTexture::get_sampler(sampler_state);
tex->is_bound_ = true;
dirty_texture_binds_ |= 1ULL << unit;
}

View File

@ -64,7 +64,7 @@ class GLStateManager : public StateManager {
void issue_barrier(eGPUBarrier barrier_bits) override;
void texture_bind(Texture *tex, eGPUSamplerState sampler, int unit) override;
void texture_bind(Texture *tex, GPUSamplerState sampler, int unit) override;
/**
* Bind the texture to slot 0 for editing purpose. Used by legacy pipeline.
*/

View File

@ -5,6 +5,10 @@
* \ingroup gpu
*/
#include <string>
#include "BLI_assert.h"
#include "DNA_userdef_types.h"
#include "GPU_capabilities.h"
@ -537,60 +541,94 @@ struct GPUFrameBuffer *GLTexture::framebuffer_get()
/** \name Sampler objects
* \{ */
GLuint GLTexture::samplers_[GPU_SAMPLER_MAX] = {0};
/** Map a #GPUSamplerExtendMode to its equivalent OpenGL sampler address mode enum. */
static inline GLenum to_gl(GPUSamplerExtendMode extend_mode)
{
  switch (extend_mode) {
    case GPU_SAMPLER_EXTEND_MODE_REPEAT:
      return GL_REPEAT;
    case GPU_SAMPLER_EXTEND_MODE_MIRRORED_REPEAT:
      return GL_MIRRORED_REPEAT;
    case GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER:
      return GL_CLAMP_TO_BORDER;
    case GPU_SAMPLER_EXTEND_MODE_EXTEND:
      return GL_CLAMP_TO_EDGE;
    default:
      /* Invalid enum values fall back to clamping to the edge. */
      BLI_assert_unreachable();
      return GL_CLAMP_TO_EDGE;
  }
}
GLuint GLTexture::samplers_state_cache_[GPU_SAMPLER_EXTEND_MODES_COUNT]
[GPU_SAMPLER_EXTEND_MODES_COUNT]
[GPU_SAMPLER_FILTERING_TYPES_COUNT] = {};
GLuint GLTexture::custom_samplers_state_cache_[GPU_SAMPLER_CUSTOM_TYPES_COUNT] = {};
void GLTexture::samplers_init()
{
glGenSamplers(GPU_SAMPLER_MAX, samplers_);
for (int i = 0; i < GPU_SAMPLER_ICON; i++) {
eGPUSamplerState state = static_cast<eGPUSamplerState>(i);
GLenum clamp_type = (state & GPU_SAMPLER_CLAMP_BORDER) ? GL_CLAMP_TO_BORDER : GL_CLAMP_TO_EDGE;
GLenum repeat_type = (state & GPU_SAMPLER_MIRROR_REPEAT) ? GL_MIRRORED_REPEAT : GL_REPEAT;
GLenum wrap_s = (state & GPU_SAMPLER_REPEAT_S) ? repeat_type : clamp_type;
GLenum wrap_t = (state & GPU_SAMPLER_REPEAT_T) ? repeat_type : clamp_type;
GLenum wrap_r = (state & GPU_SAMPLER_REPEAT_R) ? repeat_type : clamp_type;
GLenum mag_filter = (state & GPU_SAMPLER_FILTER) ? GL_LINEAR : GL_NEAREST;
GLenum min_filter = (state & GPU_SAMPLER_FILTER) ?
((state & GPU_SAMPLER_MIPMAP) ? GL_LINEAR_MIPMAP_LINEAR : GL_LINEAR) :
((state & GPU_SAMPLER_MIPMAP) ? GL_NEAREST_MIPMAP_LINEAR : GL_NEAREST);
GLenum compare_mode = (state & GPU_SAMPLER_COMPARE) ? GL_COMPARE_REF_TO_TEXTURE : GL_NONE;
glGenSamplers(samplers_state_cache_count_, &samplers_state_cache_[0][0][0]);
glSamplerParameteri(samplers_[i], GL_TEXTURE_WRAP_S, wrap_s);
glSamplerParameteri(samplers_[i], GL_TEXTURE_WRAP_T, wrap_t);
glSamplerParameteri(samplers_[i], GL_TEXTURE_WRAP_R, wrap_r);
glSamplerParameteri(samplers_[i], GL_TEXTURE_MIN_FILTER, min_filter);
glSamplerParameteri(samplers_[i], GL_TEXTURE_MAG_FILTER, mag_filter);
glSamplerParameteri(samplers_[i], GL_TEXTURE_COMPARE_MODE, compare_mode);
glSamplerParameteri(samplers_[i], GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
for (int extend_yz_i = 0; extend_yz_i < GPU_SAMPLER_EXTEND_MODES_COUNT; extend_yz_i++) {
const GPUSamplerExtendMode extend_yz = static_cast<GPUSamplerExtendMode>(extend_yz_i);
const GLenum extend_t = to_gl(extend_yz);
/** Other states are left to default:
* - GL_TEXTURE_BORDER_COLOR is {0, 0, 0, 0}.
* - GL_TEXTURE_MIN_LOD is -1000.
* - GL_TEXTURE_MAX_LOD is 1000.
* - GL_TEXTURE_LOD_BIAS is 0.0f.
*/
for (int extend_x_i = 0; extend_x_i < GPU_SAMPLER_EXTEND_MODES_COUNT; extend_x_i++) {
const GPUSamplerExtendMode extend_x = static_cast<GPUSamplerExtendMode>(extend_x_i);
const GLenum extend_s = to_gl(extend_x);
char sampler_name[128] = "\0\0";
SNPRINTF(sampler_name,
"%s%s%s%s%s%s%s%s%s%s%s",
(state == GPU_SAMPLER_DEFAULT) ? "_default" : "",
(state & GPU_SAMPLER_FILTER) ? "_filter" : "",
(state & GPU_SAMPLER_MIPMAP) ? "_mipmap" : "",
(state & GPU_SAMPLER_REPEAT) ? "_repeat-" : "",
(state & GPU_SAMPLER_REPEAT_S) ? "S" : "",
(state & GPU_SAMPLER_REPEAT_T) ? "T" : "",
(state & GPU_SAMPLER_REPEAT_R) ? "R" : "",
(state & GPU_SAMPLER_MIRROR_REPEAT) ? "-mirror" : "",
(state & GPU_SAMPLER_CLAMP_BORDER) ? "_clamp_border" : "",
(state & GPU_SAMPLER_COMPARE) ? "_compare" : "",
(state & GPU_SAMPLER_ANISO) ? "_aniso" : "");
debug::object_label(GL_SAMPLER, samplers_[i], &sampler_name[1]);
for (int filtering_i = 0; filtering_i < GPU_SAMPLER_FILTERING_TYPES_COUNT; filtering_i++) {
const GPUSamplerFiltering filtering = GPUSamplerFiltering(filtering_i);
const GLenum mag_filter = (filtering & GPU_SAMPLER_FILTERING_LINEAR) ? GL_LINEAR :
GL_NEAREST;
const GLenum linear_min_filter = (filtering & GPU_SAMPLER_FILTERING_MIPMAP) ?
GL_LINEAR_MIPMAP_LINEAR :
GL_LINEAR;
const GLenum nearest_min_filter = (filtering & GPU_SAMPLER_FILTERING_MIPMAP) ?
GL_NEAREST_MIPMAP_LINEAR :
GL_NEAREST;
const GLenum min_filter = (filtering & GPU_SAMPLER_FILTERING_LINEAR) ? linear_min_filter :
nearest_min_filter;
GLuint sampler = samplers_state_cache_[extend_yz_i][extend_x_i][filtering_i];
glSamplerParameteri(sampler, GL_TEXTURE_WRAP_S, extend_s);
glSamplerParameteri(sampler, GL_TEXTURE_WRAP_T, extend_t);
glSamplerParameteri(sampler, GL_TEXTURE_WRAP_R, extend_t);
glSamplerParameteri(sampler, GL_TEXTURE_MIN_FILTER, min_filter);
glSamplerParameteri(sampler, GL_TEXTURE_MAG_FILTER, mag_filter);
/** Other states are left to default:
* - GL_TEXTURE_BORDER_COLOR is {0, 0, 0, 0}.
* - GL_TEXTURE_MIN_LOD is -1000.
* - GL_TEXTURE_MAX_LOD is 1000.
* - GL_TEXTURE_LOD_BIAS is 0.0f.
*/
const GPUSamplerState sampler_state = {filtering, extend_x, extend_yz};
const std::string sampler_name = sampler_state.to_string();
debug::object_label(GL_SAMPLER, sampler, sampler_name.c_str());
}
}
}
samplers_update();
/* Custom sampler for icons.
* NOTE: The icon texture is sampled within the shader using a -0.5f LOD bias. */
GLuint icon_sampler = samplers_[GPU_SAMPLER_ICON];
glGenSamplers(GPU_SAMPLER_CUSTOM_TYPES_COUNT, custom_samplers_state_cache_);
/* Compare sampler for depth textures. */
GLuint compare_sampler = custom_samplers_state_cache_[GPU_SAMPLER_CUSTOM_COMPARE];
glSamplerParameteri(compare_sampler, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glSamplerParameteri(compare_sampler, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glSamplerParameteri(compare_sampler, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glSamplerParameteri(compare_sampler, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glSamplerParameteri(compare_sampler, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glSamplerParameteri(compare_sampler, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
glSamplerParameteri(compare_sampler, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
debug::object_label(GL_SAMPLER, compare_sampler, "compare");
/* Custom sampler for icons. The icon texture is sampled within the shader using a -0.5f LOD
* bias. */
GLuint icon_sampler = custom_samplers_state_cache_[GPU_SAMPLER_CUSTOM_ICON];
glSamplerParameteri(icon_sampler, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glSamplerParameteri(icon_sampler, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
@ -606,19 +644,41 @@ void GLTexture::samplers_update()
float max_anisotropy = 1.0f;
glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &max_anisotropy);
float aniso_filter = min_ff(max_anisotropy, U.anisotropic_filter);
const float anisotropic_filter = min_ff(max_anisotropy, U.anisotropic_filter);
for (int i = 0; i < GPU_SAMPLER_ICON; i++) {
eGPUSamplerState state = static_cast<eGPUSamplerState>(i);
if ((state & GPU_SAMPLER_ANISO) && (state & GPU_SAMPLER_MIPMAP)) {
glSamplerParameterf(samplers_[i], GL_TEXTURE_MAX_ANISOTROPY_EXT, aniso_filter);
for (int extend_yz_i = 0; extend_yz_i < GPU_SAMPLER_EXTEND_MODES_COUNT; extend_yz_i++) {
for (int extend_x_i = 0; extend_x_i < GPU_SAMPLER_EXTEND_MODES_COUNT; extend_x_i++) {
for (int filtering_i = 0; filtering_i < GPU_SAMPLER_FILTERING_TYPES_COUNT; filtering_i++) {
const GPUSamplerFiltering filtering = GPUSamplerFiltering(filtering_i);
if ((filtering & GPU_SAMPLER_FILTERING_ANISOTROPIC) &&
(filtering & GPU_SAMPLER_FILTERING_MIPMAP)) {
glSamplerParameterf(samplers_state_cache_[extend_yz_i][extend_x_i][filtering_i],
GL_TEXTURE_MAX_ANISOTROPY_EXT,
anisotropic_filter);
}
}
}
}
}
void GLTexture::samplers_free()
{
glDeleteSamplers(GPU_SAMPLER_MAX, samplers_);
glDeleteSamplers(samplers_state_cache_count_, &samplers_state_cache_[0][0][0]);
glDeleteSamplers(GPU_SAMPLER_CUSTOM_TYPES_COUNT, custom_samplers_state_cache_);
}
GLuint GLTexture::get_sampler(const GPUSamplerState &sampler_state)
{
/* Internal sampler states are signal values and do not correspond to actual samplers. */
BLI_assert(sampler_state.type != GPU_SAMPLER_STATE_TYPE_INTERNAL);
if (sampler_state.type == GPU_SAMPLER_STATE_TYPE_CUSTOM) {
return custom_samplers_state_cache_[sampler_state.custom_type];
}
return samplers_state_cache_[sampler_state.extend_yz][sampler_state.extend_x]
[sampler_state.filtering];
}
/** \} */

View File

@ -23,8 +23,23 @@ class GLTexture : public Texture {
friend class GLFrameBuffer;
private:
/** All samplers states. */
static GLuint samplers_[GPU_SAMPLER_MAX];
/**
* A cache of all possible sampler configurations stored along each of the three axis of
* variation. The first and second variation axis are the wrap mode along x and y axis
* respectively, and the third variation axis is the filtering type. See the samplers_init()
* method for more information.
*/
static GLuint samplers_state_cache_[GPU_SAMPLER_EXTEND_MODES_COUNT]
[GPU_SAMPLER_EXTEND_MODES_COUNT]
[GPU_SAMPLER_FILTERING_TYPES_COUNT];
static const int samplers_state_cache_count_ = GPU_SAMPLER_EXTEND_MODES_COUNT *
GPU_SAMPLER_EXTEND_MODES_COUNT *
GPU_SAMPLER_FILTERING_TYPES_COUNT;
/**
* A cache of all custom sampler configurations described in GPUSamplerCustomType. See the
* samplers_init() method for more information.
*/
static GLuint custom_samplers_state_cache_[GPU_SAMPLER_CUSTOM_TYPES_COUNT];
/** Target to bind the texture to (#GL_TEXTURE_1D, #GL_TEXTURE_2D, etc...). */
GLenum target_ = -1;
@ -70,10 +85,32 @@ class GLTexture : public Texture {
/* TODO(fclem): Legacy. Should be removed at some point. */
uint gl_bindcode_get() const override;
/**
* Pre-generate, setup all possible samplers and cache them in the samplers_state_cache_ and
* custom_samplers_state_cache_ arrays. This is done to avoid the runtime cost associated with
* setting up a sampler at draw time.
*/
static void samplers_init();
/**
* Free the samplers cache generated in samplers_init() method.
*/
static void samplers_free();
/**
* Updates the anisotropic filter parameters of samplers that enables anisotropic filtering. This
* is not done as a one time initialization in samplers_init() method because the user might
* change the anisotropic filtering samples in the user preferences. So it is called in
* samplers_init() method as well as every time the user preferences change.
*/
static void samplers_update();
/**
* Get the handle of the OpenGL sampler that corresponds to the given sampler state.
* The sampler is retrieved from the cached samplers computed in the samplers_init() method.
*/
static GLuint get_sampler(const GPUSamplerState &sampler_state);
protected:
/** Return true on success. */
bool init_internal() override;

View File

@ -8,7 +8,7 @@
void main()
{
/* Sample texture with LOD BIAS. Used instead of custom lod bias in GPU_SAMPLER_ICON. */
/* Sample texture with LOD BIAS. Used instead of custom lod bias in GPU_SAMPLER_CUSTOM_ICON. */
fragColor = texture(image, texCoord_interp, -0.5) * finalColor;
#ifdef DO_CORNER_MASKING

View File

@ -18,7 +18,7 @@ namespace blender::gpu {
void GPUTest::SetUp()
{
prev_g_debug_ = G.debug;
G.debug |= G_DEBUG_GPU;
G.debug |= G_DEBUG_GPU | G_DEBUG_GPU_RENDERDOC;
CLG_init();
GPU_backend_type_selection_set(gpu_backend_type);

View File

@ -330,7 +330,7 @@ void VKFrameBuffer::render_pass_create()
VkSubpassDescription subpass = {};
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = color_attachment_len;
subpass.pColorAttachments = attachment_references.begin();
subpass.pColorAttachments = attachment_references.data();
if (has_depth_attachment) {
subpass.pDepthStencilAttachment = &attachment_references[depth_location];
}
@ -351,7 +351,7 @@ void VKFrameBuffer::render_pass_create()
framebuffer_create_info.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
framebuffer_create_info.renderPass = vk_render_pass_;
framebuffer_create_info.attachmentCount = attachment_len;
framebuffer_create_info.pAttachments = image_views.begin();
framebuffer_create_info.pAttachments = image_views.data();
framebuffer_create_info.width = width_;
framebuffer_create_info.height = height_;
framebuffer_create_info.layers = 1;

View File

@ -15,6 +15,8 @@
#include "BLI_string_utils.h"
#include "BLI_vector.hh"
#include "BKE_global.h"
using namespace blender::gpu::shader;
extern "C" char datatoc_glsl_shader_defines_glsl[];
@ -360,7 +362,7 @@ static void print_resource(std::ostream &os,
array_offset = res.uniformbuf.name.find_first_of("[");
name_no_array = (array_offset == -1) ? res.uniformbuf.name :
StringRef(res.uniformbuf.name.c_str(), array_offset);
os << "uniform " << name_no_array << " { " << res.uniformbuf.type_name << " _"
os << "uniform _" << name_no_array << " { " << res.uniformbuf.type_name << " "
<< res.uniformbuf.name << "; };\n";
break;
case ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER:
@ -368,8 +370,8 @@ static void print_resource(std::ostream &os,
name_no_array = (array_offset == -1) ? res.storagebuf.name :
StringRef(res.storagebuf.name.c_str(), array_offset);
print_qualifier(os, res.storagebuf.qualifiers);
os << "buffer ";
os << name_no_array << " { " << res.storagebuf.type_name << " _" << res.storagebuf.name
os << "buffer _";
os << name_no_array << " { " << res.storagebuf.type_name << " " << res.storagebuf.name
<< "; };\n";
break;
}
@ -383,29 +385,6 @@ static void print_resource(std::ostream &os,
print_resource(os, location, res);
}
static void print_resource_alias(std::ostream &os, const ShaderCreateInfo::Resource &res)
{
int64_t array_offset;
StringRef name_no_array;
switch (res.bind_type) {
case ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER:
array_offset = res.uniformbuf.name.find_first_of("[");
name_no_array = (array_offset == -1) ? res.uniformbuf.name :
StringRef(res.uniformbuf.name.c_str(), array_offset);
os << "#define " << name_no_array << " (_" << name_no_array << ")\n";
break;
case ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER:
array_offset = res.storagebuf.name.find_first_of("[");
name_no_array = (array_offset == -1) ? res.storagebuf.name :
StringRef(res.storagebuf.name.c_str(), array_offset);
os << "#define " << name_no_array << " (_" << name_no_array << ")\n";
break;
default:
break;
}
}
inline int get_location_count(const Type &type)
{
if (type == shader::Type::MAT4) {
@ -546,6 +525,10 @@ Vector<uint32_t> VKShader::compile_glsl_to_spirv(Span<const char *> sources,
shaderc::Compiler &compiler = backend.get_shaderc_compiler();
shaderc::CompileOptions options;
options.SetOptimizationLevel(shaderc_optimization_level_performance);
if (G.debug & G_DEBUG_GPU_RENDERDOC) {
options.SetOptimizationLevel(shaderc_optimization_level_zero);
options.SetGenerateDebugInfo();
}
shaderc::SpvCompilationResult module = compiler.CompileGlslToSpv(
combined_sources, stage, name, options);
@ -1019,17 +1002,11 @@ std::string VKShader::resources_declare(const shader::ShaderCreateInfo &info) co
for (const ShaderCreateInfo::Resource &res : info.pass_resources_) {
print_resource(ss, interface, res);
}
for (const ShaderCreateInfo::Resource &res : info.pass_resources_) {
print_resource_alias(ss, res);
}
ss << "\n/* Batch Resources. */\n";
for (const ShaderCreateInfo::Resource &res : info.batch_resources_) {
print_resource(ss, interface, res);
}
for (const ShaderCreateInfo::Resource &res : info.batch_resources_) {
print_resource_alias(ss, res);
}
/* Push constants. */
const VKPushConstants::Layout &push_constants_layout = interface.push_constants_layout_get();

View File

@ -22,7 +22,7 @@ void VKStateManager::issue_barrier(eGPUBarrier /*barrier_bits*/)
command_buffer.submit();
}
void VKStateManager::texture_bind(Texture * /*tex*/, eGPUSamplerState /*sampler*/, int /*unit*/) {}
void VKStateManager::texture_bind(Texture * /*tex*/, GPUSamplerState /*sampler*/, int /*unit*/) {}
void VKStateManager::texture_unbind(Texture * /*tex*/) {}

View File

@ -17,7 +17,7 @@ class VKStateManager : public StateManager {
void issue_barrier(eGPUBarrier barrier_bits) override;
void texture_bind(Texture *tex, eGPUSamplerState sampler, int unit) override;
void texture_bind(Texture *tex, GPUSamplerState sampler, int unit) override;
void texture_unbind(Texture *tex) override;
void texture_unbind_all() override;

View File

@ -670,8 +670,7 @@ typedef struct RenderData {
/** Frames to jump during render/playback. */
int frame_step;
/** Standalone player stereo settings. */ /* XXX deprecated since .2.5 */
short stereomode DNA_DEPRECATED;
char _pad10[2];
/** For the dimensions presets menu. */
short dimensionspreset;

View File

@ -57,18 +57,22 @@ static void requiredDataMask(ModifierData *md, CustomData_MeshMasks *r_cddata_ma
}
}
/**
* The object type check is only needed here in case we have a placeholder
* Object assigned (because the library containing the lattice is missing).
* In other cases it should be impossible to have a type mismatch.
*/
static bool is_disabled(LatticeModifierData *lmd)
{
return !lmd->object || lmd->object->type != OB_LATTICE;
}
static bool isDisabled(const struct Scene *UNUSED(scene),
ModifierData *md,
bool UNUSED(userRenderParams))
{
LatticeModifierData *lmd = (LatticeModifierData *)md;
/* The object type check is only needed here in case we have a placeholder
* object assigned (because the library containing the lattice is missing).
*
* In other cases it should be impossible to have a type mismatch.
*/
return !lmd->object || lmd->object->type != OB_LATTICE;
return is_disabled(lmd);
}
static void foreachIDLink(ModifierData *md, Object *ob, IDWalkFunc walk, void *userData)
@ -81,10 +85,12 @@ static void foreachIDLink(ModifierData *md, Object *ob, IDWalkFunc walk, void *u
static void updateDepsgraph(ModifierData *md, const ModifierUpdateDepsgraphContext *ctx)
{
LatticeModifierData *lmd = (LatticeModifierData *)md;
if (lmd->object != NULL) {
DEG_add_object_relation(ctx->node, lmd->object, DEG_OB_COMP_GEOMETRY, "Lattice Modifier");
DEG_add_object_relation(ctx->node, lmd->object, DEG_OB_COMP_TRANSFORM, "Lattice Modifier");
if (is_disabled(lmd)) {
return;
}
DEG_add_object_relation(ctx->node, lmd->object, DEG_OB_COMP_GEOMETRY, "Lattice Modifier");
DEG_add_object_relation(ctx->node, lmd->object, DEG_OB_COMP_TRANSFORM, "Lattice Modifier");
DEG_add_depends_on_transform_relation(ctx->node, "Lattice Modifier");
}

View File

@ -88,7 +88,7 @@ class DirectionalBlurOperation : public NodeOperation {
input_image.bind_as_texture(shader, "input_tx");
GPU_texture_filter_mode(input_image.texture(), true);
GPU_texture_wrap_mode(input_image.texture(), false, false);
GPU_texture_extend_mode(input_image.texture(), GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER);
const Domain domain = compute_domain();
Result &output_image = get_result("Image");

View File

@ -398,7 +398,8 @@ class GlareOperation : public NodeOperation {
input_streak_result.bind_as_texture(shader, "input_streak_tx");
GPU_texture_filter_mode(input_streak_result.texture(), true);
GPU_texture_wrap_mode(input_streak_result.texture(), false, false);
GPU_texture_extend_mode(input_streak_result.texture(),
GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER);
output_streak_result.bind_as_image(shader, "output_streak_img");
@ -585,11 +586,11 @@ class GlareOperation : public NodeOperation {
small_ghost_result.bind_as_texture(shader, "small_ghost_tx");
GPU_texture_filter_mode(small_ghost_result.texture(), true);
GPU_texture_wrap_mode(small_ghost_result.texture(), false, false);
GPU_texture_extend_mode(small_ghost_result.texture(), GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER);
big_ghost_result.bind_as_texture(shader, "big_ghost_tx");
GPU_texture_filter_mode(big_ghost_result.texture(), true);
GPU_texture_wrap_mode(big_ghost_result.texture(), false, false);
GPU_texture_extend_mode(big_ghost_result.texture(), GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER);
const int2 glare_size = get_glare_size();
Result base_ghost_result = Result::Temporary(ResultType::Color, texture_pool());

View File

@ -102,7 +102,7 @@ class LensDistortionOperation : public NodeOperation {
input_image.bind_as_texture(shader, "input_tx");
GPU_texture_filter_mode(input_image.texture(), true);
GPU_texture_wrap_mode(input_image.texture(), false, false);
GPU_texture_extend_mode(input_image.texture(), GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER);
const Domain domain = compute_domain();
@ -129,7 +129,7 @@ class LensDistortionOperation : public NodeOperation {
input_image.bind_as_texture(shader, "input_tx");
GPU_texture_filter_mode(input_image.texture(), true);
GPU_texture_wrap_mode(input_image.texture(), false, false);
GPU_texture_extend_mode(input_image.texture(), GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER);
const Domain domain = compute_domain();

View File

@ -38,10 +38,12 @@ static int node_shader_gpu_tex_environment(GPUMaterial *mat,
bNode *node_original = node->runtime->original ? node->runtime->original : node;
NodeTexImage *tex_original = (NodeTexImage *)node_original->storage;
ImageUser *iuser = &tex_original->iuser;
eGPUSamplerState sampler = GPU_SAMPLER_REPEAT | GPU_SAMPLER_ANISO | GPU_SAMPLER_FILTER;
GPUSamplerState sampler = {GPU_SAMPLER_FILTERING_LINEAR | GPU_SAMPLER_FILTERING_ANISOTROPIC,
GPU_SAMPLER_EXTEND_MODE_REPEAT,
GPU_SAMPLER_EXTEND_MODE_REPEAT};
/* TODO(@fclem): For now assume mipmap is always enabled. */
if (true) {
sampler |= GPU_SAMPLER_MIPMAP;
sampler.enable_filtering_flag(GPU_SAMPLER_FILTERING_MIPMAP);
}
GPUNodeLink *outalpha;
@ -63,15 +65,17 @@ static int node_shader_gpu_tex_environment(GPUMaterial *mat,
if (tex->projection == SHD_PROJ_EQUIRECTANGULAR) {
GPU_link(mat, "node_tex_environment_equirectangular", in[0].link, &in[0].link);
/* To fix pole issue we clamp the v coordinate. */
sampler &= ~GPU_SAMPLER_REPEAT_T;
sampler.extend_yz = GPU_SAMPLER_EXTEND_MODE_EXTEND;
/* Force the highest mipmap and don't do anisotropic filtering.
* This is to fix the artifact caused by derivatives discontinuity. */
sampler &= ~(GPU_SAMPLER_MIPMAP | GPU_SAMPLER_ANISO);
sampler.disable_filtering_flag(GPU_SAMPLER_FILTERING_MIPMAP |
GPU_SAMPLER_FILTERING_ANISOTROPIC);
}
else {
GPU_link(mat, "node_tex_environment_mirror_ball", in[0].link, &in[0].link);
/* Fix pole issue. */
sampler &= ~GPU_SAMPLER_REPEAT;
sampler.extend_x = GPU_SAMPLER_EXTEND_MODE_EXTEND;
sampler.extend_yz = GPU_SAMPLER_EXTEND_MODE_EXTEND;
}
const char *gpu_fn;
@ -85,7 +89,7 @@ static int node_shader_gpu_tex_environment(GPUMaterial *mat,
gpu_fn = names[0];
break;
case SHD_INTERP_CLOSEST:
sampler &= ~(GPU_SAMPLER_FILTER | GPU_SAMPLER_MIPMAP);
sampler.disable_filtering_flag(GPU_SAMPLER_FILTERING_LINEAR | GPU_SAMPLER_FILTERING_MIPMAP);
gpu_fn = names[0];
break;
default:

View File

@ -52,26 +52,33 @@ static int node_shader_gpu_tex_image(GPUMaterial *mat,
node_shader_gpu_tex_mapping(mat, node, in, out);
eGPUSamplerState sampler_state = GPU_SAMPLER_DEFAULT;
GPUSamplerState sampler_state = GPUSamplerState::default_sampler();
switch (tex->extension) {
case SHD_IMAGE_EXTENSION_EXTEND:
sampler_state.extend_x = GPU_SAMPLER_EXTEND_MODE_EXTEND;
sampler_state.extend_yz = GPU_SAMPLER_EXTEND_MODE_EXTEND;
break;
case SHD_IMAGE_EXTENSION_REPEAT:
sampler_state |= GPU_SAMPLER_REPEAT;
sampler_state.extend_x = GPU_SAMPLER_EXTEND_MODE_REPEAT;
sampler_state.extend_yz = GPU_SAMPLER_EXTEND_MODE_REPEAT;
break;
case SHD_IMAGE_EXTENSION_CLIP:
sampler_state |= GPU_SAMPLER_CLAMP_BORDER;
sampler_state.extend_x = GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER;
sampler_state.extend_yz = GPU_SAMPLER_EXTEND_MODE_CLAMP_TO_BORDER;
break;
case SHD_IMAGE_EXTENSION_MIRROR:
sampler_state |= GPU_SAMPLER_REPEAT | GPU_SAMPLER_MIRROR_REPEAT;
sampler_state.extend_x = GPU_SAMPLER_EXTEND_MODE_MIRRORED_REPEAT;
sampler_state.extend_yz = GPU_SAMPLER_EXTEND_MODE_MIRRORED_REPEAT;
break;
default:
break;
}
if (tex->interpolation != SHD_INTERP_CLOSEST) {
sampler_state |= GPU_SAMPLER_ANISO | GPU_SAMPLER_FILTER;
/* TODO(fclem): For now assume mipmap is always enabled. */
sampler_state |= GPU_SAMPLER_MIPMAP;
sampler_state.filtering = GPU_SAMPLER_FILTERING_ANISOTROPIC | GPU_SAMPLER_FILTERING_LINEAR |
GPU_SAMPLER_FILTERING_MIPMAP;
}
const bool use_cubic = ELEM(tex->interpolation, SHD_INTERP_CUBIC, SHD_INTERP_SMART);
@ -105,7 +112,7 @@ static int node_shader_gpu_tex_image(GPUMaterial *mat,
case SHD_PROJ_SPHERE: {
/* This projection is known to have a derivative discontinuity.
* Hide it by turning off mipmapping. */
sampler_state &= ~GPU_SAMPLER_MIPMAP;
sampler_state.disable_filtering_flag(GPU_SAMPLER_FILTERING_MIPMAP);
GPUNodeLink *gpu_image = GPU_image(mat, ima, iuser, sampler_state);
GPU_link(mat, "point_texco_remap_square", *texco, texco);
GPU_link(mat, "point_map_to_sphere", *texco, texco);
@ -115,7 +122,7 @@ static int node_shader_gpu_tex_image(GPUMaterial *mat,
case SHD_PROJ_TUBE: {
/* This projection is known to have a derivative discontinuity.
* Hide it by turning off mipmapping. */
sampler_state &= ~GPU_SAMPLER_MIPMAP;
sampler_state.disable_filtering_flag(GPU_SAMPLER_FILTERING_MIPMAP);
GPUNodeLink *gpu_image = GPU_image(mat, ima, iuser, sampler_state);
GPU_link(mat, "point_texco_remap_square", *texco, texco);
GPU_link(mat, "point_map_to_tube", *texco, texco);

Some files were not shown because too many files have changed in this diff Show More