Vulkan: Clearing Framebuffer + Scissors #106044

Merged
Jeroen Bakker merged 49 commits from Jeroen-Bakker/blender:vulkan-framebuffer-clear into main 2023-03-28 11:51:45 +02:00
317 changed files with 5912 additions and 2738 deletions
Showing only changes of commit 37ab478537

View File

@ -1555,6 +1555,9 @@ elseif(CMAKE_C_COMPILER_ID MATCHES "Clang")
# add_check_c_compiler_flag(C_WARNINGS C_WARN_UNUSED_MACROS -Wunused-macros)
# add_check_cxx_compiler_flag(CXX_WARNINGS CXX_WARN_UNUSED_MACROS -Wunused-macros)
add_check_c_compiler_flag(C_WARNINGS C_WARN_ERROR_UNGUARDED_AVAILABILITY_NEW -Werror=unguarded-availability-new)
add_check_c_compiler_flag(CXX_WARNINGS CXX_WARN_ERROR_UNGUARDED_AVAILABILITY_NEW -Werror=unguarded-availability-new)
# ---------------------
# Suppress Strict Flags

View File

@ -322,6 +322,11 @@ def external_script_initialize_if_needed(args: argparse.Namespace,
blender_url = make_utils.git_get_remote_url(args.git_command, origin_name)
external_url = resolve_external_url(blender_url, repo_name)
# When running `make update` from a freshly cloned fork, check whether the fork of the submodule is
# available. If not, switch to the submodule relative to the main blender repository.
if origin_name == "origin" and not make_utils.git_is_remote_repository(args.git_command, external_url):
external_url = resolve_external_url("https://projects.blender.org/blender/blender", repo_name)
call((args.git_command, "clone", "--origin", origin_name, external_url, str(external_dir)))

View File

@ -1,20 +0,0 @@
if NOT exist "%BLENDER_DIR%\source\tools\.git" (
echo Checking out sub-modules
if not "%GIT%" == "" (
"%GIT%" submodule update --init --recursive --progress
if errorlevel 1 goto FAIL
"%GIT%" submodule foreach git checkout main
if errorlevel 1 goto FAIL
"%GIT%" submodule foreach git pull --rebase origin main
if errorlevel 1 goto FAIL
goto EOF
) else (
echo Blender submodules not found, and git not found in path to retrieve them.
goto FAIL
)
)
goto EOF
:FAIL
exit /b 1
:EOF

View File

@ -14,7 +14,7 @@ if NOT EXIST %PYTHON% (
exit /b 1
)
set FORMAT_PATHS=%BLENDER_DIR%\source\tools\utils_maintenance\clang_format_paths.py
set FORMAT_PATHS=%BLENDER_DIR%\tools\utils_maintenance\clang_format_paths.py
REM The formatting script expects clang-format to be in the current PATH.
set PATH=%CF_PATH%;%PATH%

View File

@ -41,7 +41,7 @@ static const char *FRAGMENT_SHADER =
"void main()\n"
"{\n"
" vec4 rgba = texture(image_texture, texCoord_interp);\n"
/* Harcoded Rec.709 gamma, should use OpenColorIO eventually. */
/* Hard-coded Rec.709 gamma, should use OpenColorIO eventually. */
" fragColor = pow(rgba, vec4(0.45, 0.45, 0.45, 1.0));\n"
"}\n\0";

View File

@ -536,12 +536,11 @@ void CUDADevice::free_host(void *shared_pointer)
cuMemFreeHost(shared_pointer);
}
bool CUDADevice::transform_host_pointer(void *&device_pointer, void *&shared_pointer)
void CUDADevice::transform_host_pointer(void *&device_pointer, void *&shared_pointer)
{
CUDAContextScope scope(this);
cuda_assert(cuMemHostGetDevicePointer_v2((CUdeviceptr *)&device_pointer, shared_pointer, 0));
return true;
}
void CUDADevice::copy_host_to_device(void *device_pointer, void *host_pointer, size_t size)

View File

@ -68,7 +68,7 @@ class CUDADevice : public GPUDevice {
virtual void free_device(void *device_pointer) override;
virtual bool alloc_host(void *&shared_pointer, size_t size) override;
virtual void free_host(void *shared_pointer) override;
virtual bool transform_host_pointer(void *&device_pointer, void *&shared_pointer) override;
virtual void transform_host_pointer(void *&device_pointer, void *&shared_pointer) override;
virtual void copy_host_to_device(void *device_pointer, void *host_pointer, size_t size) override;
void mem_alloc(device_memory &mem) override;

View File

@ -648,7 +648,7 @@ GPUDevice::Mem *GPUDevice::generic_alloc(device_memory &mem, size_t pitch_paddin
}
if (mem_alloc_result) {
assert(transform_host_pointer(device_pointer, shared_pointer));
transform_host_pointer(device_pointer, shared_pointer);
map_host_used += size;
status = " in host memory";
}

View File

@ -391,7 +391,7 @@ class GPUDevice : public Device {
/* This function should return device pointer corresponding to shared pointer, which
* is host buffer, allocated in `alloc_host`. The function should return `true` if such
* address transformation is possible and `false` otherwise. */
virtual bool transform_host_pointer(void *&device_pointer, void *&shared_pointer) = 0;
virtual void transform_host_pointer(void *&device_pointer, void *&shared_pointer) = 0;
virtual void copy_host_to_device(void *device_pointer, void *host_pointer, size_t size) = 0;
};

View File

@ -499,12 +499,11 @@ void HIPDevice::free_host(void *shared_pointer)
hipHostFree(shared_pointer);
}
bool HIPDevice::transform_host_pointer(void *&device_pointer, void *&shared_pointer)
void HIPDevice::transform_host_pointer(void *&device_pointer, void *&shared_pointer)
{
HIPContextScope scope(this);
hip_assert(hipHostGetDevicePointer((hipDeviceptr_t *)&device_pointer, shared_pointer, 0));
return true;
}
void HIPDevice::copy_host_to_device(void *device_pointer, void *host_pointer, size_t size)

View File

@ -61,7 +61,7 @@ class HIPDevice : public GPUDevice {
virtual void free_device(void *device_pointer) override;
virtual bool alloc_host(void *&shared_pointer, size_t size) override;
virtual void free_host(void *shared_pointer) override;
virtual bool transform_host_pointer(void *&device_pointer, void *&shared_pointer) override;
virtual void transform_host_pointer(void *&device_pointer, void *&shared_pointer) override;
virtual void copy_host_to_device(void *device_pointer, void *host_pointer, size_t size) override;
void mem_alloc(device_memory &mem) override;

View File

@ -128,9 +128,8 @@ void RenderScheduler::reset(const BufferParams &buffer_params, int num_samples,
state_.resolution_divider = 1;
}
else {
/* NOTE: Divide by 2 because of the way how scheduling works: it advances resolution divider
* first and then initialized render work. */
state_.resolution_divider = start_resolution_divider_ * 2;
state_.user_is_navigating = true;
state_.resolution_divider = start_resolution_divider_;
}
state_.num_rendered_samples = 0;
@ -312,7 +311,21 @@ RenderWork RenderScheduler::get_render_work()
RenderWork render_work;
if (state_.resolution_divider != pixel_size_) {
state_.resolution_divider = max(state_.resolution_divider / 2, pixel_size_);
if (state_.user_is_navigating) {
/* Don't progress the resolution divider as the user is currently navigating in the scene. */
state_.user_is_navigating = false;
}
else {
/* If the resolution divider is greater than or equal to default_start_resolution_divider_,
* drop the resolution divider down to 4. This is so users with slow hardware and thus high
* resolution dividers (e.g. 16) get an update to let them know something is happening
* rather than having to wait for the full 1:1 render to show up. */
state_.resolution_divider = state_.resolution_divider > default_start_resolution_divider_ ?
(4 * pixel_size_) :
1;
}
state_.resolution_divider = max(state_.resolution_divider, pixel_size_);
state_.num_rendered_samples = 0;
state_.last_display_update_sample = -1;
}
@ -1058,10 +1071,16 @@ void RenderScheduler::update_start_resolution_divider()
return;
}
/* Calculate the maximum resolution divider possible while keeping the long axis of the viewport
* above our preferred minimum axis size (128). */
const int long_viewport_axis = max(buffer_params_.width, buffer_params_.height);
const int max_res_divider_for_desired_size = long_viewport_axis / 128;
if (start_resolution_divider_ == 0) {
/* Resolution divider has never been calculated before: use default resolution, so that we have
* somewhat good initial behavior, giving a chance to collect real numbers. */
start_resolution_divider_ = default_start_resolution_divider_;
/* Resolution divider has never been calculated before: start with a high resolution divider so
* that we have a somewhat good initial behavior, giving a chance to collect real numbers. */
start_resolution_divider_ = min(default_start_resolution_divider_,
max_res_divider_for_desired_size);
VLOG_WORK << "Initial resolution divider is " << start_resolution_divider_;
return;
}
@ -1089,8 +1108,7 @@ void RenderScheduler::update_start_resolution_divider()
/* Don't let resolution drop below the desired one. It's better to be slow than provide an
* unreadable viewport render. */
start_resolution_divider_ = min(resolution_divider_for_update,
default_start_resolution_divider_);
start_resolution_divider_ = min(resolution_divider_for_update, max_res_divider_for_desired_size);
VLOG_WORK << "Calculated resolution divider is " << start_resolution_divider_;
}
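To make the scheduling change above concrete, here is a small illustrative sketch (not Cycles code; the divider values are assumptions, not taken from the patch) of how the viewport resolution divider now progresses on slow hardware: instead of halving 16 → 8 → 4 → 2 → 1, a divider above the default start divider drops straight to 4 and then to 1, and while the user is navigating the divider is simply held. Separately, the initial divider is now clamped so the viewport's long axis stays at or above 128 pixels, e.g. a 1920-pixel-wide viewport caps it at 1920 / 128 = 15.

/* Illustrative sketch only, not Cycles code; values are assumed for the example. */
#include <algorithm>
#include <cstdio>

int main()
{
  const int pixel_size = 1;
  const int default_start_resolution_divider = 8; /* Assumed default for the example. */
  int divider = 16;                               /* High start divider, as on slow hardware. */
  while (divider != pixel_size) {
    /* Mirrors the new get_render_work() progression: jump to 4, then to 1. */
    divider = (divider > default_start_resolution_divider) ? 4 * pixel_size : 1;
    divider = std::max(divider, pixel_size);
    std::printf("render update at divider %d\n", divider); /* Prints 4, then 1. */
  }
  return 0;
}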

View File

@ -332,6 +332,8 @@ class RenderScheduler {
};
struct {
bool user_is_navigating = false;
int resolution_divider = 1;
/* Number of rendered samples on top of the start sample. */

View File

@ -741,21 +741,21 @@ if(WITH_CYCLES_DEVICE_ONEAPI)
endif()
# SYCL_CPP_FLAGS is a variable that the user can set to pass extra compiler options
set(sycl_compiler_flags
${CMAKE_CURRENT_SOURCE_DIR}/${SRC_KERNEL_DEVICE_ONEAPI}
-fsycl
-fsycl-unnamed-lambda
-fdelayed-template-parsing
-mllvm -inlinedefault-threshold=250
-mllvm -inlinehint-threshold=350
-fsycl-device-code-split=per_kernel
-fsycl-max-parallel-link-jobs=${SYCL_OFFLINE_COMPILER_PARALLEL_JOBS}
-shared
-DWITH_ONEAPI
-ffast-math
-O2
-o"${cycles_kernel_oneapi_lib}"
-I"${CMAKE_CURRENT_SOURCE_DIR}/.."
${SYCL_CPP_FLAGS}
${CMAKE_CURRENT_SOURCE_DIR}/${SRC_KERNEL_DEVICE_ONEAPI}
-fsycl
-fsycl-unnamed-lambda
-fdelayed-template-parsing
-mllvm -inlinedefault-threshold=250
-mllvm -inlinehint-threshold=350
-fsycl-device-code-split=per_kernel
-fsycl-max-parallel-link-jobs=${SYCL_OFFLINE_COMPILER_PARALLEL_JOBS}
-shared
-DWITH_ONEAPI
-ffast-math
-O2
-o"${cycles_kernel_oneapi_lib}"
-I"${CMAKE_CURRENT_SOURCE_DIR}/.."
${SYCL_CPP_FLAGS}
)
if(WITH_CYCLES_ONEAPI_HOST_TASK_EXECUTION)

View File

@ -205,8 +205,8 @@ LightTree::LightTree(vector<LightTreePrimitive> &prims,
}
max_lights_in_leaf_ = max_lights_in_leaf;
int num_prims = prims.size();
int num_local_lights = num_prims - num_distant_lights;
const int num_prims = prims.size();
const int num_local_lights = num_prims - num_distant_lights;
/* The number of nodes is estimated to be twice the number of primitives. */
nodes_.reserve(2 * num_prims);
@ -240,8 +240,8 @@ int LightTree::recursive_build(
OrientationBounds bcone = OrientationBounds::empty;
BoundBox centroid_bounds = BoundBox::empty;
float energy_total = 0.0;
int num_prims = end - start;
int current_index = nodes_.size();
const int num_prims = end - start;
for (int i = start; i < end; i++) {
const LightTreePrimitive &prim = prims.at(i);
@ -254,13 +254,13 @@ int LightTree::recursive_build(
nodes_.emplace_back(bbox, bcone, energy_total, bit_trail);
bool try_splitting = num_prims > 1 && len(centroid_bounds.size()) > 0.0f;
const bool try_splitting = num_prims > 1 && len(centroid_bounds.size()) > 0.0f;
int split_dim = -1, split_bucket = 0, num_left_prims = 0;
bool should_split = false;
if (try_splitting) {
/* Find the best place to split the primitives into 2 nodes.
* If the best split cost is no better than making a leaf node, make a leaf instead. */
float min_cost = min_split_saoh(
const float min_cost = min_split_saoh(
centroid_bounds, start, end, bbox, bcone, split_dim, split_bucket, num_left_prims, prims);
should_split = num_prims > max_lights_in_leaf_ || min_cost < energy_total;
}
@ -295,8 +295,8 @@ int LightTree::recursive_build(
}
float LightTree::min_split_saoh(const BoundBox &centroid_bbox,
int start,
int end,
const int start,
const int end,
const BoundBox &bbox,
const OrientationBounds &bcone,
int &split_dim,
@ -329,7 +329,7 @@ float LightTree::min_split_saoh(const BoundBox &centroid_bbox,
const float inv_extent = 1 / (centroid_bbox.size()[dim]);
/* Fill in buckets with primitives. */
vector<LightTreeBucketInfo> buckets(LightTreeBucketInfo::num_buckets);
std::array<LightTreeBucketInfo, LightTreeBucketInfo::num_buckets> buckets;
for (int i = start; i < end; i++) {
const LightTreePrimitive &prim = prims[i];
@ -348,7 +348,7 @@ float LightTree::min_split_saoh(const BoundBox &centroid_bbox,
}
/* Calculate the cost of splitting at each point between partitions. */
vector<float> bucket_costs(LightTreeBucketInfo::num_buckets - 1);
std::array<float, LightTreeBucketInfo::num_buckets - 1> bucket_costs;
float energy_L, energy_R;
BoundBox bbox_L, bbox_R;
OrientationBounds bcone_L, bcone_R;
@ -379,9 +379,10 @@ float LightTree::min_split_saoh(const BoundBox &centroid_bbox,
/* Calculate the cost of splitting using the heuristic as described in the paper. */
const float area_L = has_area ? bbox_L.area() : len(bbox_L.size());
const float area_R = has_area ? bbox_R.area() : len(bbox_R.size());
float left = (bbox_L.valid()) ? energy_L * area_L * bcone_L.calculate_measure() : 0.0f;
float right = (bbox_R.valid()) ? energy_R * area_R * bcone_R.calculate_measure() : 0.0f;
float regularization = max_extent * inv_extent;
const float left = (bbox_L.valid()) ? energy_L * area_L * bcone_L.calculate_measure() : 0.0f;
const float right = (bbox_R.valid()) ? energy_R * area_R * bcone_R.calculate_measure() :
0.0f;
const float regularization = max_extent * inv_extent;
bucket_costs[split] = regularization * (left + right) * inv_total_cost;
if (bucket_costs[split] < min_cost) {

View File

@ -153,6 +153,16 @@ static float3 output_estimate_emission(ShaderOutput *output, bool &is_constant)
estimate *= node->get_float(strength_in->socket_type);
}
/* Lower the importance of emission nodes that come from automatic value/color to shader
* conversion, as these are likely used for previewing, and building a light tree for them
* on dense meshes can be slow. */
if (node->type == EmissionNode::get_node_type()) {
EmissionNode *emission_node = static_cast<EmissionNode *>(node);
if (emission_node->from_auto_conversion) {
estimate *= 0.1f;
}
}
return estimate;
}
else if (node->type == LightFalloffNode::get_node_type() ||

View File

@ -260,6 +260,7 @@ void ShaderGraph::connect(ShaderOutput *from, ShaderInput *to)
if (to->type() == SocketType::CLOSURE) {
EmissionNode *emission = create_node<EmissionNode>();
emission->from_auto_conversion = true;
emission->set_color(one_float3());
emission->set_strength(1.0f);
convert = add(emission);

View File

@ -723,6 +723,8 @@ class EmissionNode : public ShaderNode {
NODE_SOCKET_API(float3, color)
NODE_SOCKET_API(float, strength)
NODE_SOCKET_API(float, surface_mix_weight)
bool from_auto_conversion = false;
};
class BackgroundNode : public ShaderNode {

View File

@ -56,8 +56,8 @@ class BoundBox {
__forceinline void grow(const BoundBox &bbox)
{
grow(bbox.min);
grow(bbox.max);
min = ccl::min(bbox.min, min);
max = ccl::max(bbox.max, max);
}
__forceinline void grow_safe(const float3 &pt)
@ -81,8 +81,12 @@ class BoundBox {
__forceinline void grow_safe(const BoundBox &bbox)
{
grow_safe(bbox.min);
grow_safe(bbox.max);
if (isfinite_safe(bbox.min)) {
min = ccl::min(bbox.min, min);
}
if (isfinite_safe(bbox.max)) {
max = ccl::max(bbox.max, max);
}
}
__forceinline void intersect(const BoundBox &bbox)

View File

@ -113,9 +113,6 @@ if "%TEST%" == "1" (
goto EOF
)
call "%BLENDER_DIR%\build_files\windows\check_submodules.cmd"
if errorlevel 1 goto EOF
if "%BUILD_WITH_NINJA%" == "" (
call "%BLENDER_DIR%\build_files\windows\configure_msbuild.cmd"
if errorlevel 1 goto EOF

View File

@ -66,8 +66,8 @@ const UserDef U_default = {
/** Default so DPI is detected automatically. */
.dpi = 0,
.dpi_fac = 0.0,
.inv_dpi_fac = 0.0, /* run-time. */
.scale_factor = 0.0,
.inv_scale_factor = 0.0, /* run-time. */
.pixelsize = 1,
.virtual_pixel = 0,

View File

@ -389,6 +389,8 @@ class NODE_MT_geometry_node_GEO_MESH_OPERATIONS(Menu):
node_add_menu.add_node_type(layout, "GeometryNodeMeshBoolean")
node_add_menu.add_node_type(layout, "GeometryNodeMeshToCurve")
node_add_menu.add_node_type(layout, "GeometryNodeMeshToPoints")
if _context.preferences.experimental.use_new_volume_nodes:
node_add_menu.add_node_type(layout, "GeometryNodeMeshToSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodeMeshToVolume")
node_add_menu.add_node_type(layout, "GeometryNodeScaleElements")
node_add_menu.add_node_type(layout, "GeometryNodeSplitEdges")
@ -453,6 +455,8 @@ class NODE_MT_category_GEO_POINT(Menu):
layout.separator()
node_add_menu.add_node_type(layout, "GeometryNodePoints")
node_add_menu.add_node_type(layout, "GeometryNodePointsToVertices")
if _context.preferences.experimental.use_new_volume_nodes:
node_add_menu.add_node_type(layout, "GeometryNodePointsToSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodePointsToVolume")
layout.separator()
node_add_menu.add_node_type(layout, "GeometryNodeSetPointRadius")
@ -593,6 +597,11 @@ class NODE_MT_category_GEO_VOLUME(Menu):
layout = self.layout
node_add_menu.add_node_type(layout, "GeometryNodeVolumeCube")
node_add_menu.add_node_type(layout, "GeometryNodeVolumeToMesh")
if _context.preferences.experimental.use_new_volume_nodes:
layout.separator()
node_add_menu.add_node_type(layout, "GeometryNodeMeanFilterSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodeOffsetSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodeSDFVolumeSphere")
node_add_menu.draw_assets_for_catalog(layout, self.bl_label)

View File

@ -25,6 +25,7 @@ class AssetIdentifier {
AssetIdentifier(const AssetIdentifier &) = default;
std::string full_path() const;
std::string full_library_path() const;
};
} // namespace blender::asset_system

View File

@ -21,6 +21,8 @@ const char *AS_asset_representation_name_get(const AssetRepresentation *asset)
ATTR_WARN_UNUSED_RESULT;
AssetMetaData *AS_asset_representation_metadata_get(const AssetRepresentation *asset)
ATTR_WARN_UNUSED_RESULT;
struct ID *AS_asset_representation_local_id_get(const AssetRepresentation *asset)
ATTR_WARN_UNUSED_RESULT;
bool AS_asset_representation_is_local_id(const AssetRepresentation *asset) ATTR_WARN_UNUSED_RESULT;
bool AS_asset_representation_is_never_link(const AssetRepresentation *asset)
ATTR_WARN_UNUSED_RESULT;

View File

@ -82,6 +82,9 @@ class AssetRepresentation {
* #get_import_method(). Also returns true if there is no predefined import method
* (when #get_import_method() returns no value). */
bool may_override_import_method() const;
/** If this asset is stored inside the current file (#is_local_id() is true), this returns the
* ID's pointer, otherwise null. */
ID *local_id() const;
/** Returns whether this asset is stored inside the current file, and as such is fully editable. */
bool is_local_id() const;
const AssetLibrary &owner_asset_library() const;
@ -92,7 +95,11 @@ class AssetRepresentation {
/* C-Handle */
struct AssetRepresentation;
const std::string AS_asset_representation_full_path_get(const ::AssetRepresentation *asset);
std::string AS_asset_representation_full_path_get(const ::AssetRepresentation *asset);
/** Get the absolute path to the .blend file containing the given asset. String will be empty if
* the asset could not be mapped to a valid .blend file path. Valid in this case also means that
* the file needs to exist on disk. */
std::string AS_asset_representation_full_library_path_get(const ::AssetRepresentation *asset);
std::optional<eAssetImportMethod> AS_asset_representation_import_method_get(
const ::AssetRepresentation *asset_handle);
bool AS_asset_representation_may_override_import_method(const ::AssetRepresentation *asset_handle);

View File

@ -4,8 +4,11 @@
* \ingroup asset_system
*/
#include <string>
#include "BKE_blendfile.h"
#include "BLI_path_util.h"
#include <iostream>
#include "AS_asset_identifier.hh"
@ -24,4 +27,16 @@ std::string AssetIdentifier::full_path() const
return path;
}
std::string AssetIdentifier::full_library_path() const
{
std::string asset_path = full_path();
char blend_path[1090 /*FILE_MAX_LIBEXTRA*/];
if (!BKE_blendfile_library_path_explode(asset_path.c_str(), blend_path, nullptr, nullptr)) {
return {};
}
return blend_path;
}
} // namespace blender::asset_system

View File

@ -97,6 +97,11 @@ bool AssetRepresentation::may_override_import_method() const
return owner_asset_library_->may_override_import_method_;
}
ID *AssetRepresentation::local_id() const
{
return is_local_id_ ? local_asset_id_ : nullptr;
}
bool AssetRepresentation::is_local_id() const
{
return is_local_id_;
@ -111,7 +116,7 @@ const AssetLibrary &AssetRepresentation::owner_asset_library() const
using namespace blender;
const std::string AS_asset_representation_full_path_get(const AssetRepresentation *asset_handle)
std::string AS_asset_representation_full_path_get(const AssetRepresentation *asset_handle)
{
const asset_system::AssetRepresentation *asset =
reinterpret_cast<const asset_system::AssetRepresentation *>(asset_handle);
@ -119,6 +124,13 @@ const std::string AS_asset_representation_full_path_get(const AssetRepresentatio
return identifier.full_path();
}
std::string AS_asset_representation_full_library_path_get(const AssetRepresentation *asset_handle)
{
const asset_system::AssetRepresentation *asset =
reinterpret_cast<const asset_system::AssetRepresentation *>(asset_handle);
return asset->get_identifier().full_library_path();
}
std::optional<eAssetImportMethod> AS_asset_representation_import_method_get(
const AssetRepresentation *asset_handle)
{
@ -152,6 +164,13 @@ AssetMetaData *AS_asset_representation_metadata_get(const AssetRepresentation *a
return &asset->get_metadata();
}
ID *AS_asset_representation_local_id_get(const AssetRepresentation *asset_handle)
{
const asset_system::AssetRepresentation *asset =
reinterpret_cast<const asset_system::AssetRepresentation *>(asset_handle);
return asset->local_id();
}
bool AS_asset_representation_is_local_id(const AssetRepresentation *asset_handle)
{
const asset_system::AssetRepresentation *asset =

View File

@ -45,7 +45,7 @@ int BLF_set_default(void)
{
ASSERT_DEFAULT_SET;
BLF_size(global_font_default, global_font_size * U.dpi_fac);
BLF_size(global_font_default, global_font_size * UI_SCALE_FAC);
return global_font_default;
}
@ -53,7 +53,7 @@ int BLF_set_default(void)
void BLF_draw_default(float x, float y, float z, const char *str, const size_t str_len)
{
ASSERT_DEFAULT_SET;
BLF_size(global_font_default, global_font_size * U.dpi_fac);
BLF_size(global_font_default, global_font_size * UI_SCALE_FAC);
BLF_position(global_font_default, x, y, z);
BLF_draw(global_font_default, str, str_len);
}

View File

@ -29,7 +29,7 @@
* It's mostly used for modifiers, and has the advantages of not taking much
* resources.
*
* BMesh is a full-on brep, used for editmode, some modifiers, etc. It's much
* BMesh is a full-on BREP, used for edit-mode, some modifiers, etc. It's much
* more capable (if memory-intensive) than CDDM.
*
* DerivedMesh is somewhat hackish. Many places assume that a DerivedMesh is

View File

@ -19,6 +19,32 @@ struct ReportList;
struct UserDef;
struct bContext;
/**
* Check whether given path ends with a blend file compatible extension
* (`.blend`, `.ble` or `.blend.gz`).
*
* \param str: The path to check.
* \return true if this path ends with a blender file extension.
*/
bool BKE_blendfile_extension_check(const char *str);
/**
* Try to explode given path into its 'library components'
* (i.e. a .blend file, id type/group, and data-block itself).
*
* \param path: the full path to explode.
* \param r_dir: the string that'll contain path up to blend file itself ('library' path).
* WARNING! Must be at least #FILE_MAX_LIBEXTRA long (it also stores group and name strings)!
* \param r_group: a pointer within `r_dir` to the 'group' part of the path, if any ('\0'
* terminated). May be NULL.
* \param r_name: a pointer within `r_dir` to the data-block name, if any ('\0' terminated). May be
* NULL.
* \return true if path contains a blend file.
*/
bool BKE_blendfile_library_path_explode(const char *path,
char *r_dir,
char **r_group,
char **r_name);
/**
* Shared setup function that makes the data from `bfd` into the current blend file,
* replacing the contents of #G.main.
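A hedged usage sketch of the declarations above (not part of this patch; it assumes this tree's BKE_blendfile.h is available and uses 1090 as the FILE_MAX_LIBEXTRA buffer size, the same way asset_identifier.cc below does):

#include <cstdio>

#include "BKE_blendfile.h"

/* Print the library components of a path such as "/tmp/lib.blend/Object/Cube". */
static void print_library_components(const char *path)
{
  /* The doc-string above requires the buffer to be at least FILE_MAX_LIBEXTRA bytes. */
  char dir[1090 /* FILE_MAX_LIBEXTRA */];
  char *group = nullptr;
  char *name = nullptr;
  if (BKE_blendfile_library_path_explode(path, dir, &group, &name)) {
    /* For the example path this prints "/tmp/lib.blend", "Object" and "Cube". */
    std::printf("blend: %s, group: %s, name: %s\n", dir, group ? group : "-", name ? name : "-");
  }
}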

View File

@ -381,6 +381,8 @@ bool CTX_data_editable_gpencil_strokes(const bContext *C, ListBase *list);
const struct AssetLibraryReference *CTX_wm_asset_library_ref(const bContext *C);
struct AssetHandle CTX_wm_asset_handle(const bContext *C, bool *r_is_valid);
struct AssetRepresentation *CTX_wm_asset(const bContext *C);
bool CTX_wm_interface_locked(const bContext *C);
/**

View File

@ -486,6 +486,8 @@ const char *CustomData_get_active_layer_name(const struct CustomData *data, int
*/
const char *CustomData_get_render_layer_name(const struct CustomData *data, int type);
bool CustomData_layer_is_anonymous(const struct CustomData *data, int type, int n);
void CustomData_bmesh_set(const struct CustomData *data,
void *block,
int type,

View File

@ -44,7 +44,7 @@ typedef struct CfraElem {
/* ************** F-Curve Modifiers *************** */
/**
* F-Curve Modifier Type-Info (fmi):
* F-Curve Modifier Type-Info (`fmi`):
* This struct provides function pointers for runtime, so that functions can be
* written more generally (with fewer/no special exceptions for various modifiers).
*

View File

@ -1555,6 +1555,11 @@ void BKE_nodetree_remove_layer_n(struct bNodeTree *ntree, struct Scene *scene, i
#define GEO_NODE_IMAGE 1191
#define GEO_NODE_INTERPOLATE_CURVES 1192
#define GEO_NODE_EDGES_TO_FACE_GROUPS 1193
#define GEO_NODE_POINTS_TO_SDF_VOLUME 1194
#define GEO_NODE_MESH_TO_SDF_VOLUME 1195
#define GEO_NODE_SDF_VOLUME_SPHERE 1196
#define GEO_NODE_MEAN_FILTER_SDF_VOLUME 1197
#define GEO_NODE_OFFSET_SDF_VOLUME 1198
/** \} */

View File

@ -151,13 +151,6 @@ class bNodeTreeRuntime : NonCopyable, NonMovable {
Vector<bNode *> root_frames;
Vector<bNodeSocket *> interface_inputs;
Vector<bNodeSocket *> interface_outputs;
/**
* The location of all sockets in the tree, calculated while drawing the nodes.
* Indexed with #bNodeSocket::index_in_tree(). In the node tree's "world space"
* (the same as #bNode::runtime::totr).
*/
Vector<float2> all_socket_locations;
};
/**
@ -183,6 +176,13 @@ class bNodeSocketRuntime : NonCopyable, NonMovable {
*/
short total_inputs = 0;
/**
* The location of the socket in the tree, calculated while drawing the nodes and invalid if the
* node tree hasn't been drawn yet. In the node tree's "world space" (the same as
* #bNode::runtime::totr).
*/
float2 location;
/** Only valid when #topology_cache_is_dirty is false. */
Vector<bNodeLink *> directly_linked_links;
Vector<bNodeSocket *> directly_linked_sockets;

View File

@ -256,8 +256,8 @@ struct NodeData {
MEM_delete(node_data);
}
};
/* -------------------------------------------------------------------- */
/* -------------------------------------------------------------------- */
/** \name Fix non-manifold edge bleeding.
* \{ */

View File

@ -122,11 +122,11 @@ typedef struct PTCacheID {
/* flags defined in DNA_object_force_types.h */
unsigned int data_types, info_types;
/* copies point data to cache data */
/* Copies point data to cache data. */
int (*write_point)(int index, void *calldata, void **data, int cfra);
/* copies cache cata to point data */
/* Copies cache data to point data. */
void (*read_point)(int index, void *calldata, void **data, float cfra, const float *old_data);
/* interpolated between previously read point data and cache data */
/* Interpolated between previously read point data and cache data. */
void (*interpolate_point)(int index,
void *calldata,
void **data,

View File

@ -76,6 +76,7 @@ VolumeGrid *BKE_volume_grid_get_for_write(struct Volume *volume, int grid_index)
const VolumeGrid *BKE_volume_grid_active_get_for_read(const struct Volume *volume);
/* Tries to find a grid with the given name. Make sure that the volume has been loaded. */
const VolumeGrid *BKE_volume_grid_find_for_read(const struct Volume *volume, const char *name);
VolumeGrid *BKE_volume_grid_find_for_write(struct Volume *volume, const char *name);
/* Tries to set the name of the velocity field. If no such grid exists with the given base name,
* this will try common post-fixes in order to detect velocity fields split into multiple grids.

View File

@ -370,6 +370,26 @@ CustomDataLayer *BKE_id_attribute_duplicate(ID *id, const char *name, ReportList
return BKE_id_attribute_search(id, uniquename, CD_MASK_PROP_ALL, ATTR_DOMAIN_MASK_ALL);
}
static int color_name_to_index(ID *id, const char *name)
{
const CustomDataLayer *layer = BKE_id_attribute_search(
id, name, CD_MASK_COLOR_ALL, ATTR_DOMAIN_MASK_COLOR);
return BKE_id_attribute_to_index(id, layer, ATTR_DOMAIN_MASK_COLOR, CD_MASK_COLOR_ALL);
}
static int color_clamp_index(ID *id, int index)
{
const int length = BKE_id_attributes_length(id, ATTR_DOMAIN_MASK_COLOR, CD_MASK_COLOR_ALL);
return min_ii(index, length - 1);
}
static const char *color_name_from_index(ID *id, int index)
{
const CustomDataLayer *layer = BKE_id_attribute_from_index(
id, index, ATTR_DOMAIN_MASK_COLOR, CD_MASK_COLOR_ALL);
return layer ? layer->name : nullptr;
}
bool BKE_id_attribute_remove(ID *id, const char *name, ReportList *reports)
{
using namespace blender;
@ -391,31 +411,43 @@ bool BKE_id_attribute_remove(ID *id, const char *name, ReportList *reports)
if (BMEditMesh *em = mesh->edit_mesh) {
for (const int domain : IndexRange(ATTR_DOMAIN_NUM)) {
if (CustomData *data = info[domain].customdata) {
int layer_index = CustomData_get_named_layer_index_notype(data, name);
if (layer_index >= 0) {
if (data->layers[layer_index].type == CD_PROP_FLOAT2) {
/* free associated UV map bool layers */
char buffer_src[MAX_CUSTOMDATA_LAYER_NAME];
BM_data_layer_free_named(
em->bm, data, BKE_uv_map_vert_select_name_get(name, buffer_src));
BM_data_layer_free_named(
em->bm, data, BKE_uv_map_edge_select_name_get(name, buffer_src));
BM_data_layer_free_named(em->bm, data, BKE_uv_map_pin_name_get(name, buffer_src));
}
const std::string name_copy = name;
const int layer_index = CustomData_get_named_layer_index_notype(data, name_copy.c_str());
if (layer_index == -1) {
continue;
}
/* Because it's possible that name is owned by the layer and will be freed
* when freeing the layer, do these checks before freeing. */
const bool is_active_color_attribute = name == StringRef(mesh->active_color_attribute);
const bool is_default_color_attribute = name == StringRef(mesh->default_color_attribute);
if (BM_data_layer_free_named(em->bm, data, name)) {
if (is_active_color_attribute) {
MEM_SAFE_FREE(mesh->active_color_attribute);
}
else if (is_default_color_attribute) {
MEM_SAFE_FREE(mesh->default_color_attribute);
}
return true;
const eCustomDataType type = eCustomDataType(data->layers[layer_index].type);
const bool is_active_color_attribute = name_copy.c_str() ==
StringRef(mesh->active_color_attribute);
const bool is_default_color_attribute = name_copy.c_str() ==
StringRef(mesh->default_color_attribute);
const int active_color_index = color_name_to_index(id, mesh->active_color_attribute);
const int default_color_index = color_name_to_index(id, mesh->default_color_attribute);
if (!BM_data_layer_free_named(em->bm, data, name_copy.c_str())) {
BLI_assert_unreachable();
}
if (is_active_color_attribute) {
BKE_id_attributes_active_color_set(
id, color_name_from_index(id, color_clamp_index(id, active_color_index)));
}
if (is_default_color_attribute) {
BKE_id_attributes_default_color_set(
id, color_name_from_index(id, color_clamp_index(id, default_color_index)));
}
if (type == CD_PROP_FLOAT2 && domain == ATTR_DOMAIN_CORNER) {
char buffer[MAX_CUSTOMDATA_LAYER_NAME];
BM_data_layer_free_named(
em->bm, data, BKE_uv_map_vert_select_name_get(name_copy.c_str(), buffer));
BM_data_layer_free_named(
em->bm, data, BKE_uv_map_edge_select_name_get(name_copy.c_str(), buffer));
BM_data_layer_free_named(
em->bm, data, BKE_uv_map_pin_name_get(name_copy.c_str(), buffer));
}
return true;
}
}
return false;
@ -423,21 +455,44 @@ bool BKE_id_attribute_remove(ID *id, const char *name, ReportList *reports)
}
std::optional<MutableAttributeAccessor> attributes = get_attribute_accessor_for_write(*id);
if (!attributes) {
return false;
}
if (GS(id->name) == ID_ME) {
std::optional<blender::bke::AttributeMetaData> metadata = attributes->lookup_meta_data(name);
if (metadata->data_type == CD_PROP_FLOAT2) {
/* remove UV sub-attributes. */
char buffer_src[MAX_CUSTOMDATA_LAYER_NAME];
BKE_id_attribute_remove(id, BKE_uv_map_vert_select_name_get(name, buffer_src), reports);
BKE_id_attribute_remove(id, BKE_uv_map_edge_select_name_get(name, buffer_src), reports);
BKE_id_attribute_remove(id, BKE_uv_map_pin_name_get(name, buffer_src), reports);
const std::string name_copy = name;
std::optional<blender::bke::AttributeMetaData> metadata = attributes->lookup_meta_data(
name_copy);
if (!metadata) {
return false;
}
/* Update active and default color attributes. */
Mesh *mesh = reinterpret_cast<Mesh *>(id);
const bool is_active_color_attribute = name_copy == StringRef(mesh->active_color_attribute);
const bool is_default_color_attribute = name_copy == StringRef(mesh->default_color_attribute);
const int active_color_index = color_name_to_index(id, mesh->active_color_attribute);
const int default_color_index = color_name_to_index(id, mesh->default_color_attribute);
if (!attributes->remove(name_copy)) {
BLI_assert_unreachable();
}
if (is_active_color_attribute) {
BKE_id_attributes_active_color_set(
id, color_name_from_index(id, color_clamp_index(id, active_color_index)));
}
if (is_default_color_attribute) {
BKE_id_attributes_default_color_set(
id, color_name_from_index(id, color_clamp_index(id, default_color_index)));
}
if (metadata->data_type == CD_PROP_FLOAT2 && metadata->domain == ATTR_DOMAIN_CORNER) {
char buffer[MAX_CUSTOMDATA_LAYER_NAME];
attributes->remove(BKE_uv_map_vert_select_name_get(name_copy.c_str(), buffer));
attributes->remove(BKE_uv_map_edge_select_name_get(name_copy.c_str(), buffer));
attributes->remove(BKE_uv_map_pin_name_get(name_copy.c_str(), buffer));
}
return true;
}
return attributes->remove(name);
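The name_copy handling above guards against a subtle lifetime issue: `name` may point into storage owned by the very layer being removed, so it must be copied before the layer is freed and before the active/default color bookkeeping runs. A minimal standalone sketch of the hazard (not Blender code, simplified types):

#include <string>
#include <vector>

struct Layer {
  std::string name;
};

/* Remove the layer called `name`; `name` may point into the layer's own storage. */
static bool remove_layer(std::vector<Layer> &layers, const char *name)
{
  const std::string name_copy = name; /* Copy before the pointer can dangle. */
  for (size_t i = 0; i < layers.size(); i++) {
    if (layers[i].name == name_copy) {
      layers.erase(layers.begin() + i); /* May free the memory `name` pointed to. */
      return true;
    }
  }
  return false;
}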

View File

@ -14,6 +14,7 @@
#include "DNA_scene_types.h"
#include "DNA_screen_types.h"
#include "DNA_space_types.h"
#include "DNA_workspace_types.h"
#include "BLI_fileops.h"
@ -62,6 +63,81 @@
# include "BPY_extern.h"
#endif
/* -------------------------------------------------------------------- */
/** \name Blend/Library Paths
* \{ */
bool BKE_blendfile_extension_check(const char *str)
{
const char *ext_test[4] = {".blend", ".ble", ".blend.gz", nullptr};
return BLI_path_extension_check_array(str, ext_test);
}
bool BKE_blendfile_library_path_explode(const char *path,
char *r_dir,
char **r_group,
char **r_name)
{
/* We might get some data names with slashes,
* so we have to walk up the path until we find the blend file itself;
* then we know the next path item is the group, and everything after that is the data name. */
char *slash = nullptr, *prev_slash = nullptr, c = '\0';
r_dir[0] = '\0';
if (r_group) {
*r_group = nullptr;
}
if (r_name) {
*r_name = nullptr;
}
/* if path leads to an existing directory, we can be sure we're not (in) a library */
if (BLI_is_dir(path)) {
return false;
}
BLI_strncpy(r_dir, path, FILE_MAX_LIBEXTRA);
while ((slash = (char *)BLI_path_slash_rfind(r_dir))) {
char tc = *slash;
*slash = '\0';
if (BKE_blendfile_extension_check(r_dir) && BLI_is_file(r_dir)) {
break;
}
if (STREQ(r_dir, BLO_EMBEDDED_STARTUP_BLEND)) {
break;
}
if (prev_slash) {
*prev_slash = c;
}
prev_slash = slash;
c = tc;
}
if (!slash) {
return false;
}
if (slash[1] != '\0') {
BLI_assert(strlen(slash + 1) < BLO_GROUP_MAX);
if (r_group) {
*r_group = slash + 1;
}
}
if (prev_slash && (prev_slash[1] != '\0')) {
BLI_assert(strlen(prev_slash + 1) < MAX_ID_NAME - 2);
if (r_name) {
*r_name = prev_slash + 1;
}
}
return true;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Blend File IO (High Level)
* \{ */

View File

@ -1493,6 +1493,11 @@ AssetHandle CTX_wm_asset_handle(const bContext *C, bool *r_is_valid)
return AssetHandle{nullptr};
}
AssetRepresentation *CTX_wm_asset(const bContext *C)
{
return static_cast<AssetRepresentation *>(ctx_data_pointer_get(C, "asset"));
}
Depsgraph *CTX_data_depsgraph_pointer(const bContext *C)
{
Main *bmain = CTX_data_main(C);

View File

@ -2686,6 +2686,15 @@ void CustomData_clear_layer_flag(CustomData *data, const int type, const int fla
}
}
bool CustomData_layer_is_anonymous(const struct CustomData *data, int type, int n)
{
const int layer_index = CustomData_get_layer_index_n(data, type, n);
BLI_assert(layer_index >= 0);
return data->layers[layer_index].anonymous_id != nullptr;
}
static bool customData_resize(CustomData *data, const int amount)
{
CustomDataLayer *tmp = static_cast<CustomDataLayer *>(

View File

@ -743,11 +743,9 @@ static GeometrySet curve_calc_modifiers_post(Depsgraph *depsgraph,
Mesh *mesh = geometry_set.get_mesh_for_write();
if (mti->type == eModifierTypeType_OnlyDeform) {
int totvert;
float(*vertex_coords)[3] = BKE_mesh_vert_coords_alloc(mesh, &totvert);
mti->deformVerts(md, &mectx_deform, mesh, vertex_coords, totvert);
BKE_mesh_vert_coords_apply(mesh, vertex_coords);
MEM_freeN(vertex_coords);
mti->deformVerts(
md, &mectx_deform, mesh, BKE_mesh_vert_positions_for_write(mesh), mesh->totvert);
BKE_mesh_tag_positions_changed(mesh);
}
else {
Mesh *output_mesh = mti->modifyMesh(md, &mectx_apply, mesh);

View File

@ -387,7 +387,7 @@ TEST(BKE_fcurve, BKE_fcurve_calc_range)
/* Curve samples. */
const int sample_start = 1;
const int sample_end = 20;
fcurve_store_samples(fcu, NULL, sample_start, sample_end, fcurve_samplingcb_evalcurve);
fcurve_store_samples(fcu, nullptr, sample_start, sample_end, fcurve_samplingcb_evalcurve);
success = BKE_fcurve_calc_range(fcu, &min, &max, true);
EXPECT_TRUE(success) << "FCurve samples should have a range.";
@ -421,8 +421,11 @@ TEST(BKE_fcurve, BKE_fcurve_calc_bounds)
bool success;
/* All keys. */
success = BKE_fcurve_calc_bounds(
fcu, false /* select only */, false /* include handles */, NULL /* frame range */, &bounds);
success = BKE_fcurve_calc_bounds(fcu,
false /* select only */,
false /* include handles */,
nullptr /* frame range */,
&bounds);
EXPECT_TRUE(success) << "A non-empty FCurve should have bounds.";
EXPECT_FLOAT_EQ(fcu->bezt[0].vec[1][0], bounds.xmin);
EXPECT_FLOAT_EQ(fcu->bezt[4].vec[1][0], bounds.xmax);
@ -430,16 +433,22 @@ TEST(BKE_fcurve, BKE_fcurve_calc_bounds)
EXPECT_FLOAT_EQ(fcu->bezt[2].vec[1][1], bounds.ymax);
/* Only selected. */
success = BKE_fcurve_calc_bounds(
fcu, true /* select only */, false /* include handles */, NULL /* frame range */, &bounds);
success = BKE_fcurve_calc_bounds(fcu,
true /* select only */,
false /* include handles */,
nullptr /* frame range */,
&bounds);
EXPECT_FALSE(success)
<< "Using selected keyframes only should not find bounds if nothing is selected.";
fcu->bezt[1].f2 |= SELECT;
fcu->bezt[3].f2 |= SELECT;
success = BKE_fcurve_calc_bounds(
fcu, true /* select only */, false /* include handles */, NULL /* frame range */, &bounds);
success = BKE_fcurve_calc_bounds(fcu,
true /* select only */,
false /* include handles */,
nullptr /* frame range */,
&bounds);
EXPECT_TRUE(success) << "Selected keys should have been found.";
EXPECT_FLOAT_EQ(fcu->bezt[1].vec[1][0], bounds.xmin);
EXPECT_FLOAT_EQ(fcu->bezt[3].vec[1][0], bounds.xmax);
@ -447,8 +456,11 @@ TEST(BKE_fcurve, BKE_fcurve_calc_bounds)
EXPECT_FLOAT_EQ(fcu->bezt[3].vec[1][1], bounds.ymax);
/* Including handles. */
success = BKE_fcurve_calc_bounds(
fcu, false /* select only */, true /* include handles */, NULL /* frame range */, &bounds);
success = BKE_fcurve_calc_bounds(fcu,
false /* select only */,
true /* include handles */,
nullptr /* frame range */,
&bounds);
EXPECT_TRUE(success) << "A non-empty FCurve should have bounds including handles.";
EXPECT_FLOAT_EQ(fcu->bezt[0].vec[0][0], bounds.xmin);
EXPECT_FLOAT_EQ(fcu->bezt[4].vec[2][0], bounds.xmax);
@ -499,10 +511,13 @@ TEST(BKE_fcurve, BKE_fcurve_calc_bounds)
/* Curve samples. */
const int sample_start = 1;
const int sample_end = 20;
fcurve_store_samples(fcu, NULL, sample_start, sample_end, fcurve_samplingcb_evalcurve);
fcurve_store_samples(fcu, nullptr, sample_start, sample_end, fcurve_samplingcb_evalcurve);
success = BKE_fcurve_calc_bounds(
fcu, false /* select only */, false /* include handles */, NULL /* frame range */, &bounds);
success = BKE_fcurve_calc_bounds(fcu,
false /* select only */,
false /* include handles */,
nullptr /* frame range */,
&bounds);
EXPECT_TRUE(success) << "FCurve samples should have a range.";
EXPECT_FLOAT_EQ(sample_start, bounds.xmin);

View File

@ -983,7 +983,7 @@ static float *gpencil_stroke_points_from_editcurve_fixed_resolu(bGPDcurve_point
bool is_cyclic,
int *r_points_len)
{
/* One stride contains: x, y, z, pressure, strength, Vr, Vg, Vb, Vmix_factor */
/* One stride contains: `x, y, z, pressure, strength, Vr, Vg, Vb, Vmix_factor`. */
const uint stride = sizeof(float[9]);
const uint array_last = curve_point_array_len - 1;
const uint resolu_stride = resolution * stride;

View File

@ -685,11 +685,9 @@ void BKE_mball_data_update(Depsgraph *depsgraph, Scene *scene, Object *ob)
mesh->totcol = mball->totcol;
if (ob->parent && ob->parent->type == OB_LATTICE && ob->partype == PARSKEL) {
int verts_num;
float(*positions)[3] = BKE_mesh_vert_coords_alloc(mesh, &verts_num);
BKE_lattice_deform_coords(ob->parent, ob, positions, verts_num, 0, nullptr, 1.0f);
BKE_mesh_vert_coords_apply(mesh, positions);
MEM_freeN(positions);
BKE_lattice_deform_coords(
ob->parent, ob, BKE_mesh_vert_positions_for_write(mesh), mesh->totvert, 0, nullptr, 1.0f);
BKE_mesh_tag_positions_changed(mesh);
}
ob->runtime.geometry_set_eval = new GeometrySet(GeometrySet::create_with_mesh(mesh));

View File

@ -472,8 +472,6 @@ void BKE_mesh_to_curve_nurblist(const Mesh *me, ListBase *nurblist, const int ed
const Span<MPoly> polys = me->polys();
const Span<MLoop> loops = me->loops();
int totedges = 0;
/* only to detect edge polylines */
int *edge_users;
@ -497,7 +495,6 @@ void BKE_mesh_to_curve_nurblist(const Mesh *me, ListBase *nurblist, const int ed
edl->edge = &mesh_edges[i];
BLI_addtail(&edges, edl);
totedges++;
}
}
MEM_freeN(edge_users);
@ -519,7 +516,6 @@ void BKE_mesh_to_curve_nurblist(const Mesh *me, ListBase *nurblist, const int ed
appendPolyLineVert(&polyline, endVert);
totpoly++;
BLI_freelinkN(&edges, edges.last);
totedges--;
while (ok) { /* while connected edges are found... */
EdgeLink *edl = (EdgeLink *)edges.last;
@ -531,10 +527,9 @@ void BKE_mesh_to_curve_nurblist(const Mesh *me, ListBase *nurblist, const int ed
if (edge->v1 == endVert) {
endVert = edge->v2;
appendPolyLineVert(&polyline, edge->v2);
appendPolyLineVert(&polyline, endVert);
totpoly++;
BLI_freelinkN(&edges, edl);
totedges--;
ok = true;
}
else if (edge->v2 == endVert) {
@ -542,7 +537,6 @@ void BKE_mesh_to_curve_nurblist(const Mesh *me, ListBase *nurblist, const int ed
appendPolyLineVert(&polyline, endVert);
totpoly++;
BLI_freelinkN(&edges, edl);
totedges--;
ok = true;
}
else if (edge->v1 == startVert) {
@ -550,7 +544,6 @@ void BKE_mesh_to_curve_nurblist(const Mesh *me, ListBase *nurblist, const int ed
prependPolyLineVert(&polyline, startVert);
totpoly++;
BLI_freelinkN(&edges, edl);
totedges--;
ok = true;
}
else if (edge->v2 == startVert) {
@ -558,7 +551,6 @@ void BKE_mesh_to_curve_nurblist(const Mesh *me, ListBase *nurblist, const int ed
prependPolyLineVert(&polyline, startVert);
totpoly++;
BLI_freelinkN(&edges, edl);
totedges--;
ok = true;
}
@ -582,18 +574,18 @@ void BKE_mesh_to_curve_nurblist(const Mesh *me, ListBase *nurblist, const int ed
/* create new 'nurb' within the curve */
nu = MEM_new<Nurb>("MeshNurb", blender::dna::shallow_zero_initialize());
nu->pntsu = polys.size();
nu->pntsu = totpoly;
nu->pntsv = 1;
nu->orderu = 4;
nu->flagu = CU_NURB_ENDPOINT | (closed ? CU_NURB_CYCLIC : 0); /* endpoint */
nu->resolu = 12;
nu->bp = (BPoint *)MEM_calloc_arrayN(polys.size(), sizeof(BPoint), "bpoints");
nu->bp = (BPoint *)MEM_calloc_arrayN(totpoly, sizeof(BPoint), "bpoints");
/* add points */
vl = (VertLink *)polyline.first;
int i;
for (i = 0, bp = nu->bp; i < polys.size(); i++, bp++, vl = (VertLink *)vl->next) {
for (i = 0, bp = nu->bp; i < totpoly; i++, bp++, vl = (VertLink *)vl->next) {
copy_v3_v3(bp->vec, positions[vl->index]);
bp->f1 = SELECT;
bp->radius = bp->weight = 1.0;

View File

@ -1655,6 +1655,19 @@ void BKE_mesh_legacy_convert_mpoly_to_material_indices(Mesh *mesh)
/** \name Generic UV Map Conversion
* \{ */
static const bool *layers_find_bool_named(const Span<CustomDataLayer> layers,
const blender::StringRef name)
{
for (const CustomDataLayer &layer : layers) {
if (layer.type == CD_PROP_BOOL) {
if (layer.name == name) {
return static_cast<const bool *>(layer.data);
}
}
}
return nullptr;
}
void BKE_mesh_legacy_convert_uvs_to_struct(
Mesh *mesh,
blender::ResourceScope &temp_mloopuv_for_convert,
@ -1662,6 +1675,7 @@ void BKE_mesh_legacy_convert_uvs_to_struct(
{
using namespace blender;
using namespace blender::bke;
const int loops_num = mesh->totloop;
Vector<CustomDataLayer, 16> new_layer_to_write;
/* Don't write the boolean UV map sublayers which will be written in the legacy #MLoopUV type. */
@ -1686,20 +1700,19 @@ void BKE_mesh_legacy_convert_uvs_to_struct(
new_layer_to_write.append(layer);
continue;
}
const Span<float2> coords{static_cast<const float2 *>(layer.data), mesh->totloop};
const Span<float2> coords{static_cast<const float2 *>(layer.data), loops_num};
CustomDataLayer mloopuv_layer = layer;
mloopuv_layer.type = CD_MLOOPUV;
MutableSpan<MLoopUV> mloopuv = temp_mloopuv_for_convert.construct<Array<MLoopUV>>(
mesh->totloop);
MutableSpan<MLoopUV> mloopuv = temp_mloopuv_for_convert.construct<Array<MLoopUV>>(loops_num);
mloopuv_layer.data = mloopuv.data();
char buffer[MAX_CUSTOMDATA_LAYER_NAME];
const bool *vert_selection = static_cast<const bool *>(CustomData_get_layer_named(
&mesh->ldata, CD_PROP_BOOL, BKE_uv_map_vert_select_name_get(layer.name, buffer)));
const bool *edge_selection = static_cast<const bool *>(CustomData_get_layer_named(
&mesh->ldata, CD_PROP_BOOL, BKE_uv_map_edge_select_name_get(layer.name, buffer)));
const bool *pin = static_cast<const bool *>(CustomData_get_layer_named(
&mesh->ldata, CD_PROP_BOOL, BKE_uv_map_pin_name_get(layer.name, buffer)));
const bool *vert_selection = layers_find_bool_named(
loop_layers_to_write, BKE_uv_map_vert_select_name_get(layer.name, buffer));
const bool *edge_selection = layers_find_bool_named(
loop_layers_to_write, BKE_uv_map_edge_select_name_get(layer.name, buffer));
const bool *pin = layers_find_bool_named(loop_layers_to_write,
BKE_uv_map_pin_name_get(layer.name, buffer));
threading::parallel_for(mloopuv.index_range(), 2048, [&](IndexRange range) {
for (const int i : range) {

View File

@ -262,9 +262,8 @@ void BKE_mesh_remap_find_best_match_from_mesh(const float (*vert_positions_dst)[
float best_match = FLT_MAX, match;
const int numverts_src = me_src->totvert;
float(*vcos_src)[3] = BKE_mesh_vert_coords_alloc(me_src, nullptr);
mesh_calc_eigen_matrix(nullptr, (const float(*)[3])vcos_src, numverts_src, mat_src);
const float(*vcos_src)[3] = BKE_mesh_vert_positions(me_src);
mesh_calc_eigen_matrix(nullptr, vcos_src, numverts_src, mat_src);
mesh_calc_eigen_matrix(vert_positions_dst, nullptr, numverts_dst, mat_dst);
BLI_space_transform_global_from_matrices(r_space_transform, mat_dst, mat_src);
@ -289,8 +288,6 @@ void BKE_mesh_remap_find_best_match_from_mesh(const float (*vert_positions_dst)[
}
BLI_space_transform_global_from_matrices(r_space_transform, best_mat_dst, mat_src);
MEM_freeN(vcos_src);
}
/** \} */
@ -516,7 +513,7 @@ void BKE_mesh_remap_calc_verts_from_mesh(const int mode,
}
else if (ELEM(mode, MREMAP_MODE_VERT_EDGE_NEAREST, MREMAP_MODE_VERT_EDGEINTERP_NEAREST)) {
const blender::Span<MEdge> edges_src = me_src->edges();
float(*vcos_src)[3] = BKE_mesh_vert_coords_alloc(me_src, nullptr);
const float(*vcos_src)[3] = BKE_mesh_vert_positions(me_src);
BKE_bvhtree_from_mesh_get(&treedata, me_src, BVHTREE_FROM_EDGES, 2);
nearest.index = -1;
@ -561,8 +558,6 @@ void BKE_mesh_remap_calc_verts_from_mesh(const int mode,
BKE_mesh_remap_item_define_invalid(r_map, i);
}
}
MEM_freeN(vcos_src);
}
else if (ELEM(mode,
MREMAP_MODE_VERT_POLY_NEAREST,
@ -570,7 +565,7 @@ void BKE_mesh_remap_calc_verts_from_mesh(const int mode,
MREMAP_MODE_VERT_POLYINTERP_VNORPROJ)) {
const blender::Span<MPoly> polys_src = me_src->polys();
const blender::Span<MLoop> loops_src = me_src->loops();
float(*vcos_src)[3] = BKE_mesh_vert_coords_alloc(me_src, nullptr);
const float(*vcos_src)[3] = BKE_mesh_vert_positions(me_src);
const blender::Span<blender::float3> vert_normals_dst = me_dst->vert_normals();
size_t tmp_buff_size = MREMAP_DEFAULT_BUFSIZE;
@ -598,7 +593,7 @@ void BKE_mesh_remap_calc_verts_from_mesh(const int mode,
const MLoopTri *lt = &treedata.looptri[rayhit.index];
const int sources_num = mesh_remap_interp_poly_data_get(polys_src[lt->poly],
loops_src,
(const float(*)[3])vcos_src,
vcos_src,
rayhit.co,
&tmp_buff_size,
&vcos,
@ -635,7 +630,7 @@ void BKE_mesh_remap_calc_verts_from_mesh(const int mode,
int index;
mesh_remap_interp_poly_data_get(polys_src[lt->poly],
loops_src,
(const float(*)[3])vcos_src,
vcos_src,
nearest.co,
&tmp_buff_size,
&vcos,
@ -650,7 +645,7 @@ void BKE_mesh_remap_calc_verts_from_mesh(const int mode,
else if (mode == MREMAP_MODE_VERT_POLYINTERP_NEAREST) {
const int sources_num = mesh_remap_interp_poly_data_get(polys_src[lt->poly],
loops_src,
(const float(*)[3])vcos_src,
vcos_src,
nearest.co,
&tmp_buff_size,
&vcos,
@ -670,7 +665,6 @@ void BKE_mesh_remap_calc_verts_from_mesh(const int mode,
}
}
MEM_freeN(vcos_src);
MEM_freeN(vcos);
MEM_freeN(indices);
MEM_freeN(weights);
@ -721,7 +715,7 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
if (mode == MREMAP_MODE_EDGE_VERT_NEAREST) {
const int num_verts_src = me_src->totvert;
const blender::Span<MEdge> edges_src = me_src->edges();
float(*vcos_src)[3] = BKE_mesh_vert_coords_alloc(me_src, nullptr);
const float(*vcos_src)[3] = BKE_mesh_vert_positions(me_src);
MeshElemMap *vert_to_edge_src_map;
int *vert_to_edge_src_map_mem;
@ -840,7 +834,6 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
}
}
MEM_freeN(vcos_src);
MEM_freeN(v_dst_to_src_map);
MEM_freeN(vert_to_edge_src_map);
MEM_freeN(vert_to_edge_src_map_mem);
@ -874,7 +867,7 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
const blender::Span<MEdge> edges_src = me_src->edges();
const blender::Span<MPoly> polys_src = me_src->polys();
const blender::Span<MLoop> loops_src = me_src->loops();
float(*vcos_src)[3] = BKE_mesh_vert_coords_alloc(me_src, nullptr);
const float(*vcos_src)[3] = BKE_mesh_vert_positions(me_src);
BKE_bvhtree_from_mesh_get(&treedata, me_src, BVHTREE_FROM_LOOPTRI, 2);
@ -900,8 +893,8 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
for (; nloops--; ml_src++) {
const MEdge *edge_src = &edges_src[ml_src->e];
float *co1_src = vcos_src[edge_src->v1];
float *co2_src = vcos_src[edge_src->v2];
const float *co1_src = vcos_src[edge_src->v1];
const float *co2_src = vcos_src[edge_src->v2];
float co_src[3];
float dist_sq;
@ -921,8 +914,6 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
BKE_mesh_remap_item_define_invalid(r_map, i);
}
}
MEM_freeN(vcos_src);
}
else if (mode == MREMAP_MODE_EDGE_EDGEINTERP_VNORPROJ) {
const int num_rays_min = 5, num_rays_max = 100;
@ -1308,7 +1299,7 @@ void BKE_mesh_remap_calc_loops_from_mesh(const int mode,
const blender::Span<blender::float3> positions_src = me_src->vert_positions();
const int num_verts_src = me_src->totvert;
float(*vcos_src)[3] = nullptr;
const float(*vcos_src)[3] = nullptr;
const blender::Span<MEdge> edges_src = me_src->edges();
const blender::Span<MPoly> polys_src = me_src->polys();
const blender::Span<MLoop> loops_src = me_src->loops();
@ -1328,7 +1319,7 @@ void BKE_mesh_remap_calc_loops_from_mesh(const int mode,
size_t islands_res_buff_size = MREMAP_DEFAULT_BUFSIZE;
if (!use_from_vert) {
vcos_src = BKE_mesh_vert_coords_alloc(me_src, nullptr);
vcos_src = BKE_mesh_vert_positions(me_src);
vcos_interp = static_cast<float(*)[3]>(
MEM_mallocN(sizeof(*vcos_interp) * buff_size_interp, __func__));
@ -2041,7 +2032,7 @@ void BKE_mesh_remap_calc_loops_from_mesh(const int mode,
if (mode == MREMAP_MODE_LOOP_POLY_NEAREST) {
mesh_remap_interp_poly_data_get(poly,
loops_src,
(const float(*)[3])vcos_src,
vcos_src,
hit_co,
&buff_size_interp,
&vcos_interp,
@ -2060,18 +2051,17 @@ void BKE_mesh_remap_calc_loops_from_mesh(const int mode,
&full_weight);
}
else {
const int sources_num = mesh_remap_interp_poly_data_get(
poly,
loops_src,
(const float(*)[3])vcos_src,
hit_co,
&buff_size_interp,
&vcos_interp,
true,
&indices_interp,
&weights_interp,
true,
nullptr);
const int sources_num = mesh_remap_interp_poly_data_get(poly,
loops_src,
vcos_src,
hit_co,
&buff_size_interp,
&vcos_interp,
true,
&indices_interp,
&weights_interp,
true,
nullptr);
mesh_remap_item_define(r_map,
lidx_dst,
@ -2113,9 +2103,6 @@ void BKE_mesh_remap_calc_loops_from_mesh(const int mode,
BLI_astar_solution_free(&as_solution);
}
if (vcos_src) {
MEM_freeN(vcos_src);
}
if (vert_to_loop_map_src) {
MEM_freeN(vert_to_loop_map_src);
}

View File

@ -2204,10 +2204,8 @@ static PBVH *build_pbvh_from_regular_mesh(Object *ob, Mesh *me_eval_deform, bool
const bool is_deformed = check_sculpt_object_deformed(ob, true);
if (is_deformed && me_eval_deform != nullptr) {
int totvert;
float(*v_cos)[3] = BKE_mesh_vert_coords_alloc(me_eval_deform, &totvert);
BKE_pbvh_vert_coords_apply(pbvh, v_cos, totvert);
MEM_freeN(v_cos);
BKE_pbvh_vert_coords_apply(
pbvh, BKE_mesh_vert_positions(me_eval_deform), me_eval_deform->totvert);
}
return pbvh;

View File

@ -1530,7 +1530,6 @@ void BKE_shrinkwrap_mesh_nearest_surface_deform(bContext *C, Object *ob_source,
Scene *sce = CTX_data_scene(C);
ShrinkwrapModifierData ssmd = {{nullptr}};
ModifierEvalContext ctx = {depsgraph, ob_source, ModifierApplyFlag(0)};
int totvert;
ssmd.target = ob_target;
ssmd.shrinkType = MOD_SHRINKWRAP_NEAREST_SURFACE;
@ -1538,13 +1537,17 @@ void BKE_shrinkwrap_mesh_nearest_surface_deform(bContext *C, Object *ob_source,
ssmd.keepDist = 0.0f;
Mesh *src_me = static_cast<Mesh *>(ob_source->data);
float(*vertexCos)[3] = BKE_mesh_vert_coords_alloc(src_me, &totvert);
shrinkwrapModifier_deform(&ssmd, &ctx, sce, ob_source, src_me, nullptr, -1, vertexCos, totvert);
BKE_mesh_vert_coords_apply(src_me, vertexCos);
MEM_freeN(vertexCos);
shrinkwrapModifier_deform(&ssmd,
&ctx,
sce,
ob_source,
src_me,
nullptr,
-1,
BKE_mesh_vert_positions_for_write(src_me),
src_me->totvert);
BKE_mesh_tag_positions_changed(src_me);
}
void BKE_shrinkwrap_remesh_target_project(Mesh *src_me, Mesh *target_me, Object *ob_target)

View File

@ -1313,6 +1313,19 @@ const VolumeGrid *BKE_volume_grid_find_for_read(const Volume *volume, const char
return nullptr;
}
VolumeGrid *BKE_volume_grid_find_for_write(Volume *volume, const char *name)
{
int num_grids = BKE_volume_num_grids(volume);
for (int i = 0; i < num_grids; i++) {
VolumeGrid *grid = BKE_volume_grid_get_for_write(volume, i);
if (STREQ(BKE_volume_grid_name(grid), name)) {
return grid;
}
}
return nullptr;
}
/* Grid Loading */
bool BKE_volume_grid_load(const Volume *volume, const VolumeGrid *grid)

View File

@ -10,6 +10,24 @@
namespace blender::array_utils {
/**
* Fill the destination span by copying all values from the `src` array. Threaded based on
* grain-size.
*/
void copy(const GVArray &src, GMutableSpan dst, int64_t grain_size = 4096);
/**
* Fill the destination span by copying all values from the `src` array. Threaded based on
* grain-size.
*/
template<typename T>
inline void copy(const Span<T> src, MutableSpan<T> dst, const int64_t grain_size = 4096)
{
BLI_assert(src.size() == dst.size());
threading::parallel_for(src.index_range(), grain_size, [&](const IndexRange range) {
dst.slice(range).copy_from(src.slice(range));
});
}
/**
* Fill the destination span by copying masked values from the `src` array. Threaded based on
* grain-size.
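A minimal standalone sketch of the chunked copy declared above (not Blender code): the range is split into grain_size chunks; Blender's version hands each chunk to threading::parallel_for, while this sketch processes them serially to stay self-contained.

#include <algorithm>
#include <cstdint>
#include <vector>

template<typename T>
void chunked_copy(const std::vector<T> &src, std::vector<T> &dst, int64_t grain_size = 4096)
{
  /* Mirrors the BLI_assert(src.size() == dst.size()) contract of the real function. */
  const int64_t size = int64_t(src.size());
  for (int64_t start = 0; start < size; start += grain_size) {
    const int64_t end = std::min(start + grain_size, size);
    std::copy(src.begin() + start, src.begin() + end, dst.begin() + start);
  }
}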

View File

@ -420,7 +420,7 @@ struct CartesianBasis {
};
/**
* Create an CartesianBasis for converting from \a a orientation to \a b orientation.
* Create a CartesianBasis using two orthogonal axes.
* The third axis is chosen by the right-hand rule to follow the Blender coordinate system.
* \a forward is Y axis in blender coordinate system.
* \a up is Z axis in blender coordinate system.
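A small standalone sketch of the right-hand-rule choice described above (not the Blender implementation): with \a forward mapped to Y and \a up mapped to Z, the remaining X axis is the cross product forward × up, since Y × Z == X in a right-handed basis.

#include <array>
#include <cstdio>

using float3 = std::array<float, 3>;

static float3 cross(const float3 &a, const float3 &b)
{
  return {a[1] * b[2] - a[2] * b[1], a[2] * b[0] - a[0] * b[2], a[0] * b[1] - a[1] * b[0]};
}

int main()
{
  const float3 forward = {0.0f, 1.0f, 0.0f}; /* Y axis. */
  const float3 up = {0.0f, 0.0f, 1.0f};      /* Z axis. */
  const float3 right = cross(forward, up);   /* Yields (1, 0, 0), the X axis. */
  std::printf("%g %g %g\n", right[0], right[1], right[2]);
  return 0;
}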

View File

@ -156,7 +156,29 @@ template<size_t Size, size_t Alignment> class AlignedBuffer {
*/
template<typename T, int64_t Size = 1> class TypedBuffer {
private:
BLI_NO_UNIQUE_ADDRESS AlignedBuffer<sizeof(T) * size_t(Size), alignof(T)> buffer_;
/** Required so that `sizeof(T)` is not required when `Size` is 0. */
static constexpr size_t get_size()
{
if constexpr (Size == 0) {
return 0;
}
else {
return sizeof(T) * size_t(Size);
}
}
/** Required so that `alignof(T)` is not required when `Size` is 0. */
static constexpr size_t get_alignment()
{
if constexpr (Size == 0) {
return 1;
}
else {
return alignof(T);
}
}
BLI_NO_UNIQUE_ADDRESS AlignedBuffer<get_size(), get_alignment()> buffer_;
public:
operator T *()

View File

@ -108,6 +108,17 @@ class Vector {
template<typename OtherT, int64_t OtherInlineBufferCapacity, typename OtherAllocator>
friend class Vector;
/** Required in case `T` is an incomplete type. */
static constexpr bool is_nothrow_move_constructible()
{
if constexpr (InlineBufferCapacity == 0) {
return true;
}
else {
return std::is_nothrow_move_constructible_v<T>;
}
}
public:
/**
* Create an empty vector.
@ -234,7 +245,7 @@ class Vector {
*/
template<int64_t OtherInlineBufferCapacity>
Vector(Vector<T, OtherInlineBufferCapacity, Allocator> &&other) noexcept(
std::is_nothrow_move_constructible_v<T>)
is_nothrow_move_constructible())
: Vector(NoExceptConstructor(), other.allocator_)
{
const int64_t size = other.size();

View File

@ -4,6 +4,15 @@
namespace blender::array_utils {
void copy(const GVArray &src, GMutableSpan dst, const int64_t grain_size)
{
BLI_assert(src.type() == dst.type());
BLI_assert(src.size() == dst.size());
threading::parallel_for(src.index_range(), grain_size, [&](const IndexRange range) {
src.materialize_to_uninitialized(range, dst.data());
});
}
void copy(const GVArray &src,
const IndexMask selection,
GMutableSpan dst,

View File

@ -63,18 +63,12 @@ TEST(math_rotation_types, Euler3Order)
/* Asserts those match.
* Do not do it in the header to avoid including the DNA header everywhere.
*/
BLI_STATIC_ASSERT(
static_cast<int>(EulerOrder::XYZ) == static_cast<int>(eRotationModes::ROT_MODE_XYZ), "");
BLI_STATIC_ASSERT(
static_cast<int>(EulerOrder::XZY) == static_cast<int>(eRotationModes::ROT_MODE_XZY), "");
BLI_STATIC_ASSERT(
static_cast<int>(EulerOrder::YXZ) == static_cast<int>(eRotationModes::ROT_MODE_YXZ), "");
BLI_STATIC_ASSERT(
static_cast<int>(EulerOrder::YZX) == static_cast<int>(eRotationModes::ROT_MODE_YZX), "");
BLI_STATIC_ASSERT(
static_cast<int>(EulerOrder::ZXY) == static_cast<int>(eRotationModes::ROT_MODE_ZXY), "");
BLI_STATIC_ASSERT(
static_cast<int>(EulerOrder::ZYX) == static_cast<int>(eRotationModes::ROT_MODE_ZYX), "");
BLI_STATIC_ASSERT(int(EulerOrder::XYZ) == int(eRotationModes::ROT_MODE_XYZ), "");
BLI_STATIC_ASSERT(int(EulerOrder::XZY) == int(eRotationModes::ROT_MODE_XZY), "");
BLI_STATIC_ASSERT(int(EulerOrder::YXZ) == int(eRotationModes::ROT_MODE_YXZ), "");
BLI_STATIC_ASSERT(int(EulerOrder::YZX) == int(eRotationModes::ROT_MODE_YZX), "");
BLI_STATIC_ASSERT(int(EulerOrder::ZXY) == int(eRotationModes::ROT_MODE_ZXY), "");
BLI_STATIC_ASSERT(int(EulerOrder::ZYX) == int(eRotationModes::ROT_MODE_ZYX), "");
EXPECT_EQ(float3(Euler3(0, 1, 2, EulerOrder::XYZ).ijk()), float3(0, 1, 2));
EXPECT_EQ(float3(Euler3(0, 1, 2, EulerOrder::XZY).ijk()), float3(0, 2, 1));

View File

@ -859,4 +859,14 @@ TEST(vector, RemoveChunkExceptions)
EXPECT_EQ(vec.size(), 7);
}
struct RecursiveType {
Vector<RecursiveType, 0> my_vector;
};
TEST(vector, RecursiveStructure)
{
RecursiveType my_recursive_type;
my_recursive_type.my_vector.append({});
}
} // namespace blender::tests

View File

@ -313,27 +313,6 @@ void BLO_read_invalidate_message(BlendHandle *bh, struct Main *bmain, const char
#define BLO_GROUP_MAX 32
#define BLO_EMBEDDED_STARTUP_BLEND "<startup.blend>"
/**
* Check whether given path ends with a blend file compatible extension
* (`.blend`, `.ble` or `.blend.gz`).
*
* \param str: The path to check.
* \return true is this path ends with a blender file extension.
*/
bool BLO_has_bfile_extension(const char *str);
/**
* Try to explode given path into its 'library components'
* (i.e. a .blend file, id type/group, and data-block itself).
*
* \param path: the full path to explode.
* \param r_dir: the string that'll contain path up to blend file itself ('library' path).
* WARNING! Must be #FILE_MAX_LIBEXTRA long (it also stores group and name strings)!
* \param r_group: the string that'll contain 'group' part of the path, if any. May be NULL.
* \param r_name: the string that'll contain data's name part of the path, if any. May be NULL.
* \return true if path contains a blend file.
*/
bool BLO_library_path_explode(const char *path, char *r_dir, char **r_group, char **r_name);
/* -------------------------------------------------------------------- */
/** \name BLO Blend File Linking API
* \{ */

View File

@ -1306,76 +1306,6 @@ void blo_filedata_free(FileData *fd)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Public Utilities
* \{ */
bool BLO_has_bfile_extension(const char *str)
{
const char *ext_test[4] = {".blend", ".ble", ".blend.gz", nullptr};
return BLI_path_extension_check_array(str, ext_test);
}
bool BLO_library_path_explode(const char *path, char *r_dir, char **r_group, char **r_name)
{
/* We might get some data names with slashes,
* so we have to go up in path until we find blend file itself,
* then we know next path item is group, and everything else is data name. */
char *slash = nullptr, *prev_slash = nullptr, c = '\0';
r_dir[0] = '\0';
if (r_group) {
*r_group = nullptr;
}
if (r_name) {
*r_name = nullptr;
}
/* if path leads to an existing directory, we can be sure we're not (in) a library */
if (BLI_is_dir(path)) {
return false;
}
strcpy(r_dir, path);
while ((slash = (char *)BLI_path_slash_rfind(r_dir))) {
char tc = *slash;
*slash = '\0';
if (BLO_has_bfile_extension(r_dir) && BLI_is_file(r_dir)) {
break;
}
if (STREQ(r_dir, BLO_EMBEDDED_STARTUP_BLEND)) {
break;
}
if (prev_slash) {
*prev_slash = c;
}
prev_slash = slash;
c = tc;
}
if (!slash) {
return false;
}
if (slash[1] != '\0') {
BLI_assert(strlen(slash + 1) < BLO_GROUP_MAX);
if (r_group) {
*r_group = slash + 1;
}
}
if (prev_slash && (prev_slash[1] != '\0')) {
BLI_assert(strlen(prev_slash + 1) < MAX_ID_NAME - 2);
if (r_name) {
*r_name = prev_slash + 1;
}
}
return true;
}
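For reference, a short usage sketch of the helper removed here (the buffer size macro comes from the doc comment above; the path and results are illustrative):

char dir[FILE_MAX_LIBEXTRA]; /* Sized as required by the doc comment. */
char *group = nullptr;
char *name = nullptr;
if (BLO_library_path_explode("/tmp/assets.blend/Object/Cube", dir, &group, &name)) {
  /* dir == "/tmp/assets.blend", group == "Object", name == "Cube". */
}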
BlendThumbnail *BLO_thumbnail_from_file(const char *filepath)
{
FileData *fd;

View File

@ -5,6 +5,9 @@
* \ingroup depsgraph
*/
#include <memory>
#include <mutex>
#include "intern/depsgraph_registry.h"
#include "BLI_utildefines.h"
@ -13,7 +16,19 @@
namespace blender::deg {
using GraphRegistry = Map<Main *, VectorSet<Depsgraph *>>;
/* Global registry for dependency graphs associated with a main database.
*
* Threads may add or remove depsgraphs for different mains concurrently
* (for example for preview rendering), but not the same main. */
/* Use a pointer for the map value to ensure the span returned by get_all_registered_graphs
* remains valid as other mains are added or removed. */
typedef std::unique_ptr<VectorSet<Depsgraph *>> GraphSetPtr;
struct GraphRegistry {
Map<Main *, GraphSetPtr> map;
std::mutex mutex;
};
static GraphRegistry &get_graph_registry()
{
static GraphRegistry graph_registry;
@ -22,28 +37,37 @@ static GraphRegistry &get_graph_registry()
void register_graph(Depsgraph *depsgraph)
{
GraphRegistry &graph_registry = get_graph_registry();
Main *bmain = depsgraph->bmain;
get_graph_registry().lookup_or_add_default(bmain).add_new(depsgraph);
std::lock_guard<std::mutex> lock{graph_registry.mutex};
graph_registry.map
.lookup_or_add_cb(bmain, []() { return std::make_unique<VectorSet<Depsgraph *>>(); })
->add_new(depsgraph);
}
void unregister_graph(Depsgraph *depsgraph)
{
Main *bmain = depsgraph->bmain;
GraphRegistry &graph_registry = get_graph_registry();
VectorSet<Depsgraph *> &graphs = graph_registry.lookup(bmain);
graphs.remove(depsgraph);
std::lock_guard<std::mutex> lock{graph_registry.mutex};
GraphSetPtr &graphs = graph_registry.map.lookup(bmain);
graphs->remove(depsgraph);
/* If this was the last depsgraph associated with the main, remove the main entry as well. */
if (graphs.is_empty()) {
graph_registry.remove(bmain);
if (graphs->is_empty()) {
graph_registry.map.remove(bmain);
}
}
Span<Depsgraph *> get_all_registered_graphs(Main *bmain)
{
VectorSet<Depsgraph *> *graphs = get_graph_registry().lookup_ptr(bmain);
if (graphs != nullptr) {
return *graphs;
GraphRegistry &graph_registry = get_graph_registry();
std::lock_guard<std::mutex> lock{graph_registry.mutex};
GraphSetPtr *graphs = graph_registry.map.lookup_ptr(bmain);
if (graphs) {
return **graphs;
}
return {};
}
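A standalone analogy for the unique_ptr indirection above (std containers stand in for blender::Map and VectorSet; this illustrates the rationale, it is not Blender code): a reference handed out from a by-value mapped container could be invalidated when the map grows, while a heap-allocated set keeps a stable address.

#include <memory>
#include <unordered_map>
#include <vector>

struct Main;
struct Depsgraph;

using GraphSet = std::vector<Depsgraph *>;

static std::unordered_map<Main *, std::unique_ptr<GraphSet>> registry;

static const GraphSet &graphs_for(Main *bmain)
{
  std::unique_ptr<GraphSet> &set = registry[bmain];
  if (!set) {
    set = std::make_unique<GraphSet>();
  }
  /* Safe to keep across later insertions for other mains: rehashing moves the
   * unique_ptr handles, not the GraphSet objects they point to. */
  return *set;
}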

View File

@ -144,6 +144,7 @@ set(SRC
engines/eevee_next/eevee_film.cc
engines/eevee_next/eevee_hizbuffer.cc
engines/eevee_next/eevee_instance.cc
engines/eevee_next/eevee_irradiance_cache.cc
engines/eevee_next/eevee_light.cc
engines/eevee_next/eevee_material.cc
engines/eevee_next/eevee_motion_blur.cc
@ -275,6 +276,7 @@ set(SRC
engines/eevee_next/eevee_film.hh
engines/eevee_next/eevee_hizbuffer.hh
engines/eevee_next/eevee_instance.hh
engines/eevee_next/eevee_irradiance_cache.hh
engines/eevee_next/eevee_light.hh
engines/eevee_next/eevee_material.hh
engines/eevee_next/eevee_motion_blur.hh
@ -425,6 +427,9 @@ set(GLSL_SRC
engines/eevee_next/shaders/eevee_colorspace_lib.glsl
engines/eevee_next/shaders/eevee_cryptomatte_lib.glsl
engines/eevee_next/shaders/eevee_transparency_lib.glsl
engines/eevee_next/shaders/eevee_debug_surfels_vert.glsl
engines/eevee_next/shaders/eevee_debug_surfels_frag.glsl
engines/eevee_next/shaders/eevee_deferred_light_frag.glsl
engines/eevee_next/shaders/eevee_depth_of_field_accumulator_lib.glsl
engines/eevee_next/shaders/eevee_depth_of_field_bokeh_lut_comp.glsl
engines/eevee_next/shaders/eevee_depth_of_field_downsample_comp.glsl
@ -444,6 +449,7 @@ set(GLSL_SRC
engines/eevee_next/shaders/eevee_film_cryptomatte_post_comp.glsl
engines/eevee_next/shaders/eevee_film_frag.glsl
engines/eevee_next/shaders/eevee_film_lib.glsl
engines/eevee_next/shaders/eevee_gbuffer_lib.glsl
engines/eevee_next/shaders/eevee_geom_curves_vert.glsl
engines/eevee_next/shaders/eevee_geom_gpencil_vert.glsl
engines/eevee_next/shaders/eevee_geom_mesh_vert.glsl
@ -484,6 +490,7 @@ set(GLSL_SRC
engines/eevee_next/shaders/eevee_shadow_tilemap_finalize_comp.glsl
engines/eevee_next/shaders/eevee_shadow_tilemap_init_comp.glsl
engines/eevee_next/shaders/eevee_shadow_tilemap_lib.glsl
engines/eevee_next/shaders/eevee_spherical_harmonics_lib.glsl
engines/eevee_next/shaders/eevee_surf_deferred_frag.glsl
engines/eevee_next/shaders/eevee_surf_depth_frag.glsl
engines/eevee_next/shaders/eevee_surf_forward_frag.glsl

View File

@ -280,7 +280,7 @@ static void eevee_draw_scene(void *vedata)
SET_FLAG_FROM_TEST(clear_bits, (stl->effects->enabled_effects & EFFECT_SSS), GPU_STENCIL_BIT);
GPU_framebuffer_clear(fbl->main_fb, clear_bits, clear_col, clear_depth, clear_stencil);
/* Depth prepass */
/* Depth pre-pass. */
DRW_stats_group_start("Prepass");
DRW_draw_pass(psl->depth_ps);
DRW_stats_group_end();

View File

@ -107,14 +107,14 @@ void EEVEE_lookdev_init(EEVEE_Data *vedata)
/* Make the viewport width scale the lookdev spheres a bit.
* Scale between 1000px and 2000px. */
const float viewport_scale = clamp_f(
BLI_rcti_size_x(rect) / (2000.0f * U.dpi_fac), 0.5f, 1.0f);
const int sphere_size = U.lookdev_sphere_size * U.dpi_fac * viewport_scale;
BLI_rcti_size_x(rect) / (2000.0f * UI_SCALE_FAC), 0.5f, 1.0f);
const int sphere_size = U.lookdev_sphere_size * UI_SCALE_FAC * viewport_scale;
if (sphere_size != effects->sphere_size || rect->xmax != effects->anchor[0] ||
rect->ymin != effects->anchor[1]) {
/* Make sphere resolution adaptive to viewport_scale, DPI and #U.lookdev_sphere_size. */
float res_scale = clamp_f(
(U.lookdev_sphere_size / 400.0f) * viewport_scale * U.dpi_fac, 0.1f, 1.0f);
(U.lookdev_sphere_size / 400.0f) * viewport_scale * UI_SCALE_FAC, 0.1f, 1.0f);
if (res_scale > 0.7f) {
effects->sphere_lod = DRW_LOD_HIGH;

View File

@ -607,7 +607,7 @@ void EEVEE_render_draw(EEVEE_Data *vedata, RenderEngine *engine, RenderLayer *rl
GPU_framebuffer_bind(fbl->main_fb);
GPU_framebuffer_clear_color_depth_stencil(fbl->main_fb, clear_col, clear_depth, clear_stencil);
/* Depth prepass */
/* Depth pre-pass. */
DRW_draw_pass(psl->depth_ps);
/* Create minmax texture */
EEVEE_create_minmax_buffer(vedata, dtxl->depth, -1);

View File

@ -105,6 +105,9 @@
#define RBUFS_AOV_COLOR_SLOT 5
#define RBUFS_AOV_VALUE_SLOT 6
#define RBUFS_CRYPTOMATTE_SLOT 7
/* G-buffer reuses render-pass slots. */
#define GBUF_CLOSURE_SLOT RBUFS_LIGHT_SLOT
#define GBUF_COLOR_SLOT RBUFS_DIFF_COLOR_SLOT
/* Uniform Buffers. */
/* Only during prepass. */

View File

@ -649,7 +649,7 @@ void Film::accumulate(const DRWView *view, GPUTexture *combined_final_tx)
draw::View drw_view("MainView", view);
DRW_manager_get()->submit(accumulate_ps_, drw_view);
inst_.manager->submit(accumulate_ps_, drw_view);
combined_tx_.swap();
weight_tx_.swap();

View File

@ -0,0 +1,89 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
/** \file
* \ingroup eevee
*
* Gbuffer layout used for deferred shading pipeline.
*/
#pragma once
#include "DRW_render.h"
#include "eevee_material.hh"
#include "eevee_shader_shared.hh"
namespace blender::eevee {
class Instance;
/**
* Full-screen textures containing geometric and surface data.
* Used by deferred shading passes. Only one gbuffer is allocated per view
* and is reused for each deferred layer. This is why it can only contain temporary
* textures.
*
* Everything is stored inside two array textures, one for each format. This is to fit
* within the limit on the number of images we can bind in a single shader.
*
* First layer is always for reflection. All parameters to shoot a reflection ray are inside
* this layer.
*
* - Layer 1 : Reflection
* - R : Normal packed X
* - G : Normal packed Y
* - B : Roughness
* - A : Unused (Could be used for anisotropic roughness)
*
* Second layer is either for diffuse or transmission. Materials mixing both are not
* physically based and are uncommon. So in order to save bandwidth and texture memory, we only
* store one. We use random sampling to mix between both. All parameters to shoot a refraction
* ray are inside this layer.
*
* - Layer 2 : Refraction
* - R : Normal packed X
* - G : Normal packed Y
* - B : Roughness (isotropic)
* - A : IOR
*
* - Layer 2 : Diffuse / Sub-Surface Scattering
* - R : Normal packed X
* - G : Normal packed Y
* - B : Thickness
* - A : Unused (Could be used for diffuse roughness)
*
* Layer 3 is only allocated if Sub-Surface Scattering is needed. All parameters for
* screen-space scattering are inside this layer.
*
* - Layer 3 : Sub-Surface Scattering
* - R : Scattering radius R
* - G : Scattering radius G
* - B : Scattering radius B
* - A : Object ID
*
* For each output closure, we also output the color to apply after the lighting computation.
* The color is stored with a 2-bit exponent that allows input colors with components higher than 1.
* Color degradation is expected to happen in this case.
*/
struct GBuffer {
/* TODO(fclem): Use texture from pool once they support texture array. */
Texture closure_tx = {"GbufferClosure"};
Texture color_tx = {"GbufferColor"};
void acquire(int2 extent, eClosureBits closure_bits_)
{
const bool use_sss = (closure_bits_ & CLOSURE_SSS) != 0;
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_SHADER_WRITE;
closure_tx.ensure_2d_array(GPU_RGBA16, extent, use_sss ? 3 : 2, usage);
color_tx.ensure_2d_array(GPU_RGB10_A2, extent, 2, usage);
}
void release()
{
/* TODO(fclem): Use texture from pool once they support texture array. */
// closure_tx.release();
// color_tx.release();
}
};
} // namespace blender::eevee
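The 2-exponent color storage mentioned above can be sketched in plain C++ as follows (it mirrors the gbuffer_color_pack()/gbuffer_color_unpack() functions added in eevee_gbuffer_lib.glsl later in this commit; illustrative only):

#include <algorithm>
#include <array>
#include <cmath>

using float3 = std::array<float, 3>;
using float4 = std::array<float, 4>;

static float4 color_pack(const float3 &color)
{
  const float max_comp = std::max({color[0], color[1], color[2]});
  /* Exponent 0..3 so the mantissa fits in [0, 1]; components above 8 clamp. */
  const float exponent = (max_comp > 1.0f) ?
                             ((max_comp > 2.0f) ? ((max_comp > 4.0f) ? 3.0f : 2.0f) : 1.0f) :
                             0.0f;
  const float scale = std::exp2(exponent);
  /* The 2-bit alpha of the GPU_RGB10_A2 target stores exponent / 3 exactly. */
  return {color[0] / scale, color[1] / scale, color[2] / scale, exponent / 3.0f};
}

static float3 color_unpack(const float4 &packed)
{
  const float scale = std::exp2(packed[3] * 3.0f);
  return {packed[0] * scale, packed[1] * scale, packed[2] * scale};
}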

View File

@ -70,6 +70,7 @@ void Instance::init(const int2 &output_res,
shadows.init();
motion_blur.init();
main_view.init();
irradiance_cache.init();
}
void Instance::set_time(float time)
@ -104,6 +105,7 @@ void Instance::begin_sync()
velocity.begin_sync(); /* NOTE: Also syncs camera. */
lights.begin_sync();
shadows.begin_sync();
pipelines.begin_sync();
cryptomatte.begin_sync();
gpencil_engine_enabled = false;
@ -113,10 +115,10 @@ void Instance::begin_sync()
depth_of_field.sync();
motion_blur.sync();
hiz_buffer.sync();
pipelines.sync();
main_view.sync();
world.sync();
film.sync();
irradiance_cache.sync();
}
void Instance::scene_sync()
@ -204,6 +206,7 @@ void Instance::end_sync()
sampling.end_sync();
film.end_sync();
cryptomatte.end_sync();
pipelines.end_sync();
}
void Instance::render_sync()

View File

@ -19,7 +19,9 @@
#include "eevee_cryptomatte.hh"
#include "eevee_depth_of_field.hh"
#include "eevee_film.hh"
#include "eevee_gbuffer.hh"
#include "eevee_hizbuffer.hh"
#include "eevee_irradiance_cache.hh"
#include "eevee_light.hh"
#include "eevee_material.hh"
#include "eevee_motion_blur.hh"
@ -53,6 +55,7 @@ class Instance {
MotionBlurModule motion_blur;
DepthOfField depth_of_field;
Cryptomatte cryptomatte;
GBuffer gbuffer;
HiZBuffer hiz_buffer;
Sampling sampling;
Camera camera;
@ -60,6 +63,7 @@ class Instance {
RenderBuffers render_buffers;
MainView main_view;
World world;
IrradianceCache irradiance_cache;
/** Input data. */
Depsgraph *depsgraph;
@ -103,7 +107,8 @@ class Instance {
film(*this),
render_buffers(*this),
main_view(*this),
world(*this){};
world(*this),
irradiance_cache(*this){};
~Instance(){};
void init(const int2 &output_res,

View File

@ -0,0 +1,65 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BLI_rand.hh"
#include "eevee_instance.hh"
#include "eevee_irradiance_cache.hh"
namespace blender::eevee {
void IrradianceCache::generate_random_surfels()
{
const int surfels_len = 256;
debug_surfels.resize(surfels_len);
RandomNumberGenerator rng;
rng.seed(0);
for (DebugSurfel &surfel : debug_surfels) {
float3 random = rng.get_unit_float3();
surfel.position = random * 3.0f;
surfel.normal = random;
surfel.color = float4(rng.get_float(), rng.get_float(), rng.get_float(), 1.0f);
}
debug_surfels.push_update();
}
void IrradianceCache::init()
{
if (debug_surfels_sh_ == nullptr) {
debug_surfels_sh_ = inst_.shaders.static_shader_get(DEBUG_SURFELS);
}
/* TODO: Remove this. */
generate_random_surfels();
}
void IrradianceCache::sync()
{
debug_pass_sync();
}
void IrradianceCache::debug_pass_sync()
{
if (inst_.debug_mode == eDebugMode::DEBUG_IRRADIANCE_CACHE_SURFELS) {
debug_surfels_ps_.init();
debug_surfels_ps_.state_set(DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH |
DRW_STATE_DEPTH_LESS_EQUAL);
debug_surfels_ps_.shader_set(debug_surfels_sh_);
debug_surfels_ps_.bind_ssbo("surfels_buf", debug_surfels);
debug_surfels_ps_.push_constant("surfel_radius", 0.25f);
debug_surfels_ps_.draw_procedural(GPU_PRIM_TRI_STRIP, debug_surfels.size(), 4);
}
}
void IrradianceCache::debug_draw(View &view, GPUFrameBuffer *view_fb)
{
if (inst_.debug_mode == eDebugMode::DEBUG_IRRADIANCE_CACHE_SURFELS) {
inst_.info = "Debug Mode: Irradiance Cache Surfels";
GPU_framebuffer_bind(view_fb);
inst_.manager->submit(debug_surfels_ps_, view);
}
}
} // namespace blender::eevee

View File

@ -0,0 +1,33 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include "eevee_shader_shared.hh"
namespace blender::eevee {
class Instance;
class IrradianceCache {
private:
Instance &inst_;
DebugSurfelBuf debug_surfels;
PassSimple debug_surfels_ps_ = {"IrradianceCache.Debug"};
GPUShader *debug_surfels_sh_ = nullptr;
/* TODO: Remove this. */
void generate_random_surfels();
public:
IrradianceCache(Instance &inst) : inst_(inst){};
~IrradianceCache(){};
void init();
void sync();
void debug_pass_sync();
void debug_draw(View &view, GPUFrameBuffer *view_fb);
};
} // namespace blender::eevee

View File

@ -193,11 +193,6 @@ MaterialPass MaterialModule::material_pass_get(Object *ob,
inst_.sampling.reset();
}
if ((pipeline_type == MAT_PIPE_DEFERRED) &&
GPU_material_flag_get(matpass.gpumat, GPU_MATFLAG_SHADER_TO_RGBA)) {
pipeline_type = MAT_PIPE_FORWARD;
}
if (ELEM(pipeline_type,
MAT_PIPE_FORWARD,
MAT_PIPE_FORWARD_PREPASS,
@ -240,10 +235,6 @@ Material &MaterialModule::material_sync(Object *ob,
(has_motion ? MAT_PIPE_DEFERRED_PREPASS_VELOCITY :
MAT_PIPE_DEFERRED_PREPASS);
/* TEST until we have deferred pipeline up and running. */
surface_pipe = MAT_PIPE_FORWARD;
prepass_pipe = has_motion ? MAT_PIPE_FORWARD_PREPASS_VELOCITY : MAT_PIPE_FORWARD_PREPASS;
MaterialKey material_key(blender_mat, geometry_type, surface_pipe);
Material &mat = material_map_.lookup_or_add_cb(material_key, [&]() {

View File

@ -278,4 +278,229 @@ void ForwardPipeline::render(View &view,
/** \} */
/* -------------------------------------------------------------------- */
/** \name Deferred Layer
* \{ */
void DeferredLayer::begin_sync()
{
{
prepass_ps_.init();
{
/* Common resources. */
/* Textures. */
prepass_ps_.bind_texture(RBUFS_UTILITY_TEX_SLOT, inst_.pipelines.utility_tx);
/* Uniform Buf. */
prepass_ps_.bind_ubo(CAMERA_BUF_SLOT, inst_.camera.ubo_get());
inst_.velocity.bind_resources(&prepass_ps_);
inst_.sampling.bind_resources(&prepass_ps_);
}
DRWState state_depth_only = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS;
DRWState state_depth_color = DRW_STATE_WRITE_DEPTH | DRW_STATE_DEPTH_LESS |
DRW_STATE_WRITE_COLOR;
prepass_double_sided_static_ps_ = &prepass_ps_.sub("DoubleSided.Static");
prepass_double_sided_static_ps_->state_set(state_depth_only);
prepass_single_sided_static_ps_ = &prepass_ps_.sub("SingleSided.Static");
prepass_single_sided_static_ps_->state_set(state_depth_only | DRW_STATE_CULL_BACK);
prepass_double_sided_moving_ps_ = &prepass_ps_.sub("DoubleSided.Moving");
prepass_double_sided_moving_ps_->state_set(state_depth_color);
prepass_single_sided_moving_ps_ = &prepass_ps_.sub("SingleSided.Moving");
prepass_single_sided_moving_ps_->state_set(state_depth_color | DRW_STATE_CULL_BACK);
}
{
gbuffer_ps_.init();
gbuffer_ps_.clear_stencil(0x00u);
gbuffer_ps_.state_stencil(0x01u, 0x01u, 0x01u);
{
/* Common resources. */
/* G-buffer. */
gbuffer_ps_.bind_image(GBUF_CLOSURE_SLOT, &inst_.gbuffer.closure_tx);
gbuffer_ps_.bind_image(GBUF_COLOR_SLOT, &inst_.gbuffer.color_tx);
/* RenderPasses. */
gbuffer_ps_.bind_image(RBUFS_NORMAL_SLOT, &inst_.render_buffers.normal_tx);
/* TODO(fclem): Pack all render passes into the same texture. */
// gbuffer_ps_.bind_image(RBUFS_DIFF_COLOR_SLOT, &inst_.render_buffers.diffuse_color_tx);
gbuffer_ps_.bind_image(RBUFS_SPEC_COLOR_SLOT, &inst_.render_buffers.specular_color_tx);
gbuffer_ps_.bind_image(RBUFS_EMISSION_SLOT, &inst_.render_buffers.emission_tx);
/* AOVs. */
gbuffer_ps_.bind_image(RBUFS_AOV_COLOR_SLOT, &inst_.render_buffers.aov_color_tx);
gbuffer_ps_.bind_image(RBUFS_AOV_VALUE_SLOT, &inst_.render_buffers.aov_value_tx);
/* Cryptomatte. */
gbuffer_ps_.bind_image(RBUFS_CRYPTOMATTE_SLOT, &inst_.render_buffers.cryptomatte_tx);
/* Storage Buf. */
gbuffer_ps_.bind_ssbo(RBUFS_AOV_BUF_SLOT, &inst_.film.aovs_info);
/* Textures. */
gbuffer_ps_.bind_texture(RBUFS_UTILITY_TEX_SLOT, inst_.pipelines.utility_tx);
/* Uniform Buf. */
gbuffer_ps_.bind_ubo(CAMERA_BUF_SLOT, inst_.camera.ubo_get());
inst_.sampling.bind_resources(&gbuffer_ps_);
inst_.cryptomatte.bind_resources(&gbuffer_ps_);
}
DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM | DRW_STATE_DEPTH_EQUAL |
DRW_STATE_WRITE_STENCIL | DRW_STATE_STENCIL_ALWAYS;
gbuffer_double_sided_ps_ = &gbuffer_ps_.sub("DoubleSided");
gbuffer_double_sided_ps_->state_set(state);
gbuffer_single_sided_ps_ = &gbuffer_ps_.sub("SingleSided");
gbuffer_single_sided_ps_->state_set(state | DRW_STATE_CULL_BACK);
}
}
void DeferredLayer::end_sync()
{
/* Use stencil test to reject pixels not written by this layer. */
/* WORKAROUND: Stencil write is only here to avoid rasterizer discard. */
DRWState state = DRW_STATE_WRITE_STENCIL | DRW_STATE_STENCIL_EQUAL;
/* Allow output to combined pass for the last pass. */
DRWState state_write_color = state | DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_CUSTOM;
if (closure_bits_ & (CLOSURE_DIFFUSE | CLOSURE_REFLECTION)) {
const bool is_last_eval_pass = true;
eval_light_ps_.init();
eval_light_ps_.state_set(is_last_eval_pass ? state_write_color : state);
eval_light_ps_.state_stencil(0x00u, 0x01u, 0xFFu);
eval_light_ps_.shader_set(inst_.shaders.static_shader_get(DEFERRED_LIGHT));
eval_light_ps_.bind_image("out_diffuse_light_img", &diffuse_light_tx_);
eval_light_ps_.bind_image("out_specular_light_img", &specular_light_tx_);
eval_light_ps_.bind_texture("gbuffer_closure_tx", &inst_.gbuffer.closure_tx);
eval_light_ps_.bind_texture("gbuffer_color_tx", &inst_.gbuffer.color_tx);
eval_light_ps_.push_constant("is_last_eval_pass", is_last_eval_pass);
eval_light_ps_.bind_image(RBUFS_LIGHT_SLOT, &inst_.render_buffers.light_tx);
eval_light_ps_.bind_texture(RBUFS_UTILITY_TEX_SLOT, inst_.pipelines.utility_tx);
inst_.lights.bind_resources(&eval_light_ps_);
inst_.shadows.bind_resources(&eval_light_ps_);
inst_.sampling.bind_resources(&eval_light_ps_);
inst_.hiz_buffer.bind_resources(&eval_light_ps_);
eval_light_ps_.barrier(GPU_BARRIER_TEXTURE_FETCH | GPU_BARRIER_SHADER_IMAGE_ACCESS);
eval_light_ps_.draw_procedural(GPU_PRIM_TRIS, 1, 3);
}
}
PassMain::Sub *DeferredLayer::prepass_add(::Material *blender_mat,
GPUMaterial *gpumat,
bool has_motion)
{
PassMain::Sub *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ?
(has_motion ? prepass_single_sided_moving_ps_ :
prepass_single_sided_static_ps_) :
(has_motion ? prepass_double_sided_moving_ps_ :
prepass_double_sided_static_ps_);
return &pass->sub(GPU_material_get_name(gpumat));
}
PassMain::Sub *DeferredLayer::material_add(::Material *blender_mat, GPUMaterial *gpumat)
{
closure_bits_ |= shader_closure_bits_from_flag(gpumat);
PassMain::Sub *pass = (blender_mat->blend_flag & MA_BL_CULL_BACKFACE) ?
gbuffer_single_sided_ps_ :
gbuffer_double_sided_ps_;
return &pass->sub(GPU_material_get_name(gpumat));
}
void DeferredLayer::render(View &view,
Framebuffer &prepass_fb,
Framebuffer &combined_fb,
int2 extent)
{
GPU_framebuffer_bind(prepass_fb);
inst_.manager->submit(prepass_ps_, view);
inst_.hiz_buffer.set_dirty();
inst_.shadows.set_view(view);
inst_.gbuffer.acquire(extent, closure_bits_);
GPU_framebuffer_bind(combined_fb);
inst_.manager->submit(gbuffer_ps_, view);
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_SHADER_READ | GPU_TEXTURE_USAGE_SHADER_WRITE;
diffuse_light_tx_.acquire(extent, GPU_RGBA16F, usage);
specular_light_tx_.acquire(extent, GPU_RGBA16F, usage);
diffuse_light_tx_.clear(float4(0.0f));
specular_light_tx_.clear(float4(0.0f));
inst_.manager->submit(eval_light_ps_, view);
diffuse_light_tx_.release();
specular_light_tx_.release();
inst_.gbuffer.release();
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Deferred Pipeline
*
* Closure data are written to an intermediate buffer, allowing screen-space processing.
* \{ */
void DeferredPipeline::begin_sync()
{
opaque_layer_.begin_sync();
refraction_layer_.begin_sync();
}
void DeferredPipeline::end_sync()
{
opaque_layer_.end_sync();
refraction_layer_.end_sync();
}
PassMain::Sub *DeferredPipeline::prepass_add(::Material *blender_mat,
GPUMaterial *gpumat,
bool has_motion)
{
if (blender_mat->blend_flag & MA_BL_SS_REFRACTION) {
return refraction_layer_.prepass_add(blender_mat, gpumat, has_motion);
}
else {
return opaque_layer_.prepass_add(blender_mat, gpumat, has_motion);
}
}
PassMain::Sub *DeferredPipeline::material_add(::Material *blender_mat, GPUMaterial *gpumat)
{
if (blender_mat->blend_flag & MA_BL_SS_REFRACTION) {
return refraction_layer_.material_add(blender_mat, gpumat);
}
else {
return opaque_layer_.material_add(blender_mat, gpumat);
}
}
void DeferredPipeline::render(View &view,
Framebuffer &prepass_fb,
Framebuffer &combined_fb,
int2 extent)
{
DRW_stats_group_start("Deferred.Opaque");
opaque_layer_.render(view, prepass_fb, combined_fb, extent);
DRW_stats_group_end();
DRW_stats_group_start("Deferred.Refract");
refraction_layer_.render(view, prepass_fb, combined_fb, extent);
DRW_stats_group_end();
}
/** \} */
} // namespace blender::eevee

View File

@ -113,6 +113,77 @@ class ForwardPipeline {
/** \} */
/* -------------------------------------------------------------------- */
/** \name Deferred lighting.
* \{ */
class DeferredLayer {
private:
Instance &inst_;
PassMain prepass_ps_ = {"Prepass"};
PassMain::Sub *prepass_single_sided_static_ps_ = nullptr;
PassMain::Sub *prepass_single_sided_moving_ps_ = nullptr;
PassMain::Sub *prepass_double_sided_static_ps_ = nullptr;
PassMain::Sub *prepass_double_sided_moving_ps_ = nullptr;
PassMain gbuffer_ps_ = {"Shading"};
PassMain::Sub *gbuffer_single_sided_ps_ = nullptr;
PassMain::Sub *gbuffer_double_sided_ps_ = nullptr;
PassSimple eval_light_ps_ = {"EvalLights"};
/* Closures bits from the materials in this pass. */
eClosureBits closure_bits_;
/**
* Accumulation textures for all stages of lighting evaluation (Light, SSR, SSSS, SSGI ...).
* These are split and separate from the main radiance buffer in order to accumulate light for
* the render passes and avoid too much bandwidth waste. Otherwise, we would have to load the
* BSDF color and do additive blending for each of the lighting steps.
*
* NOTE: Not to be confused with the render passes.
*/
TextureFromPool diffuse_light_tx_ = {"diffuse_light_accum_tx"};
TextureFromPool specular_light_tx_ = {"specular_light_accum_tx"};
public:
DeferredLayer(Instance &inst) : inst_(inst){};
void begin_sync();
void end_sync();
PassMain::Sub *prepass_add(::Material *blender_mat, GPUMaterial *gpumat, bool has_motion);
PassMain::Sub *material_add(::Material *blender_mat, GPUMaterial *gpumat);
void render(View &view, Framebuffer &prepass_fb, Framebuffer &combined_fb, int2 extent);
};
class DeferredPipeline {
private:
Instance &inst_;
/* Gbuffer filling passes. We could have an arbitrary number of them but for now we just have
* a hardcoded number of them. */
DeferredLayer opaque_layer_;
DeferredLayer refraction_layer_;
DeferredLayer volumetric_layer_;
public:
DeferredPipeline(Instance &inst)
: inst_(inst), opaque_layer_(inst), refraction_layer_(inst), volumetric_layer_(inst){};
void begin_sync();
void end_sync();
PassMain::Sub *prepass_add(::Material *material, GPUMaterial *gpumat, bool has_motion);
PassMain::Sub *material_add(::Material *material, GPUMaterial *gpumat);
void render(View &view, Framebuffer &prepass_fb, Framebuffer &combined_fb, int2 extent);
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Utility texture
*
@ -197,22 +268,25 @@ class UtilityTexture : public Texture {
class PipelineModule {
public:
WorldPipeline world;
// DeferredPipeline deferred;
DeferredPipeline deferred;
ForwardPipeline forward;
ShadowPipeline shadow;
// VelocityPipeline velocity;
UtilityTexture utility_tx;
public:
PipelineModule(Instance &inst) : world(inst), forward(inst), shadow(inst){};
PipelineModule(Instance &inst) : world(inst), deferred(inst), forward(inst), shadow(inst){};
void sync()
void begin_sync()
{
// deferred.sync();
deferred.begin_sync();
forward.sync();
shadow.sync();
// velocity.sync();
}
void end_sync()
{
deferred.end_sync();
}
PassMain::Sub *material_add(Object *ob,
@ -222,7 +296,7 @@ class PipelineModule {
{
switch (pipeline_type) {
case MAT_PIPE_DEFERRED_PREPASS:
// return deferred.prepass_add(blender_mat, gpumat, false);
return deferred.prepass_add(blender_mat, gpumat, false);
case MAT_PIPE_FORWARD_PREPASS:
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) {
return forward.prepass_transparent_add(ob, blender_mat, gpumat);
@ -230,7 +304,7 @@ class PipelineModule {
return forward.prepass_opaque_add(blender_mat, gpumat, false);
case MAT_PIPE_DEFERRED_PREPASS_VELOCITY:
// return deferred.prepass_add(blender_mat, gpumat, true);
return deferred.prepass_add(blender_mat, gpumat, true);
case MAT_PIPE_FORWARD_PREPASS_VELOCITY:
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) {
return forward.prepass_transparent_add(ob, blender_mat, gpumat);
@ -238,7 +312,7 @@ class PipelineModule {
return forward.prepass_opaque_add(blender_mat, gpumat, true);
case MAT_PIPE_DEFERRED:
// return deferred.material_add(blender_mat, gpumat);
return deferred.material_add(blender_mat, gpumat);
case MAT_PIPE_FORWARD:
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) {
return forward.material_transparent_add(ob, blender_mat, gpumat);

View File

@ -86,6 +86,8 @@ const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_
return "eevee_film_comp";
case FILM_CRYPTOMATTE_POST:
return "eevee_film_cryptomatte_post";
case DEFERRED_LIGHT:
return "eevee_deferred_light";
case HIZ_DEBUG:
return "eevee_hiz_debug";
case HIZ_UPDATE:
@ -98,6 +100,8 @@ const char *ShaderModule::static_shader_create_info_name_get(eShaderType shader_
return "eevee_motion_blur_tiles_flatten_render";
case MOTION_BLUR_TILE_FLATTEN_VIEWPORT:
return "eevee_motion_blur_tiles_flatten_viewport";
case DEBUG_SURFELS:
return "eevee_debug_surfels";
case DOF_BOKEH_LUT:
return "eevee_depth_of_field_bokeh_lut";
case DOF_DOWNSAMPLE:
@ -239,6 +243,11 @@ void ShaderModule::material_create_info_ammend(GPUMaterial *gpumat, GPUCodegenOu
}
}
/* WORKAROUND: Needed because node_tree isn't present in test shaders. */
if (pipeline_type == MAT_PIPE_DEFERRED) {
info.define("MAT_RENDER_PASS_SUPPORT");
}
if (GPU_material_flag_get(gpumat, GPU_MATFLAG_TRANSPARENT)) {
info.define("MAT_TRANSPARENT");
/* Transparent material do not have any velocity specific pipeline. */

View File

@ -30,6 +30,10 @@ enum eShaderType {
FILM_COMP,
FILM_CRYPTOMATTE_POST,
DEFERRED_LIGHT,
DEBUG_SURFELS,
DOF_BOKEH_LUT,
DOF_DOWNSAMPLE,
DOF_FILTER,

View File

@ -48,6 +48,10 @@ enum eDebugMode : uint32_t {
* Show incorrectly downsample tiles in red.
*/
DEBUG_HIZ_VALIDATION = 2u,
/**
* Display IrradianceCache surfels.
*/
DEBUG_IRRADIANCE_CACHE_SURFELS = 3u,
/**
* Show tiles depending on their status.
*/
@ -821,6 +825,21 @@ static inline ShadowTileDataPacked shadow_tile_pack(ShadowTileData tile)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Debug
* \{ */
struct DebugSurfel {
packed_float3 position;
int _pad0;
packed_float3 normal;
int _pad1;
float4 color;
};
BLI_STATIC_ASSERT_ALIGN(DebugSurfel, 16)
/** \} */
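A quick C++ mirror of the padding above (plain arrays stand in for packed_float3/float4; names are illustrative): the explicit _pad members keep the CPU-side struct at the 16-byte stride the GPU storage buffer expects, which is what BLI_STATIC_ASSERT_ALIGN checks.

#include <cstddef>

struct DebugSurfelMirror {
  float position[3];
  int _pad0; /* Keeps `normal` on a 16-byte boundary, matching the GLSL vec3 alignment. */
  float normal[3];
  int _pad1;
  float color[4];
};

/* 3 * 16 bytes; without the padding the GLSL and C++ layouts would disagree. */
static_assert(sizeof(DebugSurfelMirror) == 48, "CPU/GPU struct layout mismatch");
static_assert(sizeof(DebugSurfelMirror) % 16 == 0, "Storage buffer structs need 16-byte multiples");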
/* -------------------------------------------------------------------- */
/** \name Hierarchical-Z Buffer
* \{ */
@ -928,6 +947,7 @@ using DepthOfFieldDataBuf = draw::UniformBuffer<DepthOfFieldData>;
using DepthOfFieldScatterListBuf = draw::StorageArrayBuffer<ScatterRect, 16, true>;
using DrawIndirectBuf = draw::StorageBuffer<DrawCommand, true>;
using FilmDataBuf = draw::UniformBuffer<FilmData>;
using DebugSurfelBuf = draw::StorageArrayBuffer<DebugSurfel, 64>;
using HiZDataBuf = draw::UniformBuffer<HiZData>;
using LightCullingDataBuf = draw::StorageBuffer<LightCullingData>;
using LightCullingKeyBuf = draw::StorageArrayBuffer<uint, LIGHT_CHUNK, true>;

View File

@ -123,8 +123,7 @@ void ShadingView::render()
/* TODO(fclem): Move it after the first prepass (and hiz update) once pipeline is stabilized. */
inst_.lights.set_view(render_view_new_, extent_);
// inst_.pipelines.deferred.render(
// render_view_, rt_buffer_opaque_, rt_buffer_refract_, depth_tx_, combined_tx_);
inst_.pipelines.deferred.render(render_view_new_, prepass_fb_, combined_fb_, extent_);
// inst_.lightprobes.draw_cache_display();
@ -136,6 +135,8 @@ void ShadingView::render()
inst_.hiz_buffer.debug_draw(render_view_new_, combined_fb_);
inst_.shadows.debug_draw(render_view_new_, combined_fb_);
inst_.irradiance_cache.debug_draw(render_view_new_, combined_fb_);
GPUTexture *combined_final_tx = render_postfx(rbufs.combined_tx);
inst_.film.accumulate(sub_view_, combined_final_tx);

View File

@ -0,0 +1,21 @@
void main()
{
DebugSurfel surfel = surfels_buf[surfel_index];
out_color = surfel.color;
/* Display surfels as circles. */
if (distance(P, surfel.position) > surfel_radius) {
discard;
return;
}
/* Display backfacing surfels with a transparent checkerboard grid. */
if (!gl_FrontFacing) {
ivec2 grid_uv = ivec2(gl_FragCoord) / 5;
if ((grid_uv.x + grid_uv.y) % 2 == 0) {
discard;
return;
}
}
}

View File

@ -0,0 +1,38 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_geom_lib.glsl)
void main()
{
surfel_index = gl_InstanceID;
DebugSurfel surfel = surfels_buf[surfel_index];
vec3 lP;
switch (gl_VertexID) {
case 0:
lP = vec3(-1, 1, 0);
break;
case 1:
lP = vec3(-1, -1, 0);
break;
case 2:
lP = vec3(1, 1, 0);
break;
case 3:
lP = vec3(1, -1, 0);
break;
}
vec3 N = surfel.normal;
vec3 T, B;
make_orthonormal_basis(N, T, B);
mat4 model_matrix = mat4(vec4(T * surfel_radius, 0),
vec4(B * surfel_radius, 0),
vec4(N * surfel_radius, 0),
vec4(surfel.position, 1));
P = (model_matrix * vec4(lP, 1)).xyz;
gl_Position = point_world_to_ndc(P);
}

View File

@ -0,0 +1,97 @@
/**
* Compute light objects' lighting contribution using Gbuffer data.
*
* Output light either directly to the radiance buffers or to a temporary radiance accumulation
* buffer that will be processed by other deferred lighting passes.
*/
#pragma BLENDER_REQUIRE(eevee_gbuffer_lib.glsl)
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_light_eval_lib.glsl)
void main()
{
ivec2 texel = ivec2(gl_FragCoord.xy);
float depth = texelFetch(hiz_tx, ivec2(gl_FragCoord.xy), 0).r;
vec3 P = get_world_space_from_depth(uvcoordsvar.xy, depth);
/* TODO(fclem): High precision derivative. */
vec3 Ng = safe_normalize(cross(dFdx(P), dFdy(P)));
vec3 V = cameraVec(P);
float vP_z = dot(cameraForward, P) - dot(cameraForward, cameraPos);
vec4 gbuffer_0_packed = texelFetch(gbuffer_closure_tx, ivec3(texel, 0), 0);
vec4 gbuffer_1_packed = texelFetch(gbuffer_closure_tx, ivec3(texel, 1), 0);
ClosureReflection reflection_data;
reflection_data.N = gbuffer_normal_unpack(gbuffer_0_packed.xy);
reflection_data.roughness = gbuffer_0_packed.z;
ClosureDiffuse diffuse_data;
diffuse_data.N = gbuffer_normal_unpack(gbuffer_1_packed.xy);
/* These are only set for SSS case. */
diffuse_data.sss_radius = vec3(0.0);
diffuse_data.sss_id = 0u;
float thickness = 0.0;
bool is_refraction = gbuffer_is_refraction(gbuffer_1_packed);
if (is_refraction) {
/* Still evaluate the diffuse light so that dithered SSS / Refraction combination still
* produces a complete diffuse light buffer that will be correctly convolved by the SSSS.
* The refraction pixels will just set the diffuse radiance to 0. */
}
else if (false /* TODO */) {
vec4 gbuffer_2_packed = texelFetch(gbuffer_closure_tx, ivec3(texel, 2), 0);
diffuse_data.sss_radius = gbuffer_sss_radii_unpack(gbuffer_2_packed.xyz);
diffuse_data.sss_id = gbuffer_object_id_unorm16_unpack(gbuffer_2_packed.w);
thickness = gbuffer_thickness_unpack(gbuffer_1_packed.z);
}
vec3 diffuse_light = vec3(0.0);
vec3 reflection_light = vec3(0.0);
light_eval(
diffuse_data, reflection_data, P, Ng, V, vP_z, thickness, diffuse_light, reflection_light);
if (is_last_eval_pass) {
/* Apply color and output lighting to render-passes. */
vec4 color_0_packed = texelFetch(gbuffer_color_tx, ivec3(texel, 0), 0);
vec4 color_1_packed = texelFetch(gbuffer_color_tx, ivec3(texel, 1), 0);
reflection_data.color = gbuffer_color_unpack(color_0_packed);
diffuse_data.color = gbuffer_color_unpack(color_1_packed);
if (is_refraction) {
diffuse_data.color = vec3(0.0);
}
reflection_light *= reflection_data.color;
diffuse_light *= diffuse_data.color;
/* Add radiance to light pass. */
imageStore(
rp_light_img, ivec3(texel, RENDER_PASS_LAYER_DIFFUSE_LIGHT), vec4(diffuse_light, 1.0));
imageStore(
rp_light_img, ivec3(texel, RENDER_PASS_LAYER_SPECULAR_LIGHT), vec4(reflection_light, 1.0));
/* Add radiance to combined pass. */
out_radiance = vec4(diffuse_light + reflection_light, 0.0);
out_transmittance = vec4(1.0);
}
else {
/* Store lighting for next deferred pass. */
/* Output diffuse light along with object ID for sub-surface screen space processing. */
vec4 diffuse_radiance;
diffuse_radiance.xyz = diffuse_light;
diffuse_radiance.w = gbuffer_object_id_f16_pack(diffuse_data.sss_id);
imageStore(out_diffuse_light_img, texel, diffuse_radiance);
imageStore(out_specular_light_img, texel, vec4(reflection_light, 0.0));
/* Final radiance will be amended by the last pass.
* This should do nothing as color write should be disabled in this case. */
out_radiance = vec4(0.0);
out_transmittance = vec4(0.0);
}
}

View File

@ -0,0 +1,106 @@
/**
* G-buffer: Packing and unpacking of G-buffer data.
*
* See #GBuffer for a breakdown of the G-buffer layout.
*/
#pragma BLENDER_REQUIRE(gpu_shader_math_vector_lib.glsl)
vec2 gbuffer_normal_pack(vec3 N)
{
N /= length_manhattan(N);
N.xy = (N.z >= 0.0) ? N.xy : ((1.0 - abs(N.yx)) * sign(N.xy));
N.xy = N.xy * 0.5 + 0.5;
return N.xy;
}
vec3 gbuffer_normal_unpack(vec2 N_packed)
{
N_packed = N_packed * 2.0 - 1.0;
vec3 N = vec3(N_packed.x, N_packed.y, 1.0 - abs(N_packed.x) - abs(N_packed.y));
float t = clamp(-N.z, 0.0, 1.0);
N.x += (N.x >= 0.0) ? -t : t;
N.y += (N.y >= 0.0) ? -t : t;
return normalize(N);
}
float gbuffer_ior_pack(float ior)
{
return (ior > 1.0) ? (1.0 - 0.5 / ior) : (0.5 * ior);
}
float gbuffer_ior_unpack(float ior_packed)
{
return (ior_packed > 0.5) ? (-1.0 / (ior_packed * 2.0 - 2.0)) : (2.0 * ior_packed);
}
float gbuffer_thickness_pack(float thickness)
{
/* TODO(fclem): Something better. */
return gbuffer_ior_pack(thickness);
}
float gbuffer_thickness_unpack(float thickness_packed)
{
/* TODO(fclem): Something better. */
return gbuffer_ior_unpack(thickness_packed);
}
vec3 gbuffer_sss_radii_pack(vec3 sss_radii)
{
/* TODO(fclem): Something better. */
return vec3(
gbuffer_ior_pack(sss_radii.x), gbuffer_ior_pack(sss_radii.y), gbuffer_ior_pack(sss_radii.z));
}
vec3 gbuffer_sss_radii_unpack(vec3 sss_radii_packed)
{
/* TODO(fclem): Something better. */
return vec3(gbuffer_ior_unpack(sss_radii_packed.x),
gbuffer_ior_unpack(sss_radii_packed.y),
gbuffer_ior_unpack(sss_radii_packed.z));
}
vec4 gbuffer_color_pack(vec3 color)
{
float max_comp = max(color.x, max(color.y, color.z));
/* Store a 2-bit exponent inside Alpha. Allows values up to 8 with some color degradation.
* Above 8, the result will be clamped when writing the data to the output buffer. */
float exponent = (max_comp > 1) ? ((max_comp > 2) ? ((max_comp > 4) ? 3.0 : 2.0) : 1.0) : 0.0;
/* TODO(fclem): Could try dithering to avoid banding artifacts on higher exponents. */
return vec4(color / exp2(exponent), exponent / 3.0);
}
vec3 gbuffer_color_unpack(vec4 color_packed)
{
float exponent = color_packed.a * 3.0;
return color_packed.rgb * exp2(exponent);
}
float gbuffer_object_id_unorm16_pack(uint object_id)
{
return float(object_id & 0xFFFFu) / float(0xFFFF);
}
uint gbuffer_object_id_unorm16_unpack(float object_id_packed)
{
return uint(object_id_packed * float(0xFFFF));
}
float gbuffer_object_id_f16_pack(uint object_id)
{
/* TODO(fclem): Make use of all the 16 bits in a half float.
* This here only correctly represent values up to 1024. */
return float(object_id);
}
uint gbuffer_object_id_f16_unpack(float object_id_packed)
{
return uint(object_id_packed);
}
bool gbuffer_is_refraction(vec4 gbuffer)
{
return gbuffer.w < 1.0;
}
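The normal encoding above is an octahedral mapping; a plain C++ transliteration of gbuffer_normal_pack()/gbuffer_normal_unpack() for reference (illustrative only, the sign() behavior at exactly zero is glossed over):

#include <algorithm>
#include <array>
#include <cmath>

using float2 = std::array<float, 2>;
using float3 = std::array<float, 3>;

static float2 normal_pack(float3 n)
{
  /* Project onto the octahedron |x| + |y| + |z| = 1. */
  const float m = std::fabs(n[0]) + std::fabs(n[1]) + std::fabs(n[2]);
  for (float &c : n) {
    c /= m;
  }
  if (n[2] < 0.0f) {
    /* Fold the lower hemisphere over the octahedron edges. */
    const float x = n[0], y = n[1];
    n[0] = (1.0f - std::fabs(y)) * (x >= 0.0f ? 1.0f : -1.0f);
    n[1] = (1.0f - std::fabs(x)) * (y >= 0.0f ? 1.0f : -1.0f);
  }
  /* Remap from [-1, 1] to the [0, 1] range stored in the G-buffer. */
  return {n[0] * 0.5f + 0.5f, n[1] * 0.5f + 0.5f};
}

static float3 normal_unpack(const float2 &p)
{
  float x = p[0] * 2.0f - 1.0f;
  float y = p[1] * 2.0f - 1.0f;
  float z = 1.0f - std::fabs(x) - std::fabs(y);
  const float t = std::clamp(-z, 0.0f, 1.0f);
  x += (x >= 0.0f) ? -t : t;
  y += (y >= 0.0f) ? -t : t;
  const float len = std::sqrt(x * x + y * y + z * z);
  return {x / len, y / len, z / len};
}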

View File

@ -29,14 +29,16 @@ bool closure_select(float weight, inout float total_weight, inout float r)
float x = weight / total_weight;
bool chosen = (r < x);
/* Assuming that if r is in the interval [0,x] or [x,1], it's still uniformly distributed within
* that interval, so you remapping to [0,1] again to explore this space of probability. */
* that interval, so remapping to [0,1] again to explore this space of probability. */
r = (chosen) ? (r / x) : ((r - x) / (1.0 - x));
return chosen;
}
#define SELECT_CLOSURE(destination, random, candidate) \
if (closure_select(candidate.weight, destination.weight, random)) { \
float tmp = destination.weight; \
destination = candidate; \
destination.weight = tmp; \
}
float g_closure_rand;
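The selection above is single-sample weighted reservoir sampling: each candidate replaces the current pick with probability weight / running_total, and the random number is remapped so it can be reused for the next test. A standalone C++ sketch (the running-total accumulation is assumed from context, it sits just above the visible hunk):

#include <cstdio>

/* Pick one candidate with probability proportional to its weight, reusing a
 * single random number `r` in [0, 1). */
static bool closure_select(float weight, float &total_weight, float &r)
{
  total_weight += weight;
  const float x = weight / total_weight;
  const bool chosen = (r < x);
  /* Remap r to [0, 1) within the interval it fell into so it stays uniform. */
  r = chosen ? (r / x) : ((r - x) / (1.0f - x));
  return chosen;
}

int main()
{
  const float weights[3] = {0.2f, 0.5f, 0.3f};
  float r = 0.63f; /* Would come from blue noise in the shader. */
  float total = 0.0f;
  int selected = -1;
  for (int i = 0; i < 3; i++) {
    if (closure_select(weights[i], total, r)) {
      selected = i; /* Keep the latest accepted candidate, as SELECT_CLOSURE does. */
    }
  }
  /* Candidate i ends up selected with probability weights[i] / total. */
  printf("selected closure: %d\n", selected);
  return 0;
}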

View File

@ -82,7 +82,7 @@ void main()
{
vec2 screen_uv = gl_FragCoord.xy / vec2(fb_resolution);
float opaque_depth = texelFetch(hiz_tx, int2(gl_FragCoord.xy), fb_lod).r;
float opaque_depth = texelFetch(hiz_tx, ivec2(gl_FragCoord.xy), fb_lod).r;
vec3 ws_opaque = get_world_space_from_depth(screen_uv, opaque_depth);
vec3 ws_near_plane = get_world_space_from_depth(screen_uv, 0);

View File

@ -0,0 +1,215 @@
/* -------------------------------------------------------------------- */
/** \name Spherical Harmonics Functions
*
* `L` denotes the row and `M` the column in the spherical harmonics table (1).
* `p` denotes positive columns and `n` negative ones.
*
* Use precomputed constants to avoid constant folding differences across compilers.
* Note that (2) doesn't use Condon-Shortley phase whereas our implementation does.
*
* Reference:
* (1) https://en.wikipedia.org/wiki/Spherical_harmonics#/media/File:Sphericalfunctions.svg
* (2) https://en.wikipedia.org/wiki/Table_of_spherical_harmonics#Real_spherical_harmonics
* (3) https://seblagarde.wordpress.com/2012/01/08/pi-or-not-to-pi-in-game-lighting-equation/
*
* \{ */
/* L0 Band. */
float spherical_harmonics_L0_M0(vec3 v)
{
return 0.282094792;
}
/* L1 Band. */
float spherical_harmonics_L1_Mn1(vec3 v)
{
return -0.488602512 * v.y;
}
float spherical_harmonics_L1_M0(vec3 v)
{
return 0.488602512 * v.z;
}
float spherical_harmonics_L1_Mp1(vec3 v)
{
return -0.488602512 * v.x;
}
/* L2 Band. */
float spherical_harmonics_L2_Mn2(vec3 v)
{
return 1.092548431 * (v.x * v.y);
}
float spherical_harmonics_L2_Mn1(vec3 v)
{
return -1.092548431 * (v.y * v.z);
}
float spherical_harmonics_L2_M0(vec3 v)
{
return 0.315391565 * (3.0 * v.z * v.z - 1.0);
}
float spherical_harmonics_L2_Mp1(vec3 v)
{
return -1.092548431 * (v.x * v.z);
}
float spherical_harmonics_L2_Mp2(vec3 v)
{
return 0.546274215 * (v.x * v.x - v.y * v.y);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Structure
* \{ */
struct SphericalHarmonicBandL0 {
vec3 M0;
};
struct SphericalHarmonicBandL1 {
vec3 Mn1;
vec3 M0;
vec3 Mp1;
};
struct SphericalHarmonicBandL2 {
vec3 Mn2;
vec3 Mn1;
vec3 M0;
vec3 Mp1;
vec3 Mp2;
};
struct SphericalHarmonicL0 {
SphericalHarmonicBandL0 L0;
};
struct SphericalHarmonicL1 {
SphericalHarmonicBandL0 L0;
SphericalHarmonicBandL1 L1;
};
struct SphericalHarmonicL2 {
SphericalHarmonicBandL0 L0;
SphericalHarmonicBandL1 L1;
SphericalHarmonicBandL2 L2;
};
/** \} */
/* -------------------------------------------------------------------- */
/** \name Encode
*
* Decompose an input signal into spherical harmonic coefficients.
* \{ */
void spherical_harmonics_L0_encode_signal_sample(vec3 direction,
vec3 amplitude,
inout SphericalHarmonicBandL0 r_L0)
{
r_L0.M0 += spherical_harmonics_L0_M0(direction) * amplitude;
}
void spherical_harmonics_L1_encode_signal_sample(vec3 direction,
vec3 amplitude,
inout SphericalHarmonicBandL1 r_L1)
{
r_L1.Mn1 += spherical_harmonics_L1_Mn1(direction) * amplitude;
r_L1.M0 += spherical_harmonics_L1_M0(direction) * amplitude;
r_L1.Mp1 += spherical_harmonics_L1_Mp1(direction) * amplitude;
}
void spherical_harmonics_L2_encode_signal_sample(vec3 direction,
vec3 amplitude,
inout SphericalHarmonicBandL2 r_L2)
{
r_L2.Mn2 += spherical_harmonics_L2_Mn2(direction) * amplitude;
r_L2.Mn1 += spherical_harmonics_L2_Mn1(direction) * amplitude;
r_L2.M0 += spherical_harmonics_L2_M0(direction) * amplitude;
r_L2.Mp1 += spherical_harmonics_L2_Mp1(direction) * amplitude;
r_L2.Mp2 += spherical_harmonics_L2_Mp2(direction) * amplitude;
}
void spherical_harmonics_encode_signal_sample(vec3 direction,
vec3 amplitude,
inout SphericalHarmonicL0 sh)
{
spherical_harmonics_L0_encode_signal_sample(direction, amplitude, sh.L0);
}
void spherical_harmonics_encode_signal_sample(vec3 direction,
vec3 amplitude,
inout SphericalHarmonicL1 sh)
{
spherical_harmonics_L0_encode_signal_sample(direction, amplitude, sh.L0);
spherical_harmonics_L1_encode_signal_sample(direction, amplitude, sh.L1);
}
void spherical_harmonics_encode_signal_sample(vec3 direction,
vec3 amplitude,
inout SphericalHarmonicL2 sh)
{
spherical_harmonics_L0_encode_signal_sample(direction, amplitude, sh.L0);
spherical_harmonics_L1_encode_signal_sample(direction, amplitude, sh.L1);
spherical_harmonics_L2_encode_signal_sample(direction, amplitude, sh.L2);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Decode
*
* Evaluate an encoded signal in a given unit vector direction.
* \{ */
vec3 spherical_harmonics_L0_evaluate(vec3 direction, SphericalHarmonicBandL0 L0)
{
return spherical_harmonics_L0_M0(direction) * L0.M0;
}
vec3 spherical_harmonics_L1_evaluate(vec3 direction, SphericalHarmonicBandL1 L1)
{
return spherical_harmonics_L1_Mn1(direction) * L1.Mn1 +
spherical_harmonics_L1_M0(direction) * L1.M0 +
spherical_harmonics_L1_Mp1(direction) * L1.Mp1;
}
vec3 spherical_harmonics_L2_evaluate(vec3 direction, SphericalHarmonicBandL2 L2)
{
return spherical_harmonics_L2_Mn2(direction) * L2.Mn2 +
spherical_harmonics_L2_Mn1(direction) * L2.Mn1 +
spherical_harmonics_L2_M0(direction) * L2.M0 +
spherical_harmonics_L2_Mp1(direction) * L2.Mp1 +
spherical_harmonics_L2_Mp2(direction) * L2.Mp2;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Evaluation
* \{ */
/**
* Convolve a spherical harmonic encoded irradiance signal as a lambertian reflection.
* Returns the lambertian radiance (cosine lobe divided by PI) so the coefficients simplify to 1,
* 2/3 and 1/4. See this reference for more explanation:
* https://seblagarde.wordpress.com/2012/01/08/pi-or-not-to-pi-in-game-lighting-equation/
*/
vec3 spherical_harmonics_evaluate_lambert(vec3 N, SphericalHarmonicL0 sh)
{
return spherical_harmonics_L0_evaluate(N, sh.L0);
}
vec3 spherical_harmonics_evaluate_lambert(vec3 N, SphericalHarmonicL1 sh)
{
return spherical_harmonics_L0_evaluate(N, sh.L0) +
spherical_harmonics_L1_evaluate(N, sh.L1) * (2.0 / 3.0);
}
vec3 spherical_harmonics_evaluate_lambert(vec3 N, SphericalHarmonicL2 sh)
{
return spherical_harmonics_L0_evaluate(N, sh.L0) +
spherical_harmonics_L1_evaluate(N, sh.L1) * (2.0 / 3.0) +
spherical_harmonics_L2_evaluate(N, sh.L2) * (1.0 / 4.0);
}
/** \} */
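A tiny worked example of the encode / Lambert-evaluate pair above, reduced to a scalar amplitude and the L0/L1 bands (plain C++, illustrative only): one sample arriving from +Z is projected onto the basis, then the lambertian convolution applies the 1 and 2/3 band factors.

#include <cstdio>

struct Vec3 {
  float x, y, z;
};

/* Same L0/L1 constants as the GLSL above. */
static float sh_L0_M0(Vec3 /*v*/) { return 0.282094792f; }
static float sh_L1_Mn1(Vec3 v) { return -0.488602512f * v.y; }
static float sh_L1_M0(Vec3 v) { return 0.488602512f * v.z; }
static float sh_L1_Mp1(Vec3 v) { return -0.488602512f * v.x; }

int main()
{
  /* Encode one unit-amplitude sample arriving from +Z. */
  const Vec3 dir = {0.0f, 0.0f, 1.0f};
  const float c[4] = {sh_L0_M0(dir), sh_L1_Mn1(dir), sh_L1_M0(dir), sh_L1_Mp1(dir)};

  /* Lambert convolution evaluated toward the sample direction: band L0 keeps
   * its coefficient, band L1 is scaled by 2/3 (L2 would use 1/4). */
  const Vec3 n = {0.0f, 0.0f, 1.0f};
  const float radiance = sh_L0_M0(n) * c[0] +
                         (2.0f / 3.0f) *
                             (sh_L1_Mn1(n) * c[1] + sh_L1_M0(n) * c[2] + sh_L1_Mp1(n) * c[3]);
  printf("lambert radiance toward the sample: %f\n", radiance);
  return 0;
}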

View File

@ -3,17 +3,136 @@
* Deferred lighting evaluation: Lighting is evaluated in a separate pass.
*
* Outputs shading parameter per pixel using a randomized set of BSDFs.
**/
* Some render-passes are written during this pass.
*/
#pragma BLENDER_REQUIRE(eevee_gbuffer_lib.glsl)
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(common_math_lib.glsl)
#pragma BLENDER_REQUIRE(common_hair_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_surf_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_nodetree_lib.glsl)
#pragma BLENDER_REQUIRE(eevee_sampling_lib.glsl)
vec4 closure_to_rgba(Closure cl)
{
vec4 out_color;
out_color.rgb = g_emission;
out_color.a = saturate(1.0 - avg(g_transmittance));
/* Reset for the next closure tree. */
closure_weights_reset();
return out_color;
}
void main()
{
init_globals();
float noise = utility_tx_fetch(utility_tx, gl_FragCoord.xy, UTIL_BLUE_NOISE_LAYER).r;
g_closure_rand = fract(noise + sampling_rng_1D_get(SAMPLING_CLOSURE));
fragment_displacement();
nodetree_surface();
g_holdout = saturate(g_holdout);
out_transmittance = vec4(1.0 - g_holdout);
float transmittance_mono = saturate(avg(g_transmittance));
float thickness = nodetree_thickness();
g_diffuse_data.color *= g_diffuse_data.weight;
g_reflection_data.color *= g_reflection_data.weight;
g_refraction_data.color *= g_refraction_data.weight;
/* TODO(fclem): This feels way too complex for what it is. */
bool has_any_bsdf_weight = g_diffuse_data.weight != 0.0 || g_reflection_data.weight != 0.0 ||
g_refraction_data.weight != 0.0;
vec3 out_normal = has_any_bsdf_weight ? vec3(0.0) : g_data.N;
out_normal += g_diffuse_data.N * g_diffuse_data.weight;
out_normal += g_reflection_data.N * g_reflection_data.weight;
out_normal += g_refraction_data.N * g_refraction_data.weight;
out_normal = safe_normalize(out_normal);
vec3 specular_color = g_reflection_data.color + g_refraction_data.color;
/* ----- Render Passes output ----- */
ivec2 out_texel = ivec2(gl_FragCoord.xy);
#ifdef MAT_RENDER_PASS_SUPPORT /* Needed because node_tree isn't present in test shaders. */
/* Some render passes can be written during the gbuffer pass. Light passes are written later. */
vec4 cryptomatte_output = vec4(cryptomatte_object_buf[resource_id], node_tree.crypto_hash, 0.0);
imageStore(rp_cryptomatte_img, out_texel, cryptomatte_output);
imageStore(rp_normal_img, out_texel, vec4(out_normal, 1.0));
/* TODO(fclem): For now, just don't do anything. In the future all render passes should be in an
* array texture and have a UBO with indirection to the correct layer. */
// imageStore(rp_diffuse_color_img, out_texel, vec4(g_diffuse_data.color, 1.0));
imageStore(rp_specular_color_img, out_texel, vec4(specular_color, 1.0));
imageStore(rp_emission_img, out_texel, vec4(g_emission, 1.0));
#endif
/* ----- GBuffer output ----- */
if (true) {
/* Reflection. */
vec4 out_reflect = vec4(gbuffer_normal_pack(g_reflection_data.N),
g_reflection_data.roughness,
g_reflection_data.roughness);
imageStore(out_gbuff_closure_img, ivec3(out_texel, 0), out_reflect);
vec4 color = gbuffer_color_pack(g_reflection_data.color);
imageStore(out_gbuff_color_img, ivec3(out_texel, 0), color);
}
/* TODO(fclem) other RNG. */
float refract_rand = fract(g_closure_rand * 6.1803398875);
float combined_weight = g_refraction_data.weight + g_diffuse_data.weight;
bool output_refraction = combined_weight > 0.0 &&
(refract_rand * combined_weight) < g_refraction_data.weight;
if (output_refraction) {
/* Refraction. */
vec4 closure;
closure.xy = gbuffer_normal_pack(g_refraction_data.N);
closure.z = g_refraction_data.roughness;
closure.w = gbuffer_ior_pack(g_refraction_data.ior);
/* Clamp to just below 1 to be able to distinguish between refraction and diffuse.
* Ceiling value is chosen by the storage format (16-bit UNORM). */
closure.w = min(closure.w, float(0xFFFFu - 1u) / float(0xFFFFu));
imageStore(out_gbuff_closure_img, ivec3(out_texel, 1), closure);
vec4 color = gbuffer_color_pack(g_refraction_data.color);
imageStore(out_gbuff_color_img, ivec3(out_texel, 1), color);
}
else {
/* Diffuse. */
vec4 closure;
closure.xy = gbuffer_normal_pack(g_diffuse_data.N);
closure.z = gbuffer_thickness_pack(thickness);
/* Used to detect the refraction case. Could be used for roughness. */
closure.w = 1.0;
imageStore(out_gbuff_closure_img, ivec3(out_texel, 1), closure);
vec4 color = gbuffer_color_pack(g_diffuse_data.color);
imageStore(out_gbuff_color_img, ivec3(out_texel, 1), color);
}
if (true) {
/* SubSurface Scattering. */
vec4 closure;
closure.xyz = gbuffer_sss_radii_pack(g_diffuse_data.sss_radius);
closure.w = gbuffer_object_id_unorm16_pack(g_diffuse_data.sss_id);
imageStore(out_gbuff_closure_img, ivec3(out_texel, 2), closure);
}
/* ----- Radiance output ----- */
/* Only output emission during the gbuffer pass. */
out_radiance = vec4(g_emission, 0.0);
out_radiance.rgb *= 1.0 - g_holdout;
out_transmittance.rgb = g_transmittance;
out_transmittance.a = saturate(avg(g_transmittance));
}

View File

@ -0,0 +1,47 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "eevee_defines.hh"
#include "gpu_shader_create_info.hh"
#define image_out(slot, qualifier, format, name) \
image(slot, format, qualifier, ImageType::FLOAT_2D, name, Frequency::PASS)
#define image_array_out(slot, qualifier, format, name) \
image(slot, format, qualifier, ImageType::FLOAT_2D_ARRAY, name, Frequency::PASS)
/**
* Specific deferred passes accumulate the computed lighting to either:
* - a split diffuse / specular temporary light buffer.
* or to
* - the combined pass & the light render-pass (if needed).
*
* This is in order to minimize the number of blending steps.
*/
GPU_SHADER_CREATE_INFO(eevee_deferred_base)
/* Early fragment test is needed to avoid processing fragments without correct GBuffer data. */
.early_fragment_test(true)
/* Select which output to write to. */
.push_constant(Type::BOOL, "is_last_eval_pass")
/* Combined pass output. */
.fragment_out(0, Type::VEC4, "out_radiance", DualBlend::SRC_0)
.fragment_out(0, Type::VEC4, "out_transmittance", DualBlend::SRC_1)
/* Light pass output. */
.image_array_out(RBUFS_LIGHT_SLOT, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_light_img")
/* Chaining to next pass. */
.image_out(2, Qualifier::READ_WRITE, GPU_RGBA16F, "out_diffuse_light_img")
.image_out(3, Qualifier::READ_WRITE, GPU_RGBA16F, "out_specular_light_img");
GPU_SHADER_CREATE_INFO(eevee_deferred_light)
.fragment_source("eevee_deferred_light_frag.glsl")
.sampler(0, ImageType::FLOAT_2D_ARRAY, "gbuffer_closure_tx")
.sampler(1, ImageType::FLOAT_2D_ARRAY, "gbuffer_color_tx")
.additional_info("eevee_shared",
"eevee_utility_texture",
"eevee_light_data",
"eevee_shadow_data",
"eevee_deferred_base",
"eevee_hiz_data",
"draw_view",
"draw_fullscreen")
.do_static_compilation(true);
#undef image_array_out
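How the two fragment outputs combine is decided by the fixed-function blend state; assuming DRW_STATE_BLEND_CUSTOM resolves to the usual dual-source `dst = src0 + dst * src1` (my reading, not stated in this diff), the accumulation step amounts to the following sketch:

struct RGBA {
  float r, g, b, a;
};

/* Per-channel multiply-add: radiance is added while the existing framebuffer
 * content is attenuated by the transmittance output. Illustrative only. */
static RGBA blend_custom(const RGBA &dst, const RGBA &out_radiance, const RGBA &out_transmittance)
{
  return {out_radiance.r + dst.r * out_transmittance.r,
          out_radiance.g + dst.g * out_transmittance.g,
          out_radiance.b + dst.b * out_transmittance.b,
          out_radiance.a + dst.a * out_transmittance.a};
}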

View File

@ -0,0 +1,16 @@
#include "eevee_defines.hh"
#include "gpu_shader_create_info.hh"
GPU_SHADER_INTERFACE_INFO(eeve_debug_surfel_iface, "")
.smooth(Type::VEC3, "P")
.flat(Type::INT, "surfel_index");
GPU_SHADER_CREATE_INFO(eevee_debug_surfels)
.additional_info("eevee_shared", "draw_view")
.vertex_source("eevee_debug_surfels_vert.glsl")
.vertex_out(eeve_debug_surfel_iface)
.fragment_source("eevee_debug_surfels_frag.glsl")
.fragment_out(0, Type::VEC4, "out_color")
.storage_buf(0, Qualifier::READ, "DebugSurfel", "surfels_buf[]")
.push_constant(Type::FLOAT, "surfel_radius")
.do_static_compilation(true);

View File

@ -88,11 +88,11 @@ GPU_SHADER_CREATE_INFO(eevee_aov_out)
GPU_SHADER_CREATE_INFO(eevee_render_pass_out)
.define("MAT_RENDER_PASS_SUPPORT")
.image_out(RBUFS_NORMAL_SLOT, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_normal_img")
.image_array_out(RBUFS_LIGHT_SLOT, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_light_img")
.image_out(RBUFS_DIFF_COLOR_SLOT, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_diffuse_color_img")
.image_out(RBUFS_SPEC_COLOR_SLOT, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_specular_color_img")
.image_out(RBUFS_EMISSION_SLOT, Qualifier::READ_WRITE, GPU_RGBA16F, "rp_emission_img");
.image_out(RBUFS_NORMAL_SLOT, Qualifier::WRITE, GPU_RGBA16F, "rp_normal_img")
.image_array_out(RBUFS_LIGHT_SLOT, Qualifier::WRITE, GPU_RGBA16F, "rp_light_img")
.image_out(RBUFS_DIFF_COLOR_SLOT, Qualifier::WRITE, GPU_RGBA16F, "rp_diffuse_color_img")
.image_out(RBUFS_SPEC_COLOR_SLOT, Qualifier::WRITE, GPU_RGBA16F, "rp_specular_color_img")
.image_out(RBUFS_EMISSION_SLOT, Qualifier::WRITE, GPU_RGBA16F, "rp_emission_img");
GPU_SHADER_CREATE_INFO(eevee_cryptomatte_out)
.storage_buf(CRYPTOMATTE_BUF_SLOT, Qualifier::READ, "vec2", "cryptomatte_object_buf[]")
@ -101,23 +101,29 @@ GPU_SHADER_CREATE_INFO(eevee_cryptomatte_out)
GPU_SHADER_CREATE_INFO(eevee_surf_deferred)
.vertex_out(eevee_surf_iface)
/* NOTE: This removes the possibility of using gl_FragDepth. */
// .early_fragment_test(true)
/* Direct output. */
.early_fragment_test(true)
/* Direct output. (Emissive, Holdout) */
.fragment_out(0, Type::VEC4, "out_radiance", DualBlend::SRC_0)
.fragment_out(0, Type::VEC4, "out_transmittance", DualBlend::SRC_1)
/* Gbuffer. */
// .image_out(0, Qualifier::WRITE, GPU_R11F_G11F_B10F, "gbuff_transmit_color")
// .image_out(1, Qualifier::WRITE, GPU_R11F_G11F_B10F, "gbuff_transmit_data")
// .image_out(2, Qualifier::WRITE, GPU_RGBA16F, "gbuff_transmit_normal")
// .image_out(3, Qualifier::WRITE, GPU_R11F_G11F_B10F, "gbuff_reflection_color")
// .image_out(4, Qualifier::WRITE, GPU_RGBA16F, "gbuff_reflection_normal")
// .image_out(5, Qualifier::WRITE, GPU_R11F_G11F_B10F, "gbuff_emission")
/* Render-passes. */
// .image_out(6, Qualifier::READ_WRITE, GPU_RGBA16F, "rpass_volume_light")
/* Everything is stored inside two layered targets, one for each format. This is to fit within the
 * limit on the number of images we can bind in a single shader. */
.image_array_out(GBUF_CLOSURE_SLOT, Qualifier::WRITE, GPU_RGBA16, "out_gbuff_closure_img")
.image_array_out(GBUF_COLOR_SLOT, Qualifier::WRITE, GPU_RGB10_A2, "out_gbuff_color_img")
/* Render-passes need to be declared manually to avoid overlapping with the G-buffer, which reuses
 * some of the binding points. */
.image_out(RBUFS_NORMAL_SLOT, Qualifier::WRITE, GPU_RGBA16F, "rp_normal_img")
// .image_array_out(RBUFS_LIGHT_SLOT, Qualifier::WRITE, GPU_RGBA16F, "rp_light_img")
/* TODO(fclem): Merge all render-pass into the same texture array. */
// .image_out(RBUFS_DIFF_COLOR_SLOT, Qualifier::WRITE, GPU_RGBA16F, "rp_diffuse_color_img")
.image_out(RBUFS_SPEC_COLOR_SLOT, Qualifier::WRITE, GPU_RGBA16F, "rp_specular_color_img")
.image_out(RBUFS_EMISSION_SLOT, Qualifier::WRITE, GPU_RGBA16F, "rp_emission_img")
.fragment_source("eevee_surf_deferred_frag.glsl")
.additional_info("eevee_camera",
"eevee_utility_texture",
"eevee_sampling_data",
/* Added manually to avoid overlap. */
// "eevee_render_pass_out",
"eevee_cryptomatte_out",
"eevee_aov_out");
GPU_SHADER_CREATE_INFO(eevee_surf_forward)
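
The comments in this hunk explain that the render-pass images are declared by hand so their slots do not collide with the G-buffer images, since both live in the same image-binding namespace of a single shader. The following is a small compile-time sketch of that constraint; the slot values are hypothetical stand-ins, not the real RBUFS_*/GBUF_* constants from eevee_defines.hh.

/* Hypothetical slot values for illustration only; the real constants live in
 * eevee_defines.hh and may differ. */
constexpr int GBUF_CLOSURE_SLOT_SKETCH = 0;
constexpr int GBUF_COLOR_SLOT_SKETCH = 1;
constexpr int RBUFS_NORMAL_SLOT_SKETCH = 4;
constexpr int RBUFS_SPEC_COLOR_SLOT_SKETCH = 5;
constexpr int RBUFS_EMISSION_SLOT_SKETCH = 6;

/* True when no two slots share the same image binding unit. */
constexpr bool all_distinct(const int *slots, int len)
{
  for (int i = 0; i < len; i++) {
    for (int j = i + 1; j < len; j++) {
      if (slots[i] == slots[j]) {
        return false;
      }
    }
  }
  return true;
}

constexpr int used_slots[] = {GBUF_CLOSURE_SLOT_SKETCH,
                              GBUF_COLOR_SLOT_SKETCH,
                              RBUFS_NORMAL_SLOT_SKETCH,
                              RBUFS_SPEC_COLOR_SLOT_SKETCH,
                              RBUFS_EMISSION_SLOT_SKETCH};

/* Two declarations on the same slot would bind two different images to one
 * unit; this static check makes the requirement explicit. */
static_assert(all_distinct(used_slots, 5),
              "G-buffer and render-pass images must use distinct binding slots");

int main() { return 0; }
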

View File

@ -221,7 +221,7 @@ static void external_cache_populate(void *vedata, Object *ob)
}
struct GPUBatch *geom = DRW_cache_object_surface_get(ob);
if (geom) {
/* Depth Prepass */
/* Depth Pre-pass. */
DRW_shgroup_call(stl->g_data->depth_shgrp, geom, ob);
}
}

View File

@ -309,7 +309,7 @@ typedef struct GPENCIL_PrivateData {
float fade_3d_object_opacity;
/* Mask opacity uniform. */
float mask_opacity;
/* Xray transparency in solid mode. */
/* X-ray transparency in solid mode. */
float xray_alpha;
/* Mask invert uniform. */
int mask_invert;

View File

@ -158,7 +158,7 @@ void OVERLAY_edit_uv_init(OVERLAY_Data *vedata)
pd->edit_uv.uv_opacity = sima->uv_opacity;
pd->edit_uv.do_tiled_image_overlay = show_overlays && is_image_type && is_tiled_image;
pd->edit_uv.do_tiled_image_border_overlay = is_image_type && is_tiled_image;
pd->edit_uv.dash_length = 4.0f * UI_DPI_FAC;
pd->edit_uv.dash_length = 4.0f * UI_SCALE_FAC;
pd->edit_uv.line_style = edit_uv_line_style_from_space_image(sima);
pd->edit_uv.do_smooth_wire = ((U.gpu_flag & USER_GPU_FLAG_OVERLAY_SMOOTH_WIRE) != 0);
pd->edit_uv.do_stencil_overlay = show_overlays && do_stencil_overlay;
@ -237,7 +237,7 @@ void OVERLAY_edit_uv_cache_init(OVERLAY_Data *vedata)
GPUShader *sh = OVERLAY_shader_edit_uv_verts_get();
pd->edit_uv_verts_grp = DRW_shgroup_create(sh, psl->edit_uv_verts_ps);
const float point_size = UI_GetThemeValuef(TH_VERTEX_SIZE) * U.dpi_fac;
const float point_size = UI_GetThemeValuef(TH_VERTEX_SIZE) * UI_SCALE_FAC;
DRW_shgroup_uniform_block(pd->edit_uv_verts_grp, "globalsBlock", G_draw.block_ubo);
DRW_shgroup_uniform_float_copy(
@ -261,7 +261,7 @@ void OVERLAY_edit_uv_cache_init(OVERLAY_Data *vedata)
/* uv face dots */
if (pd->edit_uv.do_face_dots) {
const float point_size = UI_GetThemeValuef(TH_FACEDOT_SIZE) * U.dpi_fac;
const float point_size = UI_GetThemeValuef(TH_FACEDOT_SIZE) * UI_SCALE_FAC;
GPUShader *sh = OVERLAY_shader_edit_uv_face_dots_get();
pd->edit_uv_face_dots_grp = DRW_shgroup_create(sh, psl->edit_uv_verts_ps);
DRW_shgroup_uniform_block(pd->edit_uv_face_dots_grp, "globalsBlock", G_draw.block_ubo);

View File

@ -225,7 +225,7 @@ static void mesh_render_data_polys_sorted_build(MeshRenderData *mr, MeshBufferCa
int i;
BM_ITER_MESH_INDEX (f, &iter, mr->bm, BM_FACES_OF_MESH, i) {
if (!BM_elem_flag_test(f, BM_ELEM_HIDDEN)) {
const int mat = min_ii(f->mat_nr, mat_last);
const int mat = clamp_i(f->mat_nr, 0, mat_last);
tri_first_index[i] = mat_tri_offs[mat];
mat_tri_offs[mat] += f->len - 2;
}
@ -238,7 +238,7 @@ static void mesh_render_data_polys_sorted_build(MeshRenderData *mr, MeshBufferCa
for (int i = 0; i < mr->poly_len; i++) {
if (!(mr->use_hide && mr->hide_poly && mr->hide_poly[i])) {
const MPoly &poly = mr->polys[i];
const int mat = min_ii(mr->material_indices ? mr->material_indices[i] : 0, mat_last);
const int mat = mr->material_indices ? clamp_i(mr->material_indices[i], 0, mat_last) : 0;
tri_first_index[i] = mat_tri_offs[mat];
mat_tri_offs[mat] += poly.totloop - 2;
}
@ -263,7 +263,7 @@ static void mesh_render_data_mat_tri_len_bm_range_fn(void *__restrict userdata,
BMesh *bm = mr->bm;
BMFace *efa = BM_face_at_index(bm, iter);
if (!BM_elem_flag_test(efa, BM_ELEM_HIDDEN)) {
int mat = min_ii(efa->mat_nr, mr->mat_len - 1);
int mat = clamp_i(efa->mat_nr, 0, mr->mat_len - 1);
mat_tri_len[mat] += efa->len - 2;
}
}
@ -277,7 +277,9 @@ static void mesh_render_data_mat_tri_len_mesh_range_fn(void *__restrict userdata
const MPoly &poly = mr->polys[iter];
if (!(mr->use_hide && mr->hide_poly && mr->hide_poly[iter])) {
int mat = min_ii(mr->material_indices ? mr->material_indices[iter] : 0, mr->mat_len - 1);
const int mat = mr->material_indices ?
clamp_i(mr->material_indices[iter], 0, mr->mat_len - 1) :
0;
mat_tri_len[mat] += poly.totloop - 2;
}
}
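
The hunks above swap `min_ii` for `clamp_i` when mapping material indices: `min_ii` alone handles over-range positive indices, but a negative index (possible with stale or malformed data) would slip through and index the per-material offset array out of bounds. A minimal sketch of the difference follows, using local stand-ins for the BLI helpers rather than the real implementations.

#include <cassert>

/* Local stand-ins for the BLI math helpers referenced in the diff. */
static int min_ii_sketch(int a, int b)
{
  return (a < b) ? a : b;
}

static int clamp_i_sketch(int value, int min, int max)
{
  return (value < min) ? min : (value > max) ? max : value;
}

int main()
{
  const int mat_last = 3; /* Four material slots: 0..3. */

  /* A stale or corrupt index can be negative. */
  const int bad_index = -1;

  /* min_ii() leaves the negative value untouched... */
  assert(min_ii_sketch(bad_index, mat_last) == -1);

  /* ...while clamp_i() maps it back into the valid slot range. */
  assert(clamp_i_sketch(bad_index, 0, mat_last) == 0);

  /* Over-range indices are handled the same way by both. */
  assert(min_ii_sketch(7, mat_last) == 3);
  assert(clamp_i_sketch(7, 0, mat_last) == 3);

  return 0;
}
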

View File

@ -358,7 +358,7 @@ static DRW_MeshCDMask mesh_cd_calc_used_gpu_layers(const Object *object,
CustomData_get_named_layer(cd_ldata, CD_PROP_FLOAT2, name) :
CustomData_get_render_layer(cd_ldata, CD_PROP_FLOAT2);
}
if (layer != -1) {
if (layer != -1 && !CustomData_layer_is_anonymous(cd_ldata, CD_PROP_FLOAT2, layer)) {
cd_used.uv |= (1 << layer);
}
break;
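
The added check above excludes anonymous `CD_PROP_FLOAT2` layers from the used-UV bitmask, so runtime-only attribute layers never request GPU UV data. Below is a tiny sketch of that bitmask filtering, with a hypothetical `is_anonymous` field standing in for `CustomData_layer_is_anonymous()`.

#include <cstdint>
#include <cstdio>
#include <vector>

struct UVLayerSketch {
  const char *name;
  bool is_anonymous; /* Stand-in for CustomData_layer_is_anonymous(). */
};

/* Build a bitmask of UV layers that should be uploaded to the GPU,
 * skipping anonymous (runtime-only) layers as the diff above does. */
static uint32_t used_uv_mask(const std::vector<UVLayerSketch> &layers)
{
  uint32_t mask = 0;
  for (size_t i = 0; i < layers.size(); i++) {
    if (!layers[i].is_anonymous) {
      mask |= (1u << i);
    }
  }
  return mask;
}

int main()
{
  const std::vector<UVLayerSketch> layers = {
      {"UVMap", false}, {".a_runtime_layer", true}, {"UVMap.001", false}};
  std::printf("0x%x\n", used_uv_mask(layers)); /* 0x5: layers 0 and 2. */
  return 0;
}
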

View File

@ -838,8 +838,12 @@ static DRWSubdivCache *mesh_batch_cache_ensure_subdiv_cache(MeshBatchCache *mbc)
static void draw_subdiv_invalidate_evaluator_for_orco(Subdiv *subdiv, Mesh *mesh)
{
if (!(subdiv && subdiv->evaluator)) {
return;
}
const bool has_orco = CustomData_has_layer(&mesh->vdata, CD_ORCO);
if (has_orco && subdiv->evaluator && !subdiv->evaluator->hasVertexData(subdiv->evaluator)) {
if (has_orco && !subdiv->evaluator->hasVertexData(subdiv->evaluator)) {
/* If we suddenly have/need original coordinates, recreate the evaluator if the extra
* source was not created yet. The refiner also has to be recreated as refinement for source
* and vertex data is done only once. */
@ -2113,13 +2117,13 @@ static bool draw_subdiv_create_requested_buffers(Object *ob,
bm = mesh->edit_mesh->bm;
}
draw_subdiv_invalidate_evaluator_for_orco(runtime_data->subdiv_gpu, mesh_eval);
Subdiv *subdiv = BKE_subsurf_modifier_subdiv_descriptor_ensure(runtime_data, mesh_eval, true);
if (!subdiv) {
return false;
}
draw_subdiv_invalidate_evaluator_for_orco(subdiv, mesh_eval);
if (!BKE_subdiv_eval_begin_from_mesh(
subdiv, mesh_eval, nullptr, SUBDIV_EVALUATOR_TYPE_GPU, evaluator_cache)) {
/* This could happen in two situations:

View File

@ -1235,9 +1235,7 @@ static void sculpt_draw_cb(DRWSculptCallbackData *scd,
if (scd->use_mats) {
index = drw_pbvh_material_index_get(batches);
if (index >= scd->num_shading_groups) {
index = 0;
}
index = clamp_i(index, 0, scd->num_shading_groups - 1);
}
DRWShadingGroup *shgrp = scd->shading_groups[index];

View File

@ -127,7 +127,7 @@ static void drw_text_cache_draw_ex(DRWTextStore *dt, ARegion *region)
const uiStyle *style = UI_style_get();
BLF_size(font_id, style->widget.points * U.dpi_fac);
BLF_size(font_id, style->widget.points * UI_SCALE_FAC);
BLI_memiter_iter_init(dt->cache_strings, &it);
while ((vos = static_cast<ViewCachedString *>(BLI_memiter_iter_step(&it)))) {
@ -249,7 +249,7 @@ void DRW_text_edit_mesh_measure_stats(ARegion *region,
if ((v3d->overlay.edit_flag & V3D_OVERLAY_EDIT_INDICES) && (em->selectmode & SCE_SELECT_EDGE)) {
edge_tex_count += 1;
}
const short edge_tex_sep = short((edge_tex_count - 1) * 5.0f * U.dpi_fac);
const short edge_tex_sep = short((edge_tex_count - 1) * 5.0f * UI_SCALE_FAC);
/* Make the precision of the display value proportionate to the grid-size. */

View File

@ -206,7 +206,8 @@ void DRW_texture_pool_reset(DRWTexturePool *pool)
}
}
BLI_assert(pool->tmp_tex_acquired.is_empty());
BLI_assert_msg(pool->tmp_tex_acquired.is_empty(),
"Missing a TextureFromPool.release() before end of draw.");
for (GPUTexture *tmp_tex : pool->tmp_tex_pruned) {
GPU_texture_free(tmp_tex);
}

Some files were not shown because too many files have changed in this diff.