Geometry Nodes: initial Volume Grid socket support #115270

Merged
Jacques Lucke merged 193 commits from LukasTonne/blender:volume-grid-sockets into main 2023-12-20 22:33:26 +01:00
26 changed files with 201 additions and 106 deletions
Showing only changes of commit 7d84a84d2f - Show all commits

View File

@ -60,7 +60,7 @@ class CPUKernels {
int x,
int y,
float threshold,
bool reset,
int reset,
int offset,
int stride)>;

View File

@ -40,7 +40,7 @@ class OneapiDeviceQueue : public DeviceQueue {
virtual void copy_to_device(device_memory &mem) override;
virtual void copy_from_device(device_memory &mem) override;
virtual bool supports_local_atomic_sort() const
virtual bool supports_local_atomic_sort() const override
{
return true;
}

View File

@ -27,7 +27,6 @@ struct DeviceKernelArguments {
POINTER,
INT32,
FLOAT32,
BOOLEAN,
KERNEL_FILM_CONVERT,
};
@ -66,10 +65,6 @@ struct DeviceKernelArguments {
{
add(FLOAT32, value, sizeof(float));
}
void add(const bool *value)
{
add(BOOLEAN, value, 4);
}
void add(const Type type, const void *value, size_t size)
{
assert(count < MAX_ARGS);

View File

@ -103,7 +103,7 @@ class DenoiserGPU : public Denoiser {
int denoised_offset;
int num_components;
bool use_compositing;
int use_compositing;
bool use_denoising_albedo;
};

View File

@ -1055,6 +1055,7 @@ int PathTraceWorkGPU::adaptive_sampling_convergence_check_count_active(float thr
queue_->zero_to_device(num_active_pixels);
const int work_size = effective_buffer_params_.width * effective_buffer_params_.height;
const int reset_int = reset; /* No bool kernel arguments. */
DeviceKernelArguments args(&buffers_->buffer.device_pointer,
&effective_buffer_params_.full_x,
@ -1062,7 +1063,7 @@ int PathTraceWorkGPU::adaptive_sampling_convergence_check_count_active(float thr
&effective_buffer_params_.width,
&effective_buffer_params_.height,
&threshold,
&reset,
&reset_int,
&effective_buffer_params_.offset,
&effective_buffer_params_.stride,
&num_active_pixels.device_pointer);

View File

@ -101,7 +101,7 @@ bool KERNEL_FUNCTION_FULL_NAME(adaptive_sampling_convergence_check)(
int x,
int y,
float threshold,
bool reset,
int reset,
int offset,
int stride);

View File

@ -164,7 +164,7 @@ bool KERNEL_FUNCTION_FULL_NAME(adaptive_sampling_convergence_check)(
int x,
int y,
float threshold,
bool reset,
int reset,
int offset,
int stride)
{

View File

@ -668,7 +668,7 @@ ccl_gpu_kernel(GPU_KERNEL_BLOCK_NUM_THREADS, GPU_KERNEL_MAX_REGISTERS)
int sw,
int sh,
float threshold,
bool reset,
int reset,
int offset,
int stride,
ccl_global uint *num_active_pixels)
@ -1104,7 +1104,7 @@ ccl_gpu_kernel(GPU_KERNEL_BLOCK_NUM_THREADS, GPU_KERNEL_MAX_REGISTERS)
int pass_denoised,
int pass_sample_count,
int num_components,
bool use_compositing)
int use_compositing)
{
const int work_index = ccl_gpu_global_id_x();
const int y = work_index / width;

View File

@ -34,7 +34,7 @@ ccl_device bool film_adaptive_sampling_convergence_check(KernelGlobals kg,
int x,
int y,
float threshold,
bool reset,
int reset,
int offset,
int stride)
{

View File

@ -3372,14 +3372,23 @@ PrincipledHairBsdfNode::PrincipledHairBsdfNode() : BsdfBaseNode(get_node_type())
closure = CLOSURE_BSDF_HAIR_HUANG_ID;
}
void PrincipledHairBsdfNode::attributes(Shader *shader, AttributeRequestSet *attributes)
/* Treat hair as transparent if the hit is outside of the projected width. */
bool PrincipledHairBsdfNode::has_surface_transparent()
{
if (model == NODE_PRINCIPLED_HAIR_HUANG) {
/* Make sure we have the normal for elliptical cross section tracking. */
if (aspect_ratio != 1.0f || input("Aspect Ratio")->link) {
attributes->add(ATTR_STD_VERTEX_NORMAL);
return true;
}
}
return false;
}
void PrincipledHairBsdfNode::attributes(Shader *shader, AttributeRequestSet *attributes)
{
if (has_surface_transparent()) {
/* Make sure we have the normal for elliptical cross section tracking. */
attributes->add(ATTR_STD_VERTEX_NORMAL);
}
if (!input("Random")->link) {
/* Enable retrieving Hair Info -> Random if Random isn't linked. */

View File

@ -851,6 +851,8 @@ class PrincipledHairBsdfNode : public BsdfBaseNode {
{
return ShaderNode::get_feature() | KERNEL_FEATURE_NODE_PRINCIPLED_HAIR;
}
bool has_surface_transparent();
};
class HairBsdfNode : public BsdfNode {

View File

@ -32,7 +32,7 @@ FileHandlerType *BKE_file_handler_find(const char *name)
void BKE_file_handler_add(std::unique_ptr<FileHandlerType> file_handler)
{
BLI_assert(BKE_file_handler_find(file_handler->idname) != nullptr);
BLI_assert(BKE_file_handler_find(file_handler->idname) == nullptr);
/** Load all extensions from the string list into the list. */
const char char_separator = ';';

View File

@ -816,6 +816,7 @@ class Vector {
{
const T *prev_end = this->end();
end_ = std::remove_if(this->begin(), this->end(), predicate);
destruct_n(end_, prev_end - end_);
UPDATE_VECTOR_SIZE(this);
return int64_t(prev_end - end_);
}

View File

@ -431,6 +431,17 @@ TEST(vector, RemoveIf)
EXPECT_EQ_ARRAY(vec.data(), expected_vec.data(), size_t(vec.size()));
}
TEST(vector, RemoveIfNonTrivialDestructible)
{
Vector<Vector<int, 0, GuardedAllocator>> vec;
for ([[maybe_unused]] const int64_t i : IndexRange(10)) {
/* This test relies on leak detection to run after tests. */
vec.append(Vector<int, 0, GuardedAllocator>(100));
}
vec.remove_if([&](const auto & /*value*/) { return true; });
EXPECT_TRUE(vec.is_empty());
}
TEST(vector, ExtendSmallVector)
{
Vector<int> a = {2, 3, 4};

View File

@ -23,6 +23,14 @@ void main()
vec4 sampled_color = textureGrad(input_tx, projected_coordinates, x_gradient, y_gradient);
/* The plane mask is 1 if it is inside the plane and 0 otherwise. However, we use the alpha value
* of the sampled color for pixels outside of the plane to utilize the anti-aliasing effect of
* the anisotropic filtering. Therefore, the input_tx sampler should use anisotropic filtering
* and be clamped to zero border color. */
bool is_inside_plane = all(greaterThanEqual(projected_coordinates, vec2(0.0))) &&
all(lessThanEqual(projected_coordinates, vec2(1.0)));
float mask_value = is_inside_plane ? 1.0 : sampled_color.a;
imageStore(output_img, texel, sampled_color);
imageStore(mask_img, texel, sampled_color.aaaa);
imageStore(mask_img, texel, vec4(mask_value));
}

View File

@ -661,6 +661,16 @@ bool ui_popup_context_menu_for_button(bContext *C, uiBut *but, const wmEvent *ev
PointerRNA op_ptr;
wmOperatorType *ot;
ot = WM_operatortype_find("ANIM_OT_view_curve_in_graph_editor", false);
uiItemFullO_ptr(layout,
ot,
CTX_IFACE_(BLT_I18NCONTEXT_OPERATOR_DEFAULT, "View All in Graph Editor"),
ICON_NONE,
nullptr,
WM_OP_INVOKE_DEFAULT,
UI_ITEM_NONE,
&op_ptr);
RNA_boolean_set(&op_ptr, "all", true);
uiItemFullO_ptr(
layout,
ot,
@ -671,16 +681,6 @@ bool ui_popup_context_menu_for_button(bContext *C, uiBut *but, const wmEvent *ev
UI_ITEM_NONE,
&op_ptr);
RNA_boolean_set(&op_ptr, "all", false);
uiItemFullO_ptr(layout,
ot,
CTX_IFACE_(BLT_I18NCONTEXT_OPERATOR_DEFAULT, "View All in Graph Editor"),
ICON_NONE,
nullptr,
WM_OP_INVOKE_DEFAULT,
UI_ITEM_NONE,
&op_ptr);
RNA_boolean_set(&op_ptr, "all", true);
}
else {
PointerRNA op_ptr;

View File

@ -260,7 +260,7 @@ static void partialvis_update_mesh(Object &object,
if (action == VisAction::Show && mask.is_empty()) {
mesh_show_all(object, nodes);
}
else {
else if (!mask.is_empty()) {
vert_hide_update(object, nodes, [&](const Span<int> verts, MutableSpan<bool> hide) {
for (const int i : verts.index_range()) {
if (mask[verts[i]] > 0.5f) {

View File

@ -1028,7 +1028,7 @@ static int sculpt_face_set_change_visibility_exec(bContext *C, wmOperator *op)
else {
face_hide_update(object, nodes, [&](const Span<int> faces, MutableSpan<bool> hide) {
for (const int i : hide.index_range()) {
hide[i] = face_sets[faces[i]] == active_face_set;
hide[i] = face_sets[faces[i]] != active_face_set;
}
});
}

View File

@ -886,8 +886,6 @@ if(WITH_GTESTS)
if(WITH_OPENGL_DRAW_TESTS)
list(APPEND TEST_SRC
tests/gpu_testing.cc
tests/buffer_texture_test.cc
tests/compute_test.cc
tests/framebuffer_test.cc
@ -899,8 +897,7 @@ if(WITH_GTESTS)
tests/storage_buffer_test.cc
tests/texture_test.cc
tests/vertex_buffer_test.cc
tests/gpu_testing.hh
tests/shader_create_info_test.cc
)
endif()
@ -911,7 +908,19 @@ if(WITH_GTESTS)
)
endif()
# Enable shader validation on buildbot for Metal
if(WITH_METAL_BACKEND AND NOT WITH_OPENGL_DRAW_TESTS)
list(APPEND TEST_SRC
tests/shader_create_info_test.cc
)
endif()
if (TEST_SRC)
list(APPEND TEST_SRC
tests/gpu_testing.cc
tests/gpu_testing.hh
)
include(GTestTesting)
blender_add_test_lib(bf_gpu_tests "${TEST_SRC}" "${INC};${TEST_INC}" "${INC_SYS}" "${LIB};${TEST_LIB}")
endif()

View File

@ -6,6 +6,8 @@
* \ingroup gpu
*/
#include <iomanip>
#include "BKE_global.h"
#include "BLI_string.h"
@ -1022,116 +1024,116 @@ bool GLShader::do_geometry_shader_injection(const shader::ShaderCreateInfo *info
/** \name Shader stage creation
* \{ */
static char *glsl_patch_default_get()
static const char *glsl_patch_default_get()
{
/** Used for shader patching. Init once. */
static char patch[2048] = "\0";
if (patch[0] != '\0') {
return patch;
static std::string patch;
if (!patch.empty()) {
return patch.c_str();
}
size_t slen = 0;
std::stringstream ss;
/* Version needs to go first. */
if (epoxy_gl_version() >= 43) {
STR_CONCAT(patch, slen, "#version 430\n");
ss << "#version 430\n";
}
else {
STR_CONCAT(patch, slen, "#version 330\n");
ss << "#version 330\n";
}
/* Enable extensions for features that are not part of our base GLSL version
* don't use an extension for something already available! */
if (GLContext::texture_gather_support) {
STR_CONCAT(patch, slen, "#extension GL_ARB_texture_gather: enable\n");
ss << "#extension GL_ARB_texture_gather: enable\n";
/* Some drivers don't agree on epoxy_has_gl_extension("GL_ARB_texture_gather") and the actual
* support in the shader so double check the preprocessor define (see #56544). */
STR_CONCAT(patch, slen, "#ifdef GL_ARB_texture_gather\n");
STR_CONCAT(patch, slen, "# define GPU_ARB_texture_gather\n");
STR_CONCAT(patch, slen, "#endif\n");
ss << "#ifdef GL_ARB_texture_gather\n";
ss << "# define GPU_ARB_texture_gather\n";
ss << "#endif\n";
}
if (GLContext::shader_draw_parameters_support) {
STR_CONCAT(patch, slen, "#extension GL_ARB_shader_draw_parameters : enable\n");
STR_CONCAT(patch, slen, "#define GPU_ARB_shader_draw_parameters\n");
STR_CONCAT(patch, slen, "#define gpu_BaseInstance gl_BaseInstanceARB\n");
ss << "#extension GL_ARB_shader_draw_parameters : enable\n";
ss << "#define GPU_ARB_shader_draw_parameters\n";
ss << "#define gpu_BaseInstance gl_BaseInstanceARB\n";
}
if (GLContext::geometry_shader_invocations) {
STR_CONCAT(patch, slen, "#extension GL_ARB_gpu_shader5 : enable\n");
STR_CONCAT(patch, slen, "#define GPU_ARB_gpu_shader5\n");
ss << "#extension GL_ARB_gpu_shader5 : enable\n";
ss << "#define GPU_ARB_gpu_shader5\n";
}
if (GLContext::texture_cube_map_array_support) {
STR_CONCAT(patch, slen, "#extension GL_ARB_texture_cube_map_array : enable\n");
STR_CONCAT(patch, slen, "#define GPU_ARB_texture_cube_map_array\n");
ss << "#extension GL_ARB_texture_cube_map_array : enable\n";
ss << "#define GPU_ARB_texture_cube_map_array\n";
}
if (epoxy_has_gl_extension("GL_ARB_conservative_depth")) {
STR_CONCAT(patch, slen, "#extension GL_ARB_conservative_depth : enable\n");
ss << "#extension GL_ARB_conservative_depth : enable\n";
}
if (GPU_shader_image_load_store_support()) {
STR_CONCAT(patch, slen, "#extension GL_ARB_shader_image_load_store: enable\n");
STR_CONCAT(patch, slen, "#extension GL_ARB_shading_language_420pack: enable\n");
ss << "#extension GL_ARB_shader_image_load_store: enable\n";
ss << "#extension GL_ARB_shading_language_420pack: enable\n";
}
if (GLContext::layered_rendering_support) {
STR_CONCAT(patch, slen, "#extension GL_ARB_shader_viewport_layer_array: enable\n");
STR_CONCAT(patch, slen, "#define gpu_Layer gl_Layer\n");
STR_CONCAT(patch, slen, "#define gpu_ViewportIndex gl_ViewportIndex\n");
ss << "#extension GL_ARB_shader_viewport_layer_array: enable\n";
ss << "#define gpu_Layer gl_Layer\n";
ss << "#define gpu_ViewportIndex gl_ViewportIndex\n";
}
if (GLContext::native_barycentric_support) {
STR_CONCAT(patch, slen, "#extension GL_AMD_shader_explicit_vertex_parameter: enable\n");
ss << "#extension GL_AMD_shader_explicit_vertex_parameter: enable\n";
}
if (GLContext::framebuffer_fetch_support) {
STR_CONCAT(patch, slen, "#extension GL_EXT_shader_framebuffer_fetch: enable\n");
ss << "#extension GL_EXT_shader_framebuffer_fetch: enable\n";
}
/* Fallbacks. */
if (!GLContext::shader_draw_parameters_support) {
STR_CONCAT(patch, slen, "uniform int gpu_BaseInstance;\n");
ss << "uniform int gpu_BaseInstance;\n";
}
/* Vulkan GLSL compatibility. */
STR_CONCAT(patch, slen, "#define gpu_InstanceIndex (gl_InstanceID + gpu_BaseInstance)\n");
STR_CONCAT(patch, slen, "#define gpu_EmitVertex EmitVertex\n");
ss << "#define gpu_InstanceIndex (gl_InstanceID + gpu_BaseInstance)\n";
ss << "#define gpu_EmitVertex EmitVertex\n";
/* Array compatibility. */
STR_CONCAT(patch, slen, "#define gpu_Array(_type) _type[]\n");
ss << "#define gpu_Array(_type) _type[]\n";
/* Derivative sign can change depending on implementation. */
STR_CONCATF(patch, slen, "#define DFDX_SIGN %1.1f\n", GLContext::derivative_signs[0]);
STR_CONCATF(patch, slen, "#define DFDY_SIGN %1.1f\n", GLContext::derivative_signs[1]);
ss << "#define DFDX_SIGN " << std::setprecision(2) << GLContext::derivative_signs[0] << "\n";
ss << "#define DFDY_SIGN " << std::setprecision(2) << GLContext::derivative_signs[1] << "\n";
/* GLSL Backend Lib. */
STR_CONCAT(patch, slen, datatoc_glsl_shader_defines_glsl);
ss << datatoc_glsl_shader_defines_glsl;
BLI_assert(slen < sizeof(patch));
return patch;
patch = ss.str();
return patch.c_str();
}
static char *glsl_patch_compute_get()
static const char *glsl_patch_compute_get()
{
/** Used for shader patching. Init once. */
static char patch[2048] = "\0";
if (patch[0] != '\0') {
return patch;
static std::string patch;
if (!patch.empty()) {
return patch.c_str();
}
size_t slen = 0;
std::stringstream ss;
/* Version needs to go first. */
STR_CONCAT(patch, slen, "#version 430\n");
STR_CONCAT(patch, slen, "#extension GL_ARB_compute_shader :enable\n");
ss << "#version 430\n";
ss << "#extension GL_ARB_compute_shader :enable\n";
if (GLContext::texture_cube_map_array_support) {
STR_CONCAT(patch, slen, "#extension GL_ARB_texture_cube_map_array : enable\n");
STR_CONCAT(patch, slen, "#define GPU_ARB_texture_cube_map_array\n");
ss << "#extension GL_ARB_texture_cube_map_array : enable\n";
ss << "#define GPU_ARB_texture_cube_map_array\n";
}
/* Array compatibility. */
STR_CONCAT(patch, slen, "#define gpu_Array(_type) _type[]\n");
ss << "#define gpu_Array(_type) _type[]\n";
STR_CONCAT(patch, slen, datatoc_glsl_shader_defines_glsl);
ss << datatoc_glsl_shader_defines_glsl;
BLI_assert(slen < sizeof(patch));
return patch;
patch = ss.str();
return patch.c_str();
}
char *GLShader::glsl_patch_get(GLenum gl_stage)
const char *GLShader::glsl_patch_get(GLenum gl_stage)
{
if (gl_stage == GL_COMPUTE_SHADER) {
return glsl_patch_compute_get();

View File

@ -88,7 +88,7 @@ class GLShader : public Shader {
}
private:
char *glsl_patch_get(GLenum gl_stage);
const char *glsl_patch_get(GLenum gl_stage);
/** Create, compile and attach the shader stage to the shader program. */
GLuint create_shader_stage(GLenum gl_stage, MutableSpan<const char *> sources);

View File

@ -15,14 +15,12 @@
#include "GHOST_C-api.h"
#include "BKE_global.h"
namespace blender::gpu {
void GPUTest::SetUp()
{
prev_g_debug_ = G.debug;
G.debug |= G_DEBUG_GPU | G_DEBUG_GPU_RENDERDOC;
G.debug |= g_debug_flags_;
CLG_init();
GPU_backend_type_selection_set(gpu_backend_type);

View File

@ -4,19 +4,18 @@
#include "testing/testing.h"
#include "BKE_global.h"
#include "GHOST_C-api.h"
#include "GPU_platform.h"
struct GPUContext;
namespace blender::gpu {
/* Test class that setups a GPUContext for test cases.
*
* Usage:
* TEST_F(GPUTest, my_gpu_test) {
* ...
* }
/**
* Test class that sets up a GPUContext for test cases.
*/
class GPUTest : public ::testing::Test {
private:
@ -26,11 +25,16 @@ class GPUTest : public ::testing::Test {
GHOST_ContextHandle ghost_context;
GPUContext *context;
int32_t g_debug_flags_;
int32_t prev_g_debug_;
protected:
GPUTest(GHOST_TDrawingContextType draw_context_type, eGPUBackendType gpu_backend_type)
: draw_context_type(draw_context_type), gpu_backend_type(gpu_backend_type)
GPUTest(GHOST_TDrawingContextType draw_context_type,
eGPUBackendType gpu_backend_type,
int32_t g_debug_flags)
: draw_context_type(draw_context_type),
gpu_backend_type(gpu_backend_type),
g_debug_flags_(g_debug_flags)
{
}
@ -41,7 +45,12 @@ class GPUTest : public ::testing::Test {
#ifdef WITH_OPENGL_BACKEND
class GPUOpenGLTest : public GPUTest {
public:
GPUOpenGLTest() : GPUTest(GHOST_kDrawingContextTypeOpenGL, GPU_BACKEND_OPENGL) {}
GPUOpenGLTest()
: GPUTest(GHOST_kDrawingContextTypeOpenGL,
GPU_BACKEND_OPENGL,
G_DEBUG_GPU | G_DEBUG_GPU_RENDERDOC)
{
}
};
# define GPU_OPENGL_TEST(test_name) \
TEST_F(GPUOpenGLTest, test_name) \
@ -55,10 +64,24 @@ class GPUOpenGLTest : public GPUTest {
#ifdef WITH_METAL_BACKEND
class GPUMetalTest : public GPUTest {
public:
GPUMetalTest() : GPUTest(GHOST_kDrawingContextTypeMetal, GPU_BACKEND_METAL) {}
GPUMetalTest() : GPUTest(GHOST_kDrawingContextTypeMetal, GPU_BACKEND_METAL, G_DEBUG_GPU) {}
};
class GPUMetalWorkaroundsTest : public GPUTest {
public:
GPUMetalWorkaroundsTest()
: GPUTest(GHOST_kDrawingContextTypeMetal,
GPU_BACKEND_METAL,
G_DEBUG_GPU | G_DEBUG_GPU_FORCE_WORKAROUNDS)
{
}
};
# define GPU_METAL_TEST(test_name) \
TEST_F(GPUMetalTest, test_name) \
{ \
test_##test_name(); \
} \
TEST_F(GPUMetalWorkaroundsTest, test_name) \
{ \
test_##test_name(); \
}
@ -69,10 +92,29 @@ class GPUMetalTest : public GPUTest {
#ifdef WITH_VULKAN_BACKEND
class GPUVulkanTest : public GPUTest {
public:
GPUVulkanTest() : GPUTest(GHOST_kDrawingContextTypeVulkan, GPU_BACKEND_VULKAN) {}
GPUVulkanTest()
: GPUTest(GHOST_kDrawingContextTypeVulkan,
GPU_BACKEND_VULKAN,
G_DEBUG_GPU | G_DEBUG_GPU_RENDERDOC)
{
}
};
class GPUVulkanWorkaroundsTest : public GPUTest {
public:
GPUVulkanWorkaroundsTest()
: GPUTest(GHOST_kDrawingContextTypeVulkan,
GPU_BACKEND_VULKAN,
G_DEBUG_GPU | G_DEBUG_GPU_RENDERDOC | G_DEBUG_GPU_FORCE_WORKAROUNDS)
{
}
};
# define GPU_VULKAN_TEST(test_name) \
TEST_F(GPUVulkanTest, test_name) \
{ \
test_##test_name(); \
} \
TEST_F(GPUVulkanWorkaroundsTest, test_name) \
{ \
test_##test_name(); \
}

View File

@ -0,0 +1,21 @@
/* SPDX-FileCopyrightText: 2023 Blender Authors
*
* SPDX-License-Identifier: Apache-2.0 */
#include "testing/testing.h"
#include "gpu_shader_create_info_private.hh"
#include "gpu_testing.hh"
namespace blender::gpu::tests {
/**
* Test if all static shaders can be compiled.
*/
static void test_static_shaders()
{
EXPECT_TRUE(gpu_shader_create_info_compile(nullptr));
}
GPU_TEST(static_shaders)
} // namespace blender::gpu::tests

View File

@ -29,12 +29,6 @@ namespace blender::gpu::tests {
using namespace blender::gpu::shader;
static void test_shader_compile_statically_defined()
{
EXPECT_TRUE(gpu_shader_create_info_compile(nullptr));
}
GPU_TEST(shader_compile_statically_defined)
static void test_shader_compute_2d()
{

View File

@ -54,7 +54,9 @@ enum {
* the array is recalculated from scratch; there is no extra attempt to maintain the validity over
* time.
*
* #MLoopTri is stored in an array, where triangles tessellated from faces stored contiguously.
* #MLoopTri is stored in an array, where triangles from each face are stored sequentially.
The triangle order is guaranteed to match the face order: the first triangle is always
from the first face, and the last triangle from the last face.
* The number of triangles for each polygon is guaranteed to be the corner count - 2, even for
* degenerate geometry (see #bke::mesh::face_triangles_num).
*