WIP: Onion Skinning Prototype #107641

Closed
Christoph Lendenfeld wants to merge 22 commits from ChrisLend/blender:onion_skin_test into main

110 changed files with 3434 additions and 888 deletions
Showing only changes of commit bbe5da28f6

View File

@ -503,7 +503,7 @@ if(NOT APPLE)
mark_as_advanced(WITH_CYCLES_DEVICE_CUDA)
option(WITH_CYCLES_CUDA_BINARIES "Build Cycles NVIDIA CUDA binaries" OFF)
set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 sm_86 compute_75 CACHE STRING "CUDA architectures to build binaries for")
set(CYCLES_CUDA_BINARIES_ARCH sm_30 sm_35 sm_37 sm_50 sm_52 sm_60 sm_61 sm_70 sm_75 sm_86 sm_89 compute_75 CACHE STRING "CUDA architectures to build binaries for")
option(WITH_CYCLES_CUDA_BUILD_SERIAL "Build cubins one after another (useful on machines with limited RAM)" OFF)
option(WITH_CUDA_DYNLOAD "Dynamically load CUDA libraries at runtime (for developers, makes cuda-gdb work)" ON)

View File

@ -14,6 +14,7 @@ set(EMBREE_EXTRA_ARGS
-DEMBREE_BACKFACE_CULLING=OFF
-DEMBREE_BACKFACE_CULLING_CURVES=ON
-DEMBREE_BACKFACE_CULLING_SPHERES=ON
-DEMBREE_NO_SPLASH=ON
-DEMBREE_TASKING_SYSTEM=TBB
-DEMBREE_TBB_ROOT=${LIBDIR}/tbb
-DTBB_ROOT=${LIBDIR}/tbb

View File

@ -477,9 +477,9 @@ set(SQLITE_HASH_TYPE SHA1)
set(SQLITE_FILE sqlite-autoconf-${SQLLITE_LONG_VERSION}.tar.gz)
set(SQLITE_CPE "cpe:2.3:a:sqlite:sqlite:${SQLITE_VERSION}:*:*:*:*:*:*:*")
set(EMBREE_VERSION 4.0.1)
set(EMBREE_VERSION 4.1.0)
set(EMBREE_URI https://github.com/embree/embree/archive/v${EMBREE_VERSION}.zip)
set(EMBREE_HASH dd26617719a587e126b341d1b32f7fd0)
set(EMBREE_HASH 4b525955b08e1249a700dea5b5ffc8b2)
set(EMBREE_HASH_TYPE MD5)
set(EMBREE_FILE embree-v${EMBREE_VERSION}.zip)

View File

@ -24,144 +24,3 @@ index 7c2f43d..106b1d5 100644
DISABLE_STACK_PROTECTOR_FOR_INTERSECTORS(${EMBREE_LIBRARY_FILES_AVX2})
ADD_LIBRARY(embree_avx2 STATIC ${EMBREE_LIBRARY_FILES_AVX2})
TARGET_LINK_LIBRARIES(embree_avx2 PRIVATE tasking)
diff --git a/include/embree4/rtcore_device.h b/include/embree4/rtcore_device.h
index 45bf95583..62ee7787d 100644
--- a/include/embree4/rtcore_device.h
+++ b/include/embree4/rtcore_device.h
@@ -55,6 +55,7 @@ enum RTCDeviceProperty
RTC_DEVICE_PROPERTY_FILTER_FUNCTION_SUPPORTED = 66,
RTC_DEVICE_PROPERTY_IGNORE_INVALID_RAYS_ENABLED = 67,
RTC_DEVICE_PROPERTY_COMPACT_POLYS_ENABLED = 68,
+ RTC_DEVICE_PROPERTY_BACKFACE_CULLING_SPHERES_ENABLED = 69,
RTC_DEVICE_PROPERTY_TRIANGLE_GEOMETRY_SUPPORTED = 96,
RTC_DEVICE_PROPERTY_QUAD_GEOMETRY_SUPPORTED = 97,
diff --git a/kernels/common/device.cpp b/kernels/common/device.cpp
index 3ffac7e37..215ccc961 100644
--- a/kernels/common/device.cpp
+++ b/kernels/common/device.cpp
@@ -170,6 +170,9 @@ namespace embree
#if defined (EMBREE_BACKFACE_CULLING_CURVES)
v += "backfacecullingcurves ";
#endif
+#if defined (EMBREE_BACKFACE_CULLING_SPHERES)
+ v += "backfacecullingspheres ";
+#endif
#if defined(EMBREE_FILTER_FUNCTION)
v += "intersection_filter ";
#endif
@@ -477,6 +480,12 @@ namespace embree
case RTC_DEVICE_PROPERTY_BACKFACE_CULLING_CURVES_ENABLED: return 0;
#endif
+#if defined(EMBREE_BACKFACE_CULLING_SPHERES)
+ case RTC_DEVICE_PROPERTY_BACKFACE_CULLING_SPHERES_ENABLED: return 1;
+#else
+ case RTC_DEVICE_PROPERTY_BACKFACE_CULLING_SPHERES_ENABLED: return 0;
+#endif
+
#if defined(EMBREE_COMPACT_POLYS)
case RTC_DEVICE_PROPERTY_COMPACT_POLYS_ENABLED: return 1;
#else
diff --git a/kernels/config.h.in b/kernels/config.h.in
index f02c90360..ba9acde56 100644
--- a/kernels/config.h.in
+++ b/kernels/config.h.in
@@ -5,6 +5,7 @@
#cmakedefine EMBREE_STAT_COUNTERS
#cmakedefine EMBREE_BACKFACE_CULLING
#cmakedefine EMBREE_BACKFACE_CULLING_CURVES
+#cmakedefine EMBREE_BACKFACE_CULLING_SPHERES
#cmakedefine EMBREE_FILTER_FUNCTION
#cmakedefine EMBREE_IGNORE_INVALID_RAYS
#cmakedefine EMBREE_GEOMETRY_TRIANGLE
diff --git a/kernels/geometry/sphere_intersector.h b/kernels/geometry/sphere_intersector.h
index 074f910a2..30f490818 100644
--- a/kernels/geometry/sphere_intersector.h
+++ b/kernels/geometry/sphere_intersector.h
@@ -106,8 +106,13 @@ namespace embree
const vbool<M> valid_front = valid & (ray.tnear() <= t_front) & (t_front <= ray.tfar);
const vbool<M> valid_back = valid & (ray.tnear() <= t_back ) & (t_back <= ray.tfar);
+#if defined (EMBREE_BACKFACE_CULLING_SPHERES)
+ /* check if there is a first hit */
+ const vbool<M> valid_first = valid_front;
+#else
/* check if there is a first hit */
const vbool<M> valid_first = valid_front | valid_back;
+#endif
if (unlikely(none(valid_first)))
return false;
@@ -120,7 +125,8 @@ namespace embree
/* invoke intersection filter for first hit */
const bool is_hit_first = epilog(valid_first, hit);
-
+
+#if !defined (EMBREE_BACKFACE_CULLING_SPHERES)
/* check for possible second hits before potentially accepted hit */
const vfloat<M> t_second = t_back;
const vbool<M> valid_second = valid_front & valid_back & (t_second <= ray.tfar);
@@ -131,7 +137,9 @@ namespace embree
const Vec3vf<M> Ng_second = td_back * ray_dir - perp;
hit = SphereIntersectorHitM<M> (t_second, Ng_second);
const bool is_hit_second = epilog(valid_second, hit);
-
+#else
+ constexpr bool is_hit_second = false;
+#endif
return is_hit_first | is_hit_second;
}
@@ -186,8 +194,13 @@ namespace embree
const vbool<M> valid_front = valid & (ray.tnear()[k] <= t_front) & (t_front <= ray.tfar[k]);
const vbool<M> valid_back = valid & (ray.tnear()[k] <= t_back ) & (t_back <= ray.tfar[k]);
+#if defined (EMBREE_BACKFACE_CULLING_SPHERES)
+ /* check if there is a first hit */
+ const vbool<M> valid_first = valid_front;
+#else
/* check if there is a first hit */
const vbool<M> valid_first = valid_front | valid_back;
+#endif
if (unlikely(none(valid_first)))
return false;
@@ -200,7 +213,8 @@ namespace embree
/* invoke intersection filter for first hit */
const bool is_hit_first = epilog(valid_first, hit);
-
+
+#if !defined (EMBREE_BACKFACE_CULLING_SPHERES)
/* check for possible second hits before potentially accepted hit */
const vfloat<M> t_second = t_back;
const vbool<M> valid_second = valid_front & valid_back & (t_second <= ray.tfar[k]);
@@ -211,7 +225,9 @@ namespace embree
const Vec3vf<M> Ng_second = td_back * ray_dir - perp;
hit = SphereIntersectorHitM<M> (t_second, Ng_second);
const bool is_hit_second = epilog(valid_second, hit);
-
+#else
+ constexpr bool is_hit_second = false;
+#endif
return is_hit_first | is_hit_second;
}
};
diff -ruN a/kernels/sycl/rthwif_embree_builder.cpp b/kernels/sycl/rthwif_embree_builder.cpp
--- a/kernels/sycl/rthwif_embree_builder.cpp 2023-03-28 17:23:06.429190200 +0200
+++ b/kernels/sycl/rthwif_embree_builder.cpp 2023-03-28 17:35:01.291938600 +0200
@@ -540,7 +540,12 @@
assert(offset <= geomDescrData.size());
}
+ /* Force running BVH building sequentially from the calling thread if using TBB < 2021, as it otherwise leads to runtime issues. */
+#if TBB_VERSION_MAJOR<2021
+ RTHWIF_PARALLEL_OPERATION parallelOperation = nullptr;
+#else
RTHWIF_PARALLEL_OPERATION parallelOperation = rthwifNewParallelOperation();
+#endif
/* estimate static accel size */
BBox1f time_range(0,1);
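The sphere-intersector hunk above boils down to: with EMBREE_BACKFACE_CULLING_SPHERES defined, only the front (entry) hit of the ray/sphere test survives (`valid_first = valid_front`) and the whole second-hit path is compiled out. A minimal Python sketch of that behavior, assuming a normalized ray direction (the function and names are illustrative, not Embree code):

import math

def ray_sphere_hits(origin, direction, center, radius, cull_backfaces=False):
    # Solve |origin + t * direction - center|^2 = radius^2 for t,
    # with `direction` normalized so the quadratic coefficient a == 1.
    oc = [origin[i] - center[i] for i in range(3)]
    b = sum(oc[i] * direction[i] for i in range(3))
    disc = b * b - (sum(x * x for x in oc) - radius * radius)
    if disc < 0.0:
        return []  # the ray misses the sphere entirely
    s = math.sqrt(disc)
    t_front, t_back = -b - s, -b + s
    # Backface culling keeps only the front hit, mirroring the patch where
    # `valid_first = valid_front` and the second-hit block is disabled.
    return [t_front] if cull_backfaces else [t_front, t_back]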

View File

@ -31,7 +31,8 @@ IF(NOT PYTHON_ROOT_DIR AND NOT $ENV{PYTHON_ROOT_DIR} STREQUAL "")
SET(PYTHON_ROOT_DIR $ENV{PYTHON_ROOT_DIR})
ENDIF()
SET(PYTHON_VERSION 3.10 CACHE STRING "Python Version (major and minor only)")
SET(_PYTHON_VERSION_SUPPORTED 3.10)
SET(PYTHON_VERSION ${_PYTHON_VERSION_SUPPORTED} CACHE STRING "Python Version (major and minor only)")
MARK_AS_ADVANCED(PYTHON_VERSION)
@ -178,8 +179,24 @@ UNSET(_IS_LIB_PATH_DEF)
# handle the QUIETLY and REQUIRED arguments and SET PYTHONLIBSUNIX_FOUND to TRUE IF
# all listed variables are TRUE
INCLUDE(FindPackageHandleStandardArgs)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(PythonLibsUnix DEFAULT_MSG
PYTHON_LIBRARY PYTHON_LIBPATH PYTHON_INCLUDE_DIR PYTHON_INCLUDE_CONFIG_DIR)
FIND_PACKAGE_HANDLE_STANDARD_ARGS(PythonLibsUnix
# NOTE(@ideasman42): Instead of `DEFAULT_MSG` use a custom message because users
# may have newer versions of Python and not be using pre-compiled libraries
# (on other UNIX systems or using an esoteric architecture).
# Some Python developers might want to use the newer features of Python too.
# While we could automatically detect and use newer versions, this would result in
# developers using a configuration which isn't officially supported without realizing it.
# So warn that the officially supported Python version is not found and let the developer
# explicitly set the newer version if they wish.
# From a maintenance perspective it's typically not a problem to support newer versions,
# and doing so can help ease the process of upgrading too; nevertheless these versions
# don't have the same level of testing & support.
"\
'PYTHON_VERSION=${_PYTHON_VERSION_SUPPORTED}' not found! \
This is the only officially supported version. \
If you wish to use a newer Python version you may set 'PYTHON_VERSION' \
however we do not guarantee full compatibility in this case."
PYTHON_LIBRARY PYTHON_LIBPATH PYTHON_INCLUDE_DIR PYTHON_INCLUDE_CONFIG_DIR)
IF(PYTHONLIBSUNIX_FOUND)
# Assign cache items
@ -215,6 +232,7 @@ IF(PYTHONLIBSUNIX_FOUND)
ENDIF()
UNSET(_PYTHON_ABI_FLAGS)
UNSET(_PYTHON_VERSION_SUPPORTED)
UNSET(_python_SEARCH_DIRS)
MARK_AS_ADVANCED(

View File

@ -363,7 +363,11 @@ windows_find_package(Freetype REQUIRED)
if(WITH_FFTW3)
set(FFTW3 ${LIBDIR}/fftw3)
set(FFTW3_LIBRARIES ${FFTW3}/lib/libfftw.lib)
if(EXISTS ${FFTW3}/lib/libfftw3-3.lib) # 3.6 libraries
set(FFTW3_LIBRARIES ${FFTW3}/lib/libfftw3-3.lib ${FFTW3}/lib/libfftw3f.lib)
else()
set(FFTW3_LIBRARIES ${FFTW3}/lib/libfftw.lib) # 3.5 Libraries
endif()
set(FFTW3_INCLUDE_DIRS ${FFTW3}/include)
set(FFTW3_LIBPATH ${FFTW3}/lib)
endif()

View File

@ -8,6 +8,8 @@ buildbot:
version: '10.1.243'
cuda11:
version: '11.4.1'
cuda12:
version: '12.1.1'
hip:
version: '5.5.30571'
hiprt:

View File

@ -122,8 +122,12 @@ bool OneapiDevice::check_peer_access(Device * /*peer_device*/)
bool OneapiDevice::can_use_hardware_raytracing_for_features(uint requested_features) const
{
/* MNEE and Ray-trace kernels currently don't work correctly with HWRT. */
/* MNEE and Raytrace kernels work correctly with Hardware Raytracing starting with Embree 4.1. */
# if defined(RTC_VERSION) && RTC_VERSION < 40100
return !(requested_features & (KERNEL_FEATURE_MNEE | KERNEL_FEATURE_NODE_RAYTRACE));
# else
return true;
# endif
}
BVHLayoutMask OneapiDevice::get_bvh_layout_mask(uint requested_features) const
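Embree encodes RTC_VERSION as a single integer (MAJOR * 10000 + MINOR * 100 + PATCH), so the `RTC_VERSION < 40100` check above reads as "older than Embree 4.1.0". As a sketch:

def rtc_version(major, minor, patch):
    # Embree: RTC_VERSION = MAJOR * 10000 + MINOR * 100 + PATCH
    return major * 10000 + minor * 100 + patch

assert rtc_version(4, 1, 0) == 40100  # the threshold used in the #if above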

View File

@ -540,12 +540,12 @@ if(WITH_CYCLES_CUDA_BINARIES)
elseif(${arch} MATCHES ".*_7." AND "${CUDA_VERSION}" LESS 100)
message(STATUS "CUDA binaries for ${arch} require CUDA 10.0+, skipped.")
elseif(${arch} MATCHES ".*_8.")
if(DEFINED CUDA11_NVCC_EXECUTABLE)
set(cuda_nvcc_executable ${CUDA11_NVCC_EXECUTABLE})
set(cuda_toolkit_root_dir ${CUDA11_TOOLKIT_ROOT_DIR})
elseif("${CUDA_VERSION}" GREATER_EQUAL 111) # Support for sm_86 was introduced in CUDA 11
if("${CUDA_VERSION}" GREATER_EQUAL 111) # Support for sm_86 was introduced in CUDA 11
set(cuda_nvcc_executable ${CUDA_NVCC_EXECUTABLE})
set(cuda_toolkit_root_dir ${CUDA_TOOLKIT_ROOT_DIR})
elseif(DEFINED CUDA11_NVCC_EXECUTABLE)
set(cuda_nvcc_executable ${CUDA11_NVCC_EXECUTABLE})
set(cuda_toolkit_root_dir ${CUDA11_TOOLKIT_ROOT_DIR})
else()
message(STATUS "CUDA binaries for ${arch} require CUDA 11.1+, skipped.")
endif()

View File

@ -174,27 +174,30 @@ bool oneapi_kernel_is_required_for_features(const std::string &kernel_name,
return true;
}
bool oneapi_kernel_is_raytrace_or_mnee(const std::string &kernel_name)
bool oneapi_kernel_is_compatible_with_hardware_raytracing(const std::string &kernel_name)
{
return (kernel_name.find(device_kernel_as_string(DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_MNEE)) !=
std::string::npos) ||
/* MNEE and Raytrace kernels work correctly with Hardware Raytracing starting with Embree 4.1.
*/
# if defined(RTC_VERSION) && RTC_VERSION < 40100
return (kernel_name.find(device_kernel_as_string(DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_MNEE)) ==
std::string::npos) &&
(kernel_name.find(device_kernel_as_string(
DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_RAYTRACE)) != std::string::npos);
DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_RAYTRACE)) == std::string::npos);
# else
return true;
# endif
}
bool oneapi_kernel_is_using_embree(const std::string &kernel_name)
bool oneapi_kernel_has_intersections(const std::string &kernel_name)
{
# ifdef WITH_EMBREE_GPU
/* MNEE and Ray-trace kernels aren't yet enabled to use Embree. */
for (int i = 0; i < (int)DEVICE_KERNEL_NUM; i++) {
DeviceKernel kernel = (DeviceKernel)i;
if (device_kernel_has_intersection(kernel)) {
if (kernel_name.find(device_kernel_as_string(kernel)) != std::string::npos) {
return !oneapi_kernel_is_raytrace_or_mnee(kernel_name);
return true;
}
}
}
# endif
return false;
}
@ -217,7 +220,8 @@ bool oneapi_load_kernels(SyclQueue *queue_,
const std::string &kernel_name = kernel_id.get_name();
if (!oneapi_kernel_is_required_for_features(kernel_name, kernel_features) ||
!oneapi_kernel_is_using_embree(kernel_name))
!(oneapi_kernel_has_intersections(kernel_name) &&
oneapi_kernel_is_compatible_with_hardware_raytracing(kernel_name)))
{
continue;
}
@ -260,14 +264,14 @@ bool oneapi_load_kernels(SyclQueue *queue_,
/* In case HWRT is on, compilation of kernels using Embree is already handled in the
* previous block. */
if (!oneapi_kernel_is_required_for_features(kernel_name, kernel_features) ||
(use_hardware_raytracing && oneapi_kernel_is_using_embree(kernel_name)))
(use_hardware_raytracing && oneapi_kernel_has_intersections(kernel_name) &&
oneapi_kernel_is_compatible_with_hardware_raytracing(kernel_name)))
{
continue;
}
# ifdef WITH_EMBREE_GPU
if (oneapi_kernel_is_using_embree(kernel_name) ||
oneapi_kernel_is_raytrace_or_mnee(kernel_name)) {
if (oneapi_kernel_has_intersections(kernel_name)) {
sycl::kernel_bundle<sycl::bundle_state::input> one_kernel_bundle_input =
sycl::get_kernel_bundle<sycl::bundle_state::input>(queue->get_context(), {kernel_id});
one_kernel_bundle_input

View File

@ -487,7 +487,8 @@ bool LightTree::should_split(LightTreeEmitter *emitters,
float total_cost = 0.0f;
float min_cost = FLT_MAX;
for (int dim = 0; dim < 3; dim++) {
/* If the centroid bounding box is 0 along a given dimension, skip it. */
/* If the centroid bounding box is 0 along a given dimension and the node measure is already
* computed, skip it. */
if (centroid_bbox.size()[dim] == 0.0f && dim != 0) {
continue;
}
@ -524,6 +525,11 @@ bool LightTree::should_split(LightTreeEmitter *emitters,
break;
}
/* If the centroid bounding box is 0 along a given dimension, skip it. */
if (centroid_bbox.size()[dim] == 0.0f) {
continue;
}
total_cost = measure.calculate();
if (total_cost == 0.0f) {
break;

View File

@ -100,6 +100,10 @@ struct LightTreeMeasure {
/* Taken from Eq. 2 in the paper. */
__forceinline float calculate()
{
if (is_zero()) {
return 0.0f;
}
float area = bbox.area();
float area_measure = area == 0 ? len(bbox.size()) : area;
return energy * area_measure * bcone.calculate_measure();

View File

@ -406,7 +406,6 @@ GHOST_ContextVK::~GHOST_ContextVK()
GHOST_TSuccess GHOST_ContextVK::destroySwapchain()
{
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
m_in_flight_images.resize(0);
VkDevice device = vulkan_device->device;
for (auto semaphore : m_image_available_semaphores) {
@ -442,36 +441,9 @@ GHOST_TSuccess GHOST_ContextVK::swapBuffers()
return GHOST_kFailure;
}
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
VkDevice device = vulkan_device->device;
vkWaitForFences(device, 1, &m_in_flight_fences[m_currentFrame], VK_TRUE, UINT64_MAX);
VkResult result = vkAcquireNextImageKHR(device,
m_swapchain,
UINT64_MAX,
m_image_available_semaphores[m_currentFrame],
VK_NULL_HANDLE,
&m_currentImage);
if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) {
/* Swap-chain is out of date. Recreate swap-chain and skip this frame. */
destroySwapchain();
createSwapchain();
if (m_lastFrame != m_currentFrame) {
return GHOST_kSuccess;
}
else if (result != VK_SUCCESS) {
fprintf(stderr,
"Error: Failed to acquire swap chain image : %s\n",
vulkan_error_as_string(result));
return GHOST_kFailure;
}
/* Check if a previous frame is using this image (i.e. there is its fence to wait on) */
if (m_in_flight_images[m_currentImage] != VK_NULL_HANDLE) {
vkWaitForFences(device, 1, &m_in_flight_images[m_currentImage], VK_TRUE, UINT64_MAX);
}
m_in_flight_images[m_currentImage] = m_in_flight_fences[m_currentFrame];
VkPipelineStageFlags wait_stages[] = {VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT};
@ -501,16 +473,15 @@ GHOST_TSuccess GHOST_ContextVK::swapBuffers()
VkSubmitInfo submit_info = {};
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &m_image_available_semaphores[m_currentFrame];
submit_info.pWaitDstStageMask = wait_stages;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &m_command_buffers[m_currentImage];
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &m_render_finished_semaphores[m_currentFrame];
vkResetFences(device, 1, &m_in_flight_fences[m_currentFrame]);
VkDevice device = vulkan_device->device;
VkResult result;
VK_CHECK(vkQueueSubmit(m_graphic_queue, 1, &submit_info, m_in_flight_fences[m_currentFrame]));
do {
result = vkWaitForFences(device, 1, &m_in_flight_fences[m_currentFrame], VK_TRUE, 10000);
@ -543,6 +514,7 @@ GHOST_TSuccess GHOST_ContextVK::swapBuffers()
}
m_currentFrame = (m_currentFrame + 1) % MAX_FRAMES_IN_FLIGHT;
vkResetFences(device, 1, &m_in_flight_fences[m_currentFrame]);
return GHOST_kSuccess;
}
@ -553,6 +525,20 @@ GHOST_TSuccess GHOST_ContextVK::getVulkanBackbuffer(
if (m_swapchain == VK_NULL_HANDLE) {
return GHOST_kFailure;
}
if (m_currentFrame != m_lastFrame) {
assert(vulkan_device.has_value() && vulkan_device->device != VK_NULL_HANDLE);
VkDevice device = vulkan_device->device;
vkAcquireNextImageKHR(device,
m_swapchain,
UINT64_MAX,
m_image_available_semaphores[m_currentFrame],
VK_NULL_HANDLE,
&m_currentImage);
m_lastFrame = m_currentFrame;
}
*((VkImage *)image) = m_swapchain_images[m_currentImage];
*((VkFramebuffer *)framebuffer) = m_swapchain_framebuffers[m_currentImage];
*((VkRenderPass *)render_pass) = m_render_pass;
@ -741,6 +727,7 @@ static GHOST_TSuccess selectPresentMode(VkPhysicalDevice device,
VkSurfaceKHR surface,
VkPresentModeKHR *r_presentMode)
{
// TODO cleanup: we are not going to use MAILBOX as it isn't supported by renderdoc.
uint32_t present_count;
vkGetPhysicalDeviceSurfacePresentModesKHR(device, surface, &present_count, NULL);
vector<VkPresentModeKHR> presents(present_count);
@ -847,7 +834,7 @@ GHOST_TSuccess GHOST_ContextVK::createSwapchain()
}
/* Driver can stall if only using minimal image count. */
uint32_t image_count = capabilities.minImageCount;
uint32_t image_count = capabilities.minImageCount + 1;
/* Note: maxImageCount == 0 means no limit. */
if (image_count > capabilities.maxImageCount && capabilities.maxImageCount > 0) {
image_count = capabilities.maxImageCount;
@ -881,7 +868,6 @@ GHOST_TSuccess GHOST_ContextVK::createSwapchain()
m_swapchain_images.resize(image_count);
vkGetSwapchainImagesKHR(device, m_swapchain, &image_count, m_swapchain_images.data());
m_in_flight_images.resize(image_count, VK_NULL_HANDLE);
m_swapchain_image_views.resize(image_count);
m_swapchain_framebuffers.resize(image_count);
for (int i = 0; i < image_count; i++) {
@ -921,18 +907,18 @@ GHOST_TSuccess GHOST_ContextVK::createSwapchain()
m_image_available_semaphores.resize(MAX_FRAMES_IN_FLIGHT);
m_render_finished_semaphores.resize(MAX_FRAMES_IN_FLIGHT);
m_in_flight_fences.resize(MAX_FRAMES_IN_FLIGHT);
for (int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) {
VkSemaphoreCreateInfo semaphore_info = {};
semaphore_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkSemaphoreCreateInfo semaphore_info = {};
semaphore_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkFenceCreateInfo fence_info = {};
fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fence_info.flags = VK_FENCE_CREATE_SIGNALED_BIT;
for (int i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) {
VK_CHECK(vkCreateSemaphore(device, &semaphore_info, NULL, &m_image_available_semaphores[i]));
VK_CHECK(vkCreateSemaphore(device, &semaphore_info, NULL, &m_render_finished_semaphores[i]));
VkFenceCreateInfo fence_info = {};
fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fence_info.flags = VK_FENCE_CREATE_SIGNALED_BIT;
VK_CHECK(vkCreateFence(device, &fence_info, NULL, &m_in_flight_fences[i]));
}
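The `image_count` change above applies the usual Vulkan advice: request one image more than `minImageCount` so the driver never stalls waiting for the bare minimum, then clamp to `maxImageCount`, where 0 means no limit. A short sketch of the same computation:

def choose_swapchain_image_count(min_image_count, max_image_count):
    # One extra image so the driver doesn't stall on the bare minimum.
    count = min_image_count + 1
    # maxImageCount == 0 means "no limit", so only clamp when it is set.
    if 0 < max_image_count < count:
        count = max_image_count
    return count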

View File

@ -181,9 +181,17 @@ class GHOST_ContextVK : public GHOST_Context {
std::vector<VkSemaphore> m_image_available_semaphores;
std::vector<VkSemaphore> m_render_finished_semaphores;
std::vector<VkFence> m_in_flight_fences;
std::vector<VkFence> m_in_flight_images;
/** frame modulo swapchain_len. Used as index for sync objects. */
int m_currentFrame = 0;
/**
* Last frame in which the Vulkan handles were retrieved. This attribute is used to determine
* if a new image from the swap chain needs to be acquired.
*
* In a regular Vulkan application this is done in the same method, but due to the GHOST API
* this isn't possible. Swap chains are triggered by the window manager and the GPUBackend
* isn't informed about these changes.
*/
int m_lastFrame = -1;
/** Image index in the swapchain. Used as index for render objects. */
uint32_t m_currentImage = 0;
/** Used to create unique framebuffer ids to return when the swapchain is recreated. */
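The `m_lastFrame` field added above makes swap-chain image acquisition lazy: an image is acquired only on the first back-buffer request of a frame, since GHOST cannot do this inside `swapBuffers()` the way a typical Vulkan application would. A rough sketch of the intended flow (all names hypothetical):

def get_backbuffer(ctx):
    if ctx.last_frame != ctx.current_frame:
        # First request this frame: acquire a fresh swap-chain image
        # and remember that we already did so.
        ctx.current_image = acquire_next_image(
            ctx.swapchain, signal=ctx.image_available[ctx.current_frame])
        ctx.last_frame = ctx.current_frame
    return ctx.swapchain_images[ctx.current_image]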

View File

@ -7,7 +7,7 @@
inkscape:output_extension="org.inkscape.output.svg.inkscape"
sodipodi:docname="blender_icons.svg"
version="1.0"
inkscape:version="1.2 (dc2aeda, 2022-05-15)"
inkscape:version="1.2.2 (732a01da63, 2022-12-09)"
sodipodi:version="0.32"
id="svg2"
height="640"
@ -27,7 +27,6 @@
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
@ -42,16 +41,16 @@
guidetolerance="10"
inkscape:pageopacity="0"
inkscape:pageshadow="2"
inkscape:window-width="1728"
inkscape:window-height="1051"
inkscape:window-width="3840"
inkscape:window-height="2036"
id="namedview34335"
showgrid="false"
inkscape:zoom="1.2495612"
inkscape:cx="196.46897"
inkscape:cy="320.51252"
inkscape:window-x="767"
inkscape:window-y="120"
inkscape:window-maximized="0"
inkscape:zoom="2"
inkscape:cx="180.75"
inkscape:cy="338.5"
inkscape:window-x="-12"
inkscape:window-y="-12"
inkscape:window-maximized="1"
inkscape:current-layer="layer8"
inkscape:showpageshadow="2"
inkscape:deskcolor="#808080" />
@ -7148,6 +7147,10 @@
sodipodi:nodetypes="ccccccccccccc" />
</g>
</g>
<g
inkscape:groupmode="layer"
id="layer1"
inkscape:label="J-23" />
<g
inkscape:groupmode="layer"
id="layer8"
@ -15195,6 +15198,103 @@
id="circle8021-5"
inkscape:connector-curvature="0" />
</g>
<g
id="g13706"
inkscape:label="J-23"
style="display:inline;enable-background:new">
<g
id="g17336"
style="display:inline;opacity:0.7;stroke-width:1.44731;stroke-dasharray:none;enable-background:new"
transform="matrix(0.6914962,0,0,0.6915053,144.53231,65.869882)">
<g
id="g17408"
transform="translate(-1.1860055,2.3126067)"
style="stroke-width:1.44731;stroke-dasharray:none">
<path
style="fill:none;stroke:#ffffff;stroke-width:1.44731;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 471.48986,207.6 v -6.605"
id="path13630"
sodipodi:nodetypes="cc" />
<path
style="fill:none;stroke:#ffffff;stroke-width:1.44731;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
d="m 469.75403,202.73197 1.73583,-1.73697 1.73628,1.73697"
id="path13701"
sodipodi:nodetypes="ccc" />
</g>
<g
id="g17404"
transform="rotate(90,473.15911,208.96873)"
style="stroke-width:1.44731;stroke-dasharray:none">
<path
style="display:inline;fill:none;stroke:#ffffff;stroke-width:1.44731;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1;enable-background:new"
d="M 475.88023,210.00984 V 203.9052"
id="path13630-5"
sodipodi:nodetypes="cc" />
<path
style="display:inline;fill:none;stroke:#ffffff;stroke-width:1.44731;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1;enable-background:new"
d="m 474.14424,205.64156 1.73599,-1.73636 1.73612,1.73636"
id="path13701-0"
sodipodi:nodetypes="ccc" />
</g>
<circle
style="display:inline;fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:1.44731;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1;enable-background:new"
id="path12466"
cx="470.30371"
cy="211.68971"
r="1.8006321" />
</g>
<g
id="g17336-1"
style="display:inline;stroke-width:1.44731;stroke-dasharray:none;enable-background:new"
transform="matrix(0.6914962,0,0,0.6915053,150.788,59.912101)">
<g
id="g17408-1"
transform="translate(-1.1860055,2.3126067)"
style="stroke-width:1.44731;stroke-dasharray:none">
<path
style="fill:none;stroke:#ffffff;stroke-width:1.44731;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1"
d="m 471.48986,207.6 v -6.605"
id="path13630-2"
sodipodi:nodetypes="cc" />
<path
style="fill:none;stroke:#ffffff;stroke-width:1.44731;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1"
d="m 469.75403,202.73197 1.73583,-1.73697 1.73628,1.73697"
id="path13701-4"
sodipodi:nodetypes="ccc" />
</g>
<g
id="g17404-6"
transform="rotate(90,473.15911,208.96873)"
style="stroke-width:1.44731;stroke-dasharray:none">
<path
style="display:inline;fill:none;stroke:#ffffff;stroke-width:1.44731;stroke-linecap:butt;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1;enable-background:new"
d="M 475.88023,210.00984 V 203.9052"
id="path13630-5-8"
sodipodi:nodetypes="cc" />
<path
style="display:inline;fill:none;stroke:#ffffff;stroke-width:1.44731;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1;enable-background:new"
d="m 474.14424,205.64156 1.73599,-1.73636 1.73612,1.73636"
id="path13701-0-3"
sodipodi:nodetypes="ccc" />
</g>
<circle
style="display:inline;fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:1.44731;stroke-linecap:round;stroke-linejoin:round;stroke-dasharray:none;stroke-opacity:1;enable-background:new"
id="path12466-6"
cx="470.30371"
cy="211.68971"
r="1.8006321" />
</g>
<path
style="display:inline;opacity:0.7;fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:1.00082;stroke-linecap:round;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1;enable-background:new"
d="m 473.31787,208.67619 0.4302,-0.44676"
id="path24883-4"
sodipodi:nodetypes="cc" />
<path
style="display:inline;opacity:0.7;fill:none;fill-opacity:1;stroke:#ffffff;stroke-width:1.00082;stroke-linecap:round;stroke-linejoin:miter;stroke-dasharray:none;stroke-opacity:1;enable-background:new"
d="m 471.76625,210.24534 0.4302,-0.44676"
id="path24883-4-0"
sodipodi:nodetypes="cc" />
</g>
<g
style="display:inline;fill:#ffffff;enable-background:new"
id="g14253"

(Rendered image preview omitted: blender_icons.svg, 2.6 MiB before and after.)

View File

@ -1018,9 +1018,44 @@ class WM_OT_url_open(Operator):
description="URL to open",
)
@staticmethod
def _add_utm_param_to_url(url, utm_source):
import urllib.parse
# Make sure we have a scheme, otherwise we can't parse the URL.
if not url.startswith(("http://", "https://")):
url = "https://" + url
# Parse the URL to get its domain and query parameters.
parsed_url = urllib.parse.urlparse(url)
domain = parsed_url.netloc
# Only add a utm source if it points to a blender.org domain.
if not (domain.endswith(".blender.org") or domain == "blender.org"):
return url
# Parse the query parameters and add or update the utm_source parameter.
query_params = urllib.parse.parse_qs(parsed_url.query)
query_params["utm_source"] = utm_source
new_query = urllib.parse.urlencode(query_params, doseq=True)
# Create a new URL with the updated query parameters.
new_url_parts = list(parsed_url)
new_url_parts[4] = new_query
new_url = urllib.parse.urlunparse(new_url_parts)
return new_url
@staticmethod
def _get_utm_source():
version = bpy.app.version_string
formatted_version = version.replace(' ', '-').lower()
return f"blender-{formatted_version}"
def execute(self, _context):
import webbrowser
webbrowser.open(self.url)
complete_url = self._add_utm_param_to_url(self.url, self._get_utm_source())
webbrowser.open(complete_url)
return {'FINISHED'}
@ -1102,10 +1137,7 @@ class WM_OT_url_open_preset(Operator):
url = url(self, context)
break
import webbrowser
webbrowser.open(url)
return {'FINISHED'}
return bpy.ops.wm.url_open(url=url)
class WM_OT_path_open(Operator):
@ -1307,9 +1339,7 @@ class WM_OT_doc_view_manual(Operator):
)
return {'CANCELLED'}
else:
import webbrowser
webbrowser.open(url)
return {'FINISHED'}
return bpy.ops.wm.url_open(url=url)
class WM_OT_doc_view(Operator):
@ -1325,10 +1355,7 @@ class WM_OT_doc_view(Operator):
if url is None:
return {'CANCELLED'}
import webbrowser
webbrowser.open(url)
return {'FINISHED'}
return bpy.ops.wm.url_open(url=url)
rna_path = StringProperty(
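To illustrate the new `_add_utm_param_to_url` helper above (a hedged example, not part of the patch): URLs under blender.org gain a `utm_source` query parameter, everything else passes through untouched.

>>> WM_OT_url_open._add_utm_param_to_url("https://docs.blender.org/manual/", "blender-3-6-0")
'https://docs.blender.org/manual/?utm_source=blender-3-6-0'
>>> WM_OT_url_open._add_utm_param_to_url("https://example.org/", "blender-3-6-0")
'https://example.org/'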

View File

@ -614,6 +614,7 @@ class NODE_MT_category_GEO_VOLUME(Menu):
node_add_menu.add_node_type(layout, "GeometryNodeMeanFilterSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodeOffsetSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodeSDFVolumeSphere")
node_add_menu.add_node_type(layout, "GeometryNodeInputSignedDistance")
node_add_menu.draw_assets_for_catalog(layout, self.bl_label)

View File

@ -435,7 +435,9 @@ class IMAGE_MT_uvs(Menu):
layout.separator()
layout.operator_context = 'INVOKE_DEFAULT'
layout.operator("uv.pack_islands")
layout.operator_context = 'EXEC_REGION_WIN'
layout.operator("uv.average_islands_scale")
layout.separator()

View File

@ -5,6 +5,8 @@
#include "BLI_array.hh"
#include "BLI_color.hh"
#include "BLI_cpp_type.hh"
#include "BLI_generic_span.hh"
#include "BLI_generic_virtual_array.hh"
#include "BLI_math_color.hh"
#include "BLI_math_vector.h"
#include "BLI_math_vector.hh"
@ -587,4 +589,16 @@ template<typename T> using DefaultMixer = typename DefaultMixerStruct<T>::type;
/** \} */
/* -------------------------------------------------------------------- */
/** \name Generic Array Utils Implementations
*
* Extra implementations of functions from #BLI_array_utils.hh for all attribute types,
* used to avoid templating the same logic for each type in many places.
* \{ */
void gather(GSpan src, Span<int> map, GMutableSpan dst);
void gather(const GVArray &src, Span<int> map, GMutableSpan dst);
/** \} */
} // namespace blender::bke::attribute_math
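The two `gather` overloads declared above dispatch on the runtime CPP type and then run the usual index-map copy per concrete attribute type. Conceptually (a toy sketch that ignores the type dispatch):

def gather(src, index_map):
    # dst[i] = src[index_map[i]] for every i, which is what
    # array_utils::gather does for each concrete element type.
    return [src[i] for i in index_map]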

View File

@ -1587,6 +1587,7 @@ void BKE_nodetree_remove_layer_n(struct bNodeTree *ntree, struct Scene *scene, i
/* Function nodes use the range starting at 1200. */
#define GEO_NODE_SIMULATION_INPUT 2100
#define GEO_NODE_SIMULATION_OUTPUT 2101
#define GEO_NODE_INPUT_SIGNED_DISTANCE 2102
/** \} */

View File

@ -1,5 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BLI_array_utils.hh"
#include "BKE_attribute_math.hh"
namespace blender::bke::attribute_math {
@ -128,4 +130,20 @@ void ColorGeometry4bMixer::finalize(const IndexMask mask)
});
}
void gather(const GSpan src, const Span<int> map, GMutableSpan dst)
{
attribute_math::convert_to_static_type(src.type(), [&](auto dummy) {
using T = decltype(dummy);
array_utils::gather(src.typed<T>(), map, dst.typed<T>());
});
}
void gather(const GVArray &src, const Span<int> map, GMutableSpan dst)
{
attribute_math::convert_to_static_type(src.type(), [&](auto dummy) {
using T = decltype(dummy);
array_utils::gather(src.typed<T>(), map, dst.typed<T>());
});
}
} // namespace blender::bke::attribute_math

View File

@ -1124,14 +1124,6 @@ static void copy_construct_data(const GSpan src, GMutableSpan dst)
src.type().copy_construct_n(src.data(), dst.data(), src.size());
}
static void copy_with_map(const GSpan src, const Span<int> map, GMutableSpan dst)
{
attribute_math::convert_to_static_type(src.type(), [&](auto dummy) {
using T = decltype(dummy);
array_utils::gather(src.typed<T>(), map, dst.typed<T>());
});
}
static CurvesGeometry copy_with_removed_points(
const CurvesGeometry &curves,
const IndexMask points_to_delete,
@ -1216,7 +1208,7 @@ static CurvesGeometry copy_with_removed_points(
attribute.dst.span.copy_from(attribute.src);
}
else {
copy_with_map(attribute.src, new_curve_orig_indices, attribute.dst.span);
bke::attribute_math::gather(attribute.src, new_curve_orig_indices, attribute.dst.span);
}
}
});

View File

@ -67,6 +67,8 @@
#include "atomic_ops.h"
#include "lib_intern.h"
//#define DEBUG_TIME
#ifdef DEBUG_TIME
@ -439,7 +441,7 @@ void BKE_lib_id_expand_local(Main *bmain, ID *id, const int flags)
/**
* Ensure new (copied) ID is fully made local.
*/
static void lib_id_copy_ensure_local(Main *bmain, const ID *old_id, ID *new_id, const int flags)
void lib_id_copy_ensure_local(Main *bmain, const ID *old_id, ID *new_id, const int flags)
{
if (ID_IS_LINKED(old_id)) {
BKE_lib_id_expand_local(bmain, new_id, flags);

View File

@ -7,6 +7,24 @@
#pragma once
#include "BKE_lib_remap.h"
#ifdef __cplusplus
extern "C" {
#endif
extern BKE_library_free_notifier_reference_cb free_notifier_reference_cb;
extern BKE_library_remap_editor_id_reference_cb remap_editor_id_reference_cb;
struct ID;
struct Main;
void lib_id_copy_ensure_local(struct Main *bmain,
const struct ID *old_id,
struct ID *new_id,
const int flags);
#ifdef __cplusplus
}
#endif

View File

@ -61,6 +61,8 @@
#include "atomic_ops.h"
#include "lib_intern.h"
#define OVERRIDE_AUTO_CHECK_DELAY 0.2 /* 200ms between auto-override checks. */
//#define DEBUG_OVERRIDE_TIMEIT
@ -277,9 +279,11 @@ static ID *lib_override_library_create_from(Main *bmain,
id_us_min(local_id);
/* TODO: Handle this properly in LIB_NO_MAIN case as well (i.e. resync case). Or offload to
* generic ID copy code? */
if ((lib_id_copy_flags & LIB_ID_CREATE_NO_MAIN) == 0) {
local_id->lib = owner_library;
* generic ID copy code? Would probably be better to have a version of #BKE_id_copy_ex that takes
* an extra `target_lib` parameter. */
local_id->lib = owner_library;
if ((lib_id_copy_flags & LIB_ID_CREATE_NO_MAIN) != 0 && owner_library == nullptr) {
lib_id_copy_ensure_local(bmain, reference_id, local_id, 0);
}
BKE_lib_override_library_init(local_id, reference_id);

View File

@ -645,6 +645,7 @@ template<typename T> struct AssertUnitEpsilon {
using char2 = blender::VecBase<int8_t, 2>;
using char3 = blender::VecBase<int8_t, 3>;
using char4 = blender::VecBase<int8_t, 4>;
using uchar3 = blender::VecBase<uint8_t, 3>;
using uchar4 = blender::VecBase<uint8_t, 4>;
@ -659,6 +660,7 @@ using uint4 = VecBase<uint32_t, 4>;
using short2 = blender::VecBase<int16_t, 2>;
using short3 = blender::VecBase<int16_t, 3>;
using short4 = blender::VecBase<int16_t, 4>;
using ushort2 = VecBase<uint16_t, 2>;
using ushort3 = blender::VecBase<uint16_t, 3>;

View File

@ -351,7 +351,16 @@ void mat3_normalized_to_quat_fast(float q[4], const float mat[3][3])
}
BLI_assert(!(q[0] < 0.0f));
BLI_ASSERT_UNIT_QUAT(q);
/* Sometimes normalisation is necessary due to round-off errors in the above
* calculations. The comparison here uses tighter tolerances than
* BLI_ASSERT_UNIT_QUAT(), so it's likely that even after a few more
* transformations the quaternion will still be considered unit-ish. */
const float q_len_squared = dot_qtqt(q, q);
const float threshold = 0.0002f /* BLI_ASSERT_UNIT_EPSILON */ * 3;
if (fabs(q_len_squared - 1.0f) >= threshold) {
normalize_qt(q);
}
}
static void mat3_normalized_to_quat_with_checks(float q[4], float mat[3][3])

View File

@ -169,6 +169,32 @@ TEST(math_rotation, quat_split_swing_and_twist_negative)
EXPECT_V4_NEAR(twist, expected_twist, FLT_EPSILON);
}
TEST(math_rotation, mat3_normalized_to_quat_fast_degenerate)
{
/* This input will cause floating point issues, which would produce a non-unit
* quaternion if the call to `normalize_qt` were to be removed. This
* particular matrix was taken from a production file of Pet Projects that
* caused problems. */
const float input[3][3] = {
{1.0000000000, -0.0000006315, -0.0000000027},
{0.0000009365, 1.0000000000, -0.0000000307},
{0.0000001964, 0.2103530765, 0.9776254892},
};
const float expect_quat[4] = {
0.99860459566116333,
-0.052810292690992355,
4.9985139582986449e-08,
-3.93654971730939e-07,
};
ASSERT_FLOAT_EQ(1.0f, dot_qtqt(expect_quat, expect_quat))
<< "expected quaternion should be normal";
float actual_quat[4];
mat3_normalized_to_quat_fast(actual_quat, input);
EXPECT_FLOAT_EQ(1.0f, dot_qtqt(actual_quat, actual_quat));
EXPECT_V4_NEAR(expect_quat, actual_quat, FLT_EPSILON);
}
/* -------------------------------------------------------------------- */
/** \name Test `sin_cos_from_fraction` Accuracy & Exact Symmetry
* \{ */

View File

@ -453,15 +453,14 @@ void OutputOpenExrMultiLayerOperation::update_memory_buffer_partial(MemoryBuffer
const rcti &area,
Span<MemoryBuffer *> inputs)
{
const MemoryBuffer *input_image = inputs[0];
for (int i = 0; i < layers_.size(); i++) {
OutputOpenExrLayer &layer = layers_[i];
int layer_num_channels = COM_data_type_num_channels(layer.datatype);
if (layer.output_buffer) {
MemoryBuffer output_buf(layer.output_buffer,
COM_data_type_num_channels(layer.datatype),
this->get_width(),
this->get_height());
output_buf.copy_from(input_image, area);
MemoryBuffer output_buf(
layer.output_buffer, layer_num_channels, this->get_width(), this->get_height());
/* Input node always has 4 channels. Not all are needed depending on datatype. */
output_buf.copy_from(inputs[i], area, 0, layer_num_channels, 0);
}
}
}
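The copy above transfers only as many channels as the layer's datatype needs, even though every input buffer carries four. Assuming the usual compositor datatypes, `COM_data_type_num_channels` would map roughly like this (illustrative, not the actual implementation):

def num_channels(datatype):
    # Assumed mapping for the common compositor datatypes.
    return {"Value": 1, "Vector": 3, "Color": 4}[datatype]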

View File

@ -327,6 +327,7 @@ void postEditBoneDuplicate(struct ListBase *editbones, Object *ob)
if (!ebone_dst) {
ebone_dst = ED_armature_ebone_get_mirrored(editbones, ebone_src);
}
if (ebone_dst) {
BLI_ghash_insert(name_map, ebone_src->name, ebone_dst->name);
}
@ -334,22 +335,28 @@ void postEditBoneDuplicate(struct ListBase *editbones, Object *ob)
LISTBASE_FOREACH (EditBone *, ebone_src, editbones) {
EditBone *ebone_dst = ebone_src->temp.ebone;
if (ebone_dst) {
bPoseChannel *pchan_src = BKE_pose_channel_find_name(ob->pose, ebone_src->name);
if (pchan_src) {
bPoseChannel *pchan_dst = BKE_pose_channel_find_name(ob->pose, ebone_dst->name);
if (pchan_dst) {
if (pchan_src->custom_tx) {
pchan_dst->custom_tx = pchan_duplicate_map(ob->pose, name_map, pchan_src->custom_tx);
}
if (pchan_src->bbone_prev) {
pchan_dst->bbone_prev = pchan_duplicate_map(ob->pose, name_map, pchan_src->bbone_prev);
}
if (pchan_src->bbone_next) {
pchan_dst->bbone_next = pchan_duplicate_map(ob->pose, name_map, pchan_src->bbone_next);
}
}
}
if (!ebone_dst) {
continue;
}
bPoseChannel *pchan_src = BKE_pose_channel_find_name(ob->pose, ebone_src->name);
if (!pchan_src) {
continue;
}
bPoseChannel *pchan_dst = BKE_pose_channel_find_name(ob->pose, ebone_dst->name);
if (!pchan_dst) {
continue;
}
if (pchan_src->custom_tx) {
pchan_dst->custom_tx = pchan_duplicate_map(ob->pose, name_map, pchan_src->custom_tx);
}
if (pchan_src->bbone_prev) {
pchan_dst->bbone_prev = pchan_duplicate_map(ob->pose, name_map, pchan_src->bbone_prev);
}
if (pchan_src->bbone_next) {
pchan_dst->bbone_next = pchan_duplicate_map(ob->pose, name_map, pchan_src->bbone_next);
}
}
@ -1120,71 +1127,80 @@ static int armature_symmetrize_exec(bContext *C, wmOperator *op)
Object **objects = BKE_view_layer_array_from_objects_in_edit_mode_unique_data(
scene, view_layer, CTX_wm_view3d(C), &objects_len);
for (uint ob_index = 0; ob_index < objects_len; ob_index++) {
Object *obedit = objects[ob_index];
bArmature *arm = obedit->data;
EditBone *ebone_iter;
/* The beginning of the duplicated mirrored bones in the edbo list */
EditBone *ebone_first_dupe = NULL;
Object *obedit = objects[ob_index];
bArmature *arm = obedit->data;
ED_armature_edit_sync_selection(arm->edbo); /* XXX why is this needed? */
preEditBoneDuplicate(arm->edbo);
/* Select mirrored bones */
/* Deselect ebones depending on input axis and direction.
* A symmetrizable selection contains selected ebones of the input direction
* and unique selected bones with a unique flippable name.
*
* Storing temp ptrs to mirrored unselected ebones. */
for (ebone_iter = arm->edbo->first; ebone_iter; ebone_iter = ebone_iter->next) {
if (EBONE_VISIBLE(arm, ebone_iter) && (ebone_iter->flag & BONE_SELECTED)) {
char name_flip[MAXBONENAME];
if (!(EBONE_VISIBLE(arm, ebone_iter) && (ebone_iter->flag & BONE_SELECTED))) {
/* Skip bones that are hidden or not selected. */
continue;
}
BLI_string_flip_side_name(name_flip, ebone_iter->name, false, sizeof(name_flip));
char name_flip[MAXBONENAME];
if (ebone_iter == NULL) {
continue;
}
if (STREQ(name_flip, ebone_iter->name)) {
/* if the name matches, we don't have the potential to be mirrored, just skip */
ebone_iter->flag &= ~(BONE_SELECTED | BONE_TIPSEL | BONE_ROOTSEL);
}
else {
EditBone *ebone = ED_armature_ebone_find_name(arm->edbo, name_flip);
BLI_string_flip_side_name(name_flip, ebone_iter->name, false, sizeof(name_flip));
if (ebone) {
if ((ebone->flag & BONE_SELECTED) == 0) {
/* simple case, we're selected, the other bone isn't! */
ebone_iter->temp.ebone = ebone;
}
else {
/* complicated - choose which direction to copy */
float axis_delta;
if (STREQ(name_flip, ebone_iter->name)) {
/* Skip ebones without a flippable name as they don't have the potential to be mirrored. */
ebone_iter->flag &= ~(BONE_SELECTED | BONE_TIPSEL | BONE_ROOTSEL);
continue;
}
axis_delta = ebone->head[axis] - ebone_iter->head[axis];
if (axis_delta == 0.0f) {
axis_delta = ebone->tail[axis] - ebone_iter->tail[axis];
}
EditBone *ebone = ED_armature_ebone_find_name(arm->edbo, name_flip);
if (axis_delta == 0.0f) {
/* Both mirrored bones exist and point to each other and overlap exactly.
*
* in this case there's no well defined solution, so de-select both and skip.
*/
ebone->flag &= ~(BONE_SELECTED | BONE_TIPSEL | BONE_ROOTSEL);
ebone_iter->flag &= ~(BONE_SELECTED | BONE_TIPSEL | BONE_ROOTSEL);
}
else {
EditBone *ebone_src, *ebone_dst;
if (((axis_delta < 0.0f) ? -1 : 1) == direction) {
ebone_src = ebone;
ebone_dst = ebone_iter;
}
else {
ebone_src = ebone_iter;
ebone_dst = ebone;
}
if (!ebone) {
/* The ebone_iter is unique and mirrorable. */
continue;
}
ebone_src->temp.ebone = ebone_dst;
ebone_dst->flag &= ~(BONE_SELECTED | BONE_TIPSEL | BONE_ROOTSEL);
}
}
if (ebone->flag & BONE_SELECTED) {
/* The mirrored ebone and the ebone_iter are selected.
* Deselect based on the input direction and axis. */
float axis_delta;
axis_delta = ebone->head[axis] - ebone_iter->head[axis];
if (axis_delta == 0.0f) {
/* The ebone heads are overlapping. */
axis_delta = ebone->tail[axis] - ebone_iter->tail[axis];
if (axis_delta == 0.0f) {
/* Both mirrored bones point to each other and overlap exactly.
* In this case there's no well defined solution, so de-select both and skip. */
ebone->flag &= ~(BONE_SELECTED | BONE_TIPSEL | BONE_ROOTSEL);
ebone_iter->flag &= ~(BONE_SELECTED | BONE_TIPSEL | BONE_ROOTSEL);
continue;
}
}
/* Deselect depending on direction. */
if (((axis_delta < 0.0f) ? -1 : 1) == direction) {
/* Don't store temp ptr if the iter_bone gets deselected.
* In this case, the ebone.temp should point to the ebone_iter. */
ebone_iter->flag &= ~(BONE_SELECTED | BONE_TIPSEL | BONE_ROOTSEL);
continue;
}
ebone->flag &= ~(BONE_SELECTED | BONE_TIPSEL | BONE_ROOTSEL);
}
/* Set temp pointer to mirrored ebones */
ebone_iter->temp.ebone = ebone;
}
/* Find the selected bones and duplicate them as needed, with mirrored name. */
@ -1206,11 +1222,12 @@ static int armature_symmetrize_exec(bContext *C, wmOperator *op)
ebone_iter->temp.ebone->inherit_scale_mode = ebone_iter->inherit_scale_mode;
continue;
}
char name_flip[MAXBONENAME];
BLI_string_flip_side_name(name_flip, ebone_iter->name, false, sizeof(name_flip));
/* bones must have a side-suffix */
/* mirrored bones must have a side-suffix */
if (!STREQ(name_flip, ebone_iter->name)) {
EditBone *ebone;
@ -1254,8 +1271,8 @@ static int armature_symmetrize_exec(bContext *C, wmOperator *op)
*/
if (ebone->head[axis] != 0.0f) {
/* The mirrored bone doesn't start on the mirror axis, so assume that this one should
* not be connected to the old parent */
/* The mirrored bone doesn't start on the mirror axis, so assume that this one
* should not be connected to the old parent */
ebone->flag &= ~BONE_CONNECTED;
}
}
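Much of the symmetrize logic above hinges on BLI_string_flip_side_name: a bone is only mirrorable when flipping its side suffix yields a different name. A toy Python analogue (the real function handles many more suffix forms):

def flip_side_name(name):
    # Swap common .L/.R style side suffixes; a name without one flips
    # to itself, meaning the bone is not mirrorable.
    for left, right in ((".L", ".R"), ("_L", "_R"), (".l", ".r"), ("_l", "_r")):
        if name.endswith(left):
            return name[:-len(left)] + right
        if name.endswith(right):
            return name[:-len(right)] + left
    return name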

View File

@ -508,6 +508,7 @@ set(ICON_NAMES
ipo_ease_out
ipo_ease_in_out
normalize_fcurves
orientation_parent
vertexsel
edgesel
facesel

View File

@ -56,24 +56,6 @@ void ED_gizmotypes_snap_3d_flag_set(struct wmGizmo *gz, int flag)
snap_state->flag |= flag;
}
void ED_gizmotypes_snap_3d_flag_clear(struct wmGizmo *gz, int flag)
{
V3DSnapCursorState *snap_state = ((SnapGizmo3D *)gz)->snap_state;
snap_state->flag &= ~flag;
}
bool ED_gizmotypes_snap_3d_flag_test(struct wmGizmo *gz, int flag)
{
V3DSnapCursorState *snap_state = ((SnapGizmo3D *)gz)->snap_state;
return (snap_state->flag & flag) != 0;
}
bool ED_gizmotypes_snap_3d_invert_snap_get(struct wmGizmo *UNUSED(gz))
{
V3DSnapCursorData *snap_data = ED_view3d_cursor_snap_data_get();
return snap_data->is_snap_invert;
}
bool ED_gizmotypes_snap_3d_is_enabled(const wmGizmo *UNUSED(gz))
{
V3DSnapCursorData *snap_data = ED_view3d_cursor_snap_data_get();
@ -135,7 +117,7 @@ static V3DSnapCursorState *gizmo_snap_state_from_rna_get(struct PointerRNA *ptr)
return snap_gizmo->snap_state;
}
return ED_view3d_cursor_snap_state_get();
return ED_view3d_cursor_snap_state_active_get();
}
static int gizmo_snap_rna_snap_elements_force_get_fn(struct PointerRNA *ptr,
@ -168,7 +150,7 @@ static void gizmo_snap_rna_prevpoint_set_fn(struct PointerRNA *ptr,
const float *values)
{
V3DSnapCursorState *snap_state = gizmo_snap_state_from_rna_get(ptr);
ED_view3d_cursor_snap_prevpoint_set(snap_state, values);
ED_view3d_cursor_snap_state_prevpoint_set(snap_state, values);
}
static void gizmo_snap_rna_location_get_fn(struct PointerRNA *UNUSED(ptr),
@ -212,7 +194,7 @@ static void gizmo_snap_rna_snap_elem_index_get_fn(struct PointerRNA *UNUSED(ptr)
static void snap_cursor_free(SnapGizmo3D *snap_gizmo)
{
if (snap_gizmo->snap_state) {
ED_view3d_cursor_snap_deactive(snap_gizmo->snap_state);
ED_view3d_cursor_snap_state_free(snap_gizmo->snap_state);
snap_gizmo->snap_state = NULL;
}
}
@ -242,7 +224,7 @@ static bool snap_cursor_poll(ARegion *region, void *data)
static void snap_cursor_init(SnapGizmo3D *snap_gizmo)
{
snap_gizmo->snap_state = ED_view3d_cursor_snap_active();
snap_gizmo->snap_state = ED_view3d_cursor_snap_state_create();
snap_gizmo->snap_state->draw_point = true;
snap_gizmo->snap_state->draw_plane = false;
@ -270,7 +252,10 @@ static void snap_gizmo_draw(const bContext *UNUSED(C), wmGizmo *gz)
if (snap_gizmo->snap_state == NULL) {
snap_cursor_init(snap_gizmo);
}
/* All drawing is handled at the paint cursor. */
/* All drawing is handled at the paint cursor.
* Therefore, make sure that the #V3DSnapCursorState is the one of the gizmo being drawn. */
ED_view3d_cursor_snap_state_active_set(snap_gizmo->snap_state);
}
static int snap_gizmo_test_select(bContext *C, wmGizmo *gz, const int mval[2])

View File

@ -248,10 +248,7 @@ struct SnapObjectContext *ED_gizmotypes_snap_3d_context_ensure(struct Scene *sce
struct wmGizmo *gz);
void ED_gizmotypes_snap_3d_flag_set(struct wmGizmo *gz, int flag);
void ED_gizmotypes_snap_3d_flag_clear(struct wmGizmo *gz, int flag);
bool ED_gizmotypes_snap_3d_flag_test(struct wmGizmo *gz, int flag);
bool ED_gizmotypes_snap_3d_invert_snap_get(struct wmGizmo *gz);
bool ED_gizmotypes_snap_3d_is_enabled(const struct wmGizmo *gz);
void ED_gizmotypes_snap_3d_data_get(const struct bContext *C,

View File

@ -336,10 +336,12 @@ typedef struct V3DSnapCursorState {
} V3DSnapCursorState;
void ED_view3d_cursor_snap_state_default_set(V3DSnapCursorState *state);
V3DSnapCursorState *ED_view3d_cursor_snap_state_get(void);
V3DSnapCursorState *ED_view3d_cursor_snap_active(void);
void ED_view3d_cursor_snap_deactive(V3DSnapCursorState *state);
void ED_view3d_cursor_snap_prevpoint_set(V3DSnapCursorState *state, const float prev_point[3]);
V3DSnapCursorState *ED_view3d_cursor_snap_state_active_get(void);
void ED_view3d_cursor_snap_state_active_set(V3DSnapCursorState *state);
V3DSnapCursorState *ED_view3d_cursor_snap_state_create(void);
void ED_view3d_cursor_snap_state_free(V3DSnapCursorState *state);
void ED_view3d_cursor_snap_state_prevpoint_set(V3DSnapCursorState *state,
const float prev_point[3]);
void ED_view3d_cursor_snap_data_update(V3DSnapCursorState *state,
const struct bContext *C,
int x,

View File

@ -618,7 +618,7 @@ DEF_ICON(IPO_EASE_IN)
DEF_ICON(IPO_EASE_OUT)
DEF_ICON(IPO_EASE_IN_OUT)
DEF_ICON(NORMALIZE_FCURVES)
DEF_ICON_BLANK(635)
DEF_ICON(ORIENTATION_PARENT)
DEF_ICON_BLANK(636)
DEF_ICON_BLANK(637)
DEF_ICON_BLANK(638)

View File

@ -779,6 +779,9 @@ static bConstraint *edit_constraint_property_get(bContext *C, wmOperator *op, Ob
if (owner == EDIT_CONSTRAINT_OWNER_BONE) {
list = ED_object_pose_constraint_list(C);
if (!list) {
return NULL;
}
}
else {
list = &ob->constraints;

View File

@ -519,7 +519,7 @@ static void view3d_ob_drop_draw_activate(struct wmDropBox *drop, wmDrag *drag)
return;
}
state = static_cast<V3DSnapCursorState *>(ED_view3d_cursor_snap_active());
state = static_cast<V3DSnapCursorState *>(ED_view3d_cursor_snap_state_create());
drop->draw_data = state;
state->draw_plane = true;
@ -547,7 +547,7 @@ static void view3d_ob_drop_draw_deactivate(struct wmDropBox *drop, wmDrag * /*dr
{
V3DSnapCursorState *state = static_cast<V3DSnapCursorState *>(drop->draw_data);
if (state) {
ED_view3d_cursor_snap_deactive(state);
ED_view3d_cursor_snap_state_free(state);
drop->draw_data = nullptr;
}
}
@ -781,7 +781,7 @@ static void view3d_ob_drop_copy_local_id(bContext * /*C*/, wmDrag *drag, wmDropB
/* Don't duplicate ID's which were just imported. Only do that for existing, local IDs. */
BLI_assert(drag->type != WM_DRAG_ASSET);
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_active_get();
float obmat_final[4][4];
view3d_ob_drop_matrix_from_snap(snap_state, (Object *)id, obmat_final);

View File

@ -829,7 +829,7 @@ static bool v3d_cursor_snap_poll_fn(bContext *C)
};
/* Call this callback last and don't reuse the `state` as the caller can free the cursor. */
V3DSnapCursorState *state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *state = ED_view3d_cursor_snap_state_active_get();
if (state->poll && !state->poll(region, state->poll_data)) {
return false;
}
@ -840,7 +840,7 @@ static bool v3d_cursor_snap_poll_fn(bContext *C)
static void v3d_cursor_snap_draw_fn(bContext *C, int x, int y, void *UNUSED(customdata))
{
SnapCursorDataIntern *data_intern = &g_data_intern;
V3DSnapCursorState *state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *state = ED_view3d_cursor_snap_state_active_get();
V3DSnapCursorData *snap_data = &data_intern->snap_data;
wmWindowManager *wm = CTX_wm_manager(C);
@ -902,7 +902,7 @@ static void v3d_cursor_snap_draw_fn(bContext *C, int x, int y, void *UNUSED(cust
/** \} */
V3DSnapCursorState *ED_view3d_cursor_snap_state_get(void)
V3DSnapCursorState *ED_view3d_cursor_snap_state_active_get(void)
{
SnapCursorDataIntern *data_intern = &g_data_intern;
if (BLI_listbase_is_empty(&data_intern->state_intern)) {
@ -911,6 +911,26 @@ V3DSnapCursorState *ED_view3d_cursor_snap_state_get(void)
return &((SnapStateIntern *)data_intern->state_intern.last)->snap_state;
}
void ED_view3d_cursor_snap_state_active_set(V3DSnapCursorState *state)
{
if (state == &g_data_intern.state_default) {
BLI_assert_unreachable();
return;
}
SnapStateIntern *state_intern = STATE_INTERN_GET(state);
if (state_intern == (SnapStateIntern *)g_data_intern.state_intern.last) {
return;
}
if (!BLI_remlink_safe(&g_data_intern.state_intern, state_intern)) {
BLI_assert_unreachable();
return;
}
BLI_addtail(&g_data_intern.state_intern, state_intern);
}
static void v3d_cursor_snap_activate(void)
{
SnapCursorDataIntern *data_intern = &g_data_intern;
@ -964,7 +984,7 @@ void ED_view3d_cursor_snap_state_default_set(V3DSnapCursorState *state)
g_data_intern.state_default.poll_data = NULL;
}
V3DSnapCursorState *ED_view3d_cursor_snap_active(void)
V3DSnapCursorState *ED_view3d_cursor_snap_state_create(void)
{
SnapCursorDataIntern *data_intern = &g_data_intern;
if (!data_intern->handle) {
@ -978,7 +998,7 @@ V3DSnapCursorState *ED_view3d_cursor_snap_active(void)
return (V3DSnapCursorState *)&state_intern->snap_state;
}
void ED_view3d_cursor_snap_deactive(V3DSnapCursorState *state)
void ED_view3d_cursor_snap_state_free(V3DSnapCursorState *state)
{
SnapCursorDataIntern *data_intern = &g_data_intern;
if (BLI_listbase_is_empty(&data_intern->state_intern)) {
@ -993,11 +1013,12 @@ void ED_view3d_cursor_snap_deactive(V3DSnapCursorState *state)
}
}
void ED_view3d_cursor_snap_prevpoint_set(V3DSnapCursorState *state, const float prev_point[3])
void ED_view3d_cursor_snap_state_prevpoint_set(V3DSnapCursorState *state,
const float prev_point[3])
{
SnapCursorDataIntern *data_intern = &g_data_intern;
if (!state) {
state = ED_view3d_cursor_snap_state_get();
state = ED_view3d_cursor_snap_state_active_get();
}
if (prev_point) {
copy_v3_v3(data_intern->prevpoint_stack, prev_point);
@ -1023,7 +1044,7 @@ void ED_view3d_cursor_snap_data_update(V3DSnapCursorState *state,
View3D *v3d = CTX_wm_view3d(C);
if (!state) {
state = ED_view3d_cursor_snap_state_get();
state = ED_view3d_cursor_snap_state_active_get();
}
v3d_cursor_snap_update(state, C, wm, depsgraph, scene, region, v3d, x, y);
}

View File

@ -708,7 +708,7 @@ static bool view3d_interactive_add_calc_snap(bContext *UNUSED(C),
static void view3d_interactive_add_begin(bContext *C, wmOperator *op, const wmEvent *event)
{
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_active_get();
const int plane_axis = snap_state->plane_axis;
const enum ePlace_SnapTo snap_to = RNA_enum_get(op->ptr, "snap_target");
@ -726,7 +726,7 @@ static void view3d_interactive_add_begin(bContext *C, wmOperator *op, const wmEv
ipd->launch_event = WM_userdef_event_type_from_keymap_type(event->type);
V3DSnapCursorState *snap_state_new = ED_view3d_cursor_snap_active();
V3DSnapCursorState *snap_state_new = ED_view3d_cursor_snap_state_create();
if (snap_state_new) {
ipd->snap_state = snap_state = snap_state_new;
@ -755,7 +755,7 @@ static void view3d_interactive_add_begin(bContext *C, wmOperator *op, const wmEv
C, event, ipd->co_src, ipd->matrix_orient, &ipd->use_snap, &ipd->is_snap_invert) != 0;
snap_state->draw_plane = false;
ED_view3d_cursor_snap_prevpoint_set(snap_state, ipd->co_src);
ED_view3d_cursor_snap_state_prevpoint_set(snap_state, ipd->co_src);
ipd->orient_axis = plane_axis;
for (int i = 0; i < 2; i++) {
@ -910,7 +910,7 @@ static void view3d_interactive_add_exit(bContext *C, wmOperator *op)
UNUSED_VARS(C);
struct InteractivePlaceData *ipd = op->customdata;
ED_view3d_cursor_snap_deactive(ipd->snap_state);
ED_view3d_cursor_snap_state_free(ipd->snap_state);
if (ipd->region != NULL) {
if (ipd->draw_handle_view != NULL) {
@ -1036,7 +1036,7 @@ static int view3d_interactive_add_modal(bContext *C, wmOperator *op, const wmEve
if (ipd->step_index == STEP_BASE) {
if (ELEM(event->type, ipd->launch_event, LEFTMOUSE)) {
if (event->val == KM_RELEASE) {
ED_view3d_cursor_snap_prevpoint_set(ipd->snap_state, ipd->co_src);
ED_view3d_cursor_snap_state_prevpoint_set(ipd->snap_state, ipd->co_src);
/* Set secondary plane. */
@ -1265,7 +1265,7 @@ static bool view3d_interactive_add_poll(bContext *C)
static int idp_rna_plane_axis_get_fn(struct PointerRNA *UNUSED(ptr),
struct PropertyRNA *UNUSED(prop))
{
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_active_get();
return snap_state->plane_axis;
}
@ -1273,7 +1273,7 @@ static void idp_rna_plane_axis_set_fn(struct PointerRNA *UNUSED(ptr),
struct PropertyRNA *UNUSED(prop),
int value)
{
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_active_get();
snap_state->plane_axis = (short)value;
ED_view3d_cursor_snap_state_default_set(snap_state);
}
@ -1281,7 +1281,7 @@ static void idp_rna_plane_axis_set_fn(struct PointerRNA *UNUSED(ptr),
static int idp_rna_plane_depth_get_fn(struct PointerRNA *UNUSED(ptr),
struct PropertyRNA *UNUSED(prop))
{
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_active_get();
return snap_state->plane_depth;
}
@ -1289,7 +1289,7 @@ static void idp_rna_plane_depth_set_fn(struct PointerRNA *UNUSED(ptr),
struct PropertyRNA *UNUSED(prop),
int value)
{
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_active_get();
snap_state->plane_depth = value;
ED_view3d_cursor_snap_state_default_set(snap_state);
}
@ -1297,7 +1297,7 @@ static void idp_rna_plane_depth_set_fn(struct PointerRNA *UNUSED(ptr),
static int idp_rna_plane_orient_get_fn(struct PointerRNA *UNUSED(ptr),
struct PropertyRNA *UNUSED(prop))
{
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_active_get();
return snap_state->plane_orient;
}
@ -1305,7 +1305,7 @@ static void idp_rna_plane_orient_set_fn(struct PointerRNA *UNUSED(ptr),
struct PropertyRNA *UNUSED(prop),
int value)
{
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_active_get();
snap_state->plane_orient = value;
ED_view3d_cursor_snap_state_default_set(snap_state);
}
@ -1313,7 +1313,7 @@ static void idp_rna_plane_orient_set_fn(struct PointerRNA *UNUSED(ptr),
static int idp_rna_snap_target_get_fn(struct PointerRNA *UNUSED(ptr),
struct PropertyRNA *UNUSED(prop))
{
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_active_get();
if (snap_state->snap_elem_force == SCE_SNAP_MODE_NONE) {
return PLACE_SNAP_TO_DEFAULT;
}
@ -1333,7 +1333,7 @@ static void idp_rna_snap_target_set_fn(struct PointerRNA *UNUSED(ptr),
snap_mode = SCE_SNAP_MODE_GEOM;
}
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_active_get();
snap_state->snap_elem_force = snap_mode;
ED_view3d_cursor_snap_state_default_set(snap_state);
}
@ -1341,7 +1341,7 @@ static void idp_rna_snap_target_set_fn(struct PointerRNA *UNUSED(ptr),
static bool idp_rna_use_plane_axis_auto_get_fn(struct PointerRNA *UNUSED(ptr),
struct PropertyRNA *UNUSED(prop))
{
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_active_get();
return snap_state->use_plane_axis_auto;
}
@ -1349,7 +1349,7 @@ static void idp_rna_use_plane_axis_auto_set_fn(struct PointerRNA *UNUSED(ptr),
struct PropertyRNA *UNUSED(prop),
bool value)
{
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_get();
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_active_get();
snap_state->use_plane_axis_auto = value;
ED_view3d_cursor_snap_state_default_set(snap_state);
}
@ -1523,7 +1523,7 @@ void VIEW3D_OT_interactive_add(struct wmOperatorType *ot)
static void preview_plane_free_fn(void *customdata)
{
V3DSnapCursorState *snap_state = customdata;
ED_view3d_cursor_snap_deactive(snap_state);
ED_view3d_cursor_snap_state_free(snap_state);
}
static bool snap_cursor_poll(ARegion *region, void *data)
@ -1537,7 +1537,7 @@ static bool snap_cursor_poll(ARegion *region, void *data)
static void WIDGETGROUP_placement_setup(const bContext *UNUSED(C), wmGizmoGroup *gzgroup)
{
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_active();
V3DSnapCursorState *snap_state = ED_view3d_cursor_snap_state_create();
if (snap_state) {
snap_state->poll = snap_cursor_poll;
snap_state->poll_data = gzgroup->type;
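The renames in this file make ownership explicit: `ED_view3d_cursor_snap_state_active_get` fetches the shared active state, while `ED_view3d_cursor_snap_state_create` and `ED_view3d_cursor_snap_state_free` manage a caller-owned state. A minimal sketch of the new lifecycle, inferred from the call sites in this diff (`co_src` is a stand-in coordinate):

V3DSnapCursorState *state = ED_view3d_cursor_snap_state_create();
if (state) {
  state->draw_plane = false;
  ED_view3d_cursor_snap_state_prevpoint_set(state, co_src);
  /* ... interactive placement runs ... */
  ED_view3d_cursor_snap_state_free(state); /* Pairs with _state_create(). */
}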

View File

@ -18,6 +18,8 @@
#include "DNA_object_types.h"
#include "DNA_scene_types.h"
#include "BKE_global.h"
#include "BLI_array.hh"
#include "BLI_convexhull_2d.h"
#include "BLI_linklist.h"
@ -59,6 +61,7 @@
#include "ED_image.h"
#include "ED_mesh.h"
#include "ED_screen.h"
#include "ED_undo.h"
#include "ED_uvedit.h"
#include "ED_view3d.h"
@ -169,6 +172,14 @@ void blender::geometry::UVPackIsland_Params::setUDIMOffsetFromSpaceImage(const S
}
/** \} */
bool blender::geometry::UVPackIsland_Params::isCancelled() const
{
if (stop) {
return *stop;
}
return false;
}
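`isCancelled()` wraps an optional `bool *stop` flag so the packer can run both under the job system (which owns the flag) and synchronously (where `stop` stays null). A sketch of the resulting cooperative-cancellation pattern, assuming a typical packing loop:

for (const int64_t i : islands.index_range()) {
  if (params.isCancelled()) {
    break; /* Keep placements computed so far; the caller skips the rest. */
  }
  /* ... pack island i ... */
}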
/* -------------------------------------------------------------------- */
/** \name Parametrizer Conversion
* \{ */
@ -1141,6 +1152,7 @@ static bool island_has_pins(const Scene *scene,
* This is needed to perform UV packing on objects that aren't in edit-mode.
* \param udim_source_closest: UDIM source SpaceImage.
* \param original_selection: Pack to original selection.
* \param notify_wm: Notify the WM of any changes. (UI thread only.)
* \param params: Parameters and options to pass to the packing engine.
*/
static void uvedit_pack_islands_multi(const Scene *scene,
@ -1149,6 +1161,7 @@ static void uvedit_pack_islands_multi(const Scene *scene,
BMesh **bmesh_override,
const SpaceImage *udim_source_closest,
const bool original_selection,
const bool notify_wm,
blender::geometry::UVPackIsland_Params *params)
{
blender::Vector<FaceIsland *> island_vector;
@ -1271,6 +1284,7 @@ static void uvedit_pack_islands_multi(const Scene *scene,
BLI_memarena_free(arena);
const float scale = pack_islands(pack_island_vector, *params);
const bool is_cancelled = params->isCancelled();
float base_offset[2] = {0.0f, 0.0f};
copy_v2_v2(base_offset, params->udim_base_offset);
@ -1309,7 +1323,10 @@ static void uvedit_pack_islands_multi(const Scene *scene,
float matrix[2][2];
float matrix_inverse[2][2];
float pre_translate[2];
for (int64_t i : pack_island_vector.index_range()) {
for (const int64_t i : pack_island_vector.index_range()) {
if (is_cancelled) {
continue;
}
blender::geometry::PackIsland *pack_island = pack_island_vector[i];
FaceIsland *island = island_vector[pack_island->caller_index];
const float island_scale = pack_island->can_scale_(*params) ? scale : 1.0f;
@ -1339,16 +1356,21 @@ static void uvedit_pack_islands_multi(const Scene *scene,
pre_translate[1] = selection_min_co[1] / rescale;
island_uv_transform(island, matrix, pre_translate);
}
}
for (const int64_t i : pack_island_vector.index_range()) {
blender::geometry::PackIsland *pack_island = pack_island_vector[i];
/* Cleanup memory. */
pack_island_vector[i] = nullptr;
delete pack_island;
}
for (uint ob_index = 0; ob_index < objects_len; ob_index++) {
Object *obedit = objects[ob_index];
DEG_id_tag_update(static_cast<ID *>(obedit->data), ID_RECALC_GEOMETRY);
WM_main_add_notifier(NC_GEOM | ND_DATA, obedit->data);
if (notify_wm && !is_cancelled) {
for (uint ob_index = 0; ob_index < objects_len; ob_index++) {
Object *obedit = objects[ob_index];
DEG_id_tag_update(static_cast<ID *>(obedit->data), ID_RECALC_GEOMETRY);
WM_main_add_notifier(NC_GEOM | ND_DATA, obedit->data);
}
}
for (FaceIsland *island : island_vector) {
@ -1361,6 +1383,9 @@ static void uvedit_pack_islands_multi(const Scene *scene,
/** \name Pack UV Islands Operator
* \{ */
/* TODO: support this, interaction with the job-system needs to be handled carefully. */
// #define USE_INTERACTIVE_PACK
/* Packing targets. */
enum {
PACK_UDIM_SRC_CLOSEST = 0,
@ -1368,6 +1393,70 @@ enum {
PACK_ORIGINAL_AABB,
};
struct UVPackIslandsData {
wmWindowManager *wm;
const Scene *scene;
Object **objects;
uint objects_len;
const SpaceImage *sima;
int udim_source;
bContext *undo_context;
const char *undo_str;
bool use_job;
blender::geometry::UVPackIsland_Params pack_island_params;
};
static void pack_islands_startjob(void *pidv, bool *stop, bool *do_update, float *progress)
{
*progress = 0.02f;
UVPackIslandsData *pid = static_cast<UVPackIslandsData *>(pidv);
pid->pack_island_params.stop = stop;
pid->pack_island_params.do_update = do_update;
pid->pack_island_params.progress = progress;
uvedit_pack_islands_multi(pid->scene,
pid->objects,
pid->objects_len,
nullptr,
(pid->udim_source == PACK_UDIM_SRC_CLOSEST) ? pid->sima : nullptr,
(pid->udim_source == PACK_ORIGINAL_AABB),
!pid->use_job,
&pid->pack_island_params);
*progress = 0.99f;
*do_update = true;
}
static void pack_islands_endjob(void *pidv)
{
UVPackIslandsData *pid = static_cast<UVPackIslandsData *>(pidv);
for (uint ob_index = 0; ob_index < pid->objects_len; ob_index++) {
Object *obedit = pid->objects[ob_index];
DEG_id_tag_update(static_cast<ID *>(obedit->data), ID_RECALC_GEOMETRY);
WM_main_add_notifier(NC_GEOM | ND_DATA, obedit->data);
}
WM_main_add_notifier(NC_SPACE | ND_SPACE_IMAGE, NULL);
if (pid->undo_str) {
ED_undo_push(pid->undo_context, pid->undo_str);
}
}
static void pack_islands_freejob(void *pidv)
{
WM_cursor_wait(false);
UVPackIslandsData *pid = static_cast<UVPackIslandsData *>(pidv);
MEM_freeN(pid->objects);
WM_set_locked_interface(pid->wm, false);
MEM_freeN(pid);
}
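These three callbacks follow the usual wmJob contract (an assumption based on how other Blender jobs behave): `startjob` runs on a worker thread and reports back only through the `stop`/`do_update`/`progress` pointers, `endjob` runs on the main thread (hence the notifiers and undo push), and the free function releases the customdata whether or not the job completed. Condensed, the wiring in `pack_islands_exec` below amounts to:

wmJob *wm_job = WM_jobs_get(wm, win, scene, "Packing UVs", WM_JOB_PROGRESS, WM_JOB_TYPE_UV_PACK);
WM_jobs_customdata_set(wm_job, pid, pack_islands_freejob);
WM_jobs_callbacks(wm_job, pack_islands_startjob, nullptr, nullptr, pack_islands_endjob);
WM_jobs_start(wm, wm_job);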
static int pack_islands_exec(bContext *C, wmOperator *op)
{
ViewLayer *view_layer = CTX_data_view_layer(C);
@ -1400,7 +1489,23 @@ static int pack_islands_exec(bContext *C, wmOperator *op)
RNA_float_set(op->ptr, "margin", scene->toolsettings->uvcalc_margin);
}
blender::geometry::UVPackIsland_Params pack_island_params;
UVPackIslandsData *pid = static_cast<UVPackIslandsData *>(
MEM_callocN(sizeof(UVPackIslandsData), "pack_islands_data"));
pid->use_job = op->flag & OP_IS_INVOKE;
pid->scene = scene;
pid->objects = objects;
pid->objects_len = objects_len;
pid->sima = sima;
pid->udim_source = udim_source;
pid->wm = CTX_wm_manager(C);
blender::geometry::UVPackIsland_Params &pack_island_params = pid->pack_island_params;
{
/* Call default constructor and copy the defaults. */
blender::geometry::UVPackIsland_Params default_params;
pack_island_params = default_params;
}
pack_island_params.setFromUnwrapOptions(options);
pack_island_params.rotate = RNA_boolean_get(op->ptr, "rotate");
pack_island_params.scale_to_fit = RNA_boolean_get(op->ptr, "scale");
@ -1416,15 +1521,31 @@ static int pack_islands_exec(bContext *C, wmOperator *op)
pack_island_params.setUDIMOffsetFromSpaceImage(sima);
}
uvedit_pack_islands_multi(scene,
objects,
objects_len,
nullptr,
(udim_source == PACK_UDIM_SRC_CLOSEST) ? sima : nullptr,
(udim_source == PACK_ORIGINAL_AABB),
&pack_island_params);
if (pid->use_job) {
/* Setup job. */
if (pid->wm->op_undo_depth == 0) {
/* The job must do its own undo push. */
pid->undo_context = C;
pid->undo_str = op->type->name;
}
MEM_freeN(objects);
wmJob *wm_job = WM_jobs_get(
pid->wm, CTX_wm_window(C), scene, "Packing UVs", WM_JOB_PROGRESS, WM_JOB_TYPE_UV_PACK);
WM_jobs_customdata_set(wm_job, pid, pack_islands_freejob);
WM_jobs_timer(wm_job, 0.1, 0, 0);
WM_set_locked_interface(pid->wm, true);
WM_jobs_callbacks(wm_job, pack_islands_startjob, nullptr, nullptr, pack_islands_endjob);
WM_cursor_wait(true);
G.is_break = false;
WM_jobs_start(CTX_wm_manager(C), wm_job);
return OPERATOR_FINISHED;
}
pack_islands_startjob(pid, nullptr, nullptr, nullptr);
pack_islands_endjob(pid);
MEM_freeN(pid);
return OPERATOR_FINISHED;
}
@ -1452,7 +1573,7 @@ static const EnumPropertyItem pack_shape_method_items[] = {
};
static const EnumPropertyItem pinned_islands_method_items[] = {
{ED_UVPACK_PIN_NORMAL, "NORMAL", 0, "Normal", "Pin information is not used"},
{ED_UVPACK_PIN_DEFAULT, "DEFAULT", 0, "Default", "Pin information is not used"},
{ED_UVPACK_PIN_IGNORED, "IGNORED", 0, "Ignored", "Pinned islands are not packed"},
{ED_UVPACK_PIN_LOCK_SCALE, "SCALE", 0, "Locked scale", "Pinned islands won't rescale"},
{ED_UVPACK_PIN_LOCK_ROTATION, "ROTATION", 0, "Locked rotation", "Pinned islands won't rotate"},
@ -1487,10 +1608,21 @@ void UV_OT_pack_islands(wmOperatorType *ot)
ot->description =
"Transform all islands so that they fill up the UV/UDIM space as much as possible";
#ifdef USE_INTERACTIVE_PACK
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;
#else
/* The operator will handle undo, so the job system can push() it after the job completes. */
ot->flag = OPTYPE_REGISTER;
#endif
/* api callbacks */
ot->exec = pack_islands_exec;
#ifdef USE_INTERACTIVE_PACK
ot->invoke = WM_operator_props_popup_call;
#else
ot->invoke = WM_operator_props_popup_confirm;
#endif
ot->poll = ED_operator_uvedit;
/* properties */
@ -1510,7 +1642,7 @@ void UV_OT_pack_islands(wmOperatorType *ot)
RNA_def_enum(ot->srna,
"pin_method",
pinned_islands_method_items,
ED_UVPACK_PIN_NORMAL,
ED_UVPACK_PIN_DEFAULT,
"Pinned Islands",
"");
RNA_def_enum(ot->srna,
@ -2234,7 +2366,7 @@ void ED_uvedit_live_unwrap(const Scene *scene, Object **objects, int objects_len
pack_island_params.margin = scene->toolsettings->uvcalc_margin;
uvedit_pack_islands_multi(
scene, objects, objects_len, nullptr, nullptr, false, &pack_island_params);
scene, objects, objects_len, nullptr, nullptr, false, true, &pack_island_params);
}
}
@ -2377,7 +2509,7 @@ static int unwrap_exec(bContext *C, wmOperator *op)
pack_island_params.margin = RNA_float_get(op->ptr, "margin");
uvedit_pack_islands_multi(
scene, objects, objects_len, nullptr, nullptr, false, &pack_island_params);
scene, objects, objects_len, nullptr, nullptr, false, true, &pack_island_params);
MEM_freeN(objects);
@ -2759,7 +2891,7 @@ static int smart_project_exec(bContext *C, wmOperator *op)
params.margin = RNA_float_get(op->ptr, "island_margin");
uvedit_pack_islands_multi(
scene, objects_changed, object_changed_len, nullptr, nullptr, false, &params);
scene, objects_changed, object_changed_len, nullptr, nullptr, false, true, &params);
/* #uvedit_pack_islands_multi only supports `per_face_aspect = false`. */
const bool per_face_aspect = false;
@ -3747,7 +3879,7 @@ void ED_uvedit_add_simple_uvs(Main *bmain, const Scene *scene, Object *ob)
params.margin_method = ED_UVPACK_MARGIN_SCALED;
params.margin = 0.001f;
uvedit_pack_islands_multi(scene, &ob, 1, &bm, nullptr, false, &params);
uvedit_pack_islands_multi(scene, &ob, 1, &bm, nullptr, false, true, &params);
/* Write back from BMesh to Mesh. */
BMeshToMeshParams bm_to_me_params{};

View File

@ -37,7 +37,7 @@ enum eUVPackIsland_ShapeMethod {
enum eUVPackIsland_PinMethod {
ED_UVPACK_PIN_IGNORED = 0,
ED_UVPACK_PIN_NORMAL,
ED_UVPACK_PIN_DEFAULT,
ED_UVPACK_PIN_LOCK_ROTATION,
ED_UVPACK_PIN_LOCK_ROTATION_SCALE,
ED_UVPACK_PIN_LOCK_SCALE,
@ -55,6 +55,7 @@ class UVPackIsland_Params {
void setFromUnwrapOptions(const UnwrapOptions &options);
void setUDIMOffsetFromSpaceImage(const SpaceImage *sima);
bool isCancelled() const;
/** Islands can be rotated to improve packing. */
bool rotate;
@ -84,6 +85,12 @@ class UVPackIsland_Params {
float target_aspect_y;
/** Which shape to use when packing. */
eUVPackIsland_ShapeMethod shape_method;
/** Abandon packing early when set by the job system. */
bool *stop;
bool *do_update;
/** How much progress we have made. From wmJob. */
float *progress;
};
class uv_phi;

View File

@ -18,14 +18,6 @@ static inline bool naive_edges_equal(const int2 &edge1, const int2 &edge2)
return edge1 == edge2;
}
template<typename T>
static void copy_to_new_verts(MutableSpan<T> data, const Span<int> new_to_old_verts_map)
{
const Span<T> old_data = data.drop_back(new_to_old_verts_map.size());
MutableSpan<T> new_data = data.take_back(new_to_old_verts_map.size());
array_utils::gather(old_data, new_to_old_verts_map, new_data);
}
static void add_new_vertices(Mesh &mesh, const Span<int> new_to_old_verts_map)
{
/* These types aren't supported for interpolation below. */
@ -46,22 +38,26 @@ static void add_new_vertices(Mesh &mesh, const Span<int> new_to_old_verts_map)
continue;
}
bke::attribute_math::convert_to_static_type(attribute.span.type(), [&](auto dummy) {
using T = decltype(dummy);
copy_to_new_verts(attribute.span.typed<T>(), new_to_old_verts_map);
});
bke::attribute_math::gather(attribute.span,
new_to_old_verts_map,
attribute.span.take_back(new_to_old_verts_map.size()));
attribute.finish();
}
if (float3 *orco = static_cast<float3 *>(
CustomData_get_layer_for_write(&mesh.vdata, CD_ORCO, mesh.totvert)))
{
copy_to_new_verts<float3>({orco, mesh.totvert}, new_to_old_verts_map);
array_utils::gather(Span(orco, mesh.totvert),
new_to_old_verts_map,
MutableSpan(orco, mesh.totvert).take_back(new_to_old_verts_map.size()));
}
if (int *orig_indices = static_cast<int *>(
CustomData_get_layer_for_write(&mesh.vdata, CD_ORIGINDEX, mesh.totvert)))
{
copy_to_new_verts<int>({orig_indices, mesh.totvert}, new_to_old_verts_map);
array_utils::gather(
Span(orig_indices, mesh.totvert),
new_to_old_verts_map,
MutableSpan(orig_indices, mesh.totvert).take_back(new_to_old_verts_map.size()));
}
}
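`bke::attribute_math::gather` replaces the removed per-type `copy_to_new_verts` helper with a type-erased equivalent of the same loop. For a typed span its semantics amount to (a sketch, not the actual implementation):

template<typename T>
void gather_sketch(Span<T> src, Span<int> map, MutableSpan<T> dst)
{
  /* For every new element, copy from the mapped source index. */
  for (const int64_t i : map.index_range()) {
    dst[i] = src[map[i]];
  }
}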
@ -119,12 +115,8 @@ static void add_new_edges(Mesh &mesh,
const CPPType &type = attribute.varray.type();
void *new_data = MEM_malloc_arrayN(new_edges.size(), type.size(), __func__);
bke::attribute_math::convert_to_static_type(type, [&](auto dummy) {
using T = decltype(dummy);
const VArray<T> src = attribute.varray.typed<T>();
MutableSpan<T> dst(static_cast<T *>(new_data), new_edges.size());
array_utils::gather(src, new_to_old_edges_map, dst);
});
bke::attribute_math::gather(
attribute.varray, new_to_old_edges_map, GMutableSpan(type, new_data, new_edges.size()));
/* Free the original attribute as soon as possible to lower peak memory usage. */
attributes.remove(local_id);

View File

@ -51,21 +51,19 @@ BLI_NOINLINE bke::CurvesGeometry create_curve_from_vert_indices(
continue;
}
const GVArray mesh_attribute = *mesh_attributes.lookup(attribute_id, ATTR_DOMAIN_POINT);
const GVArray src = *mesh_attributes.lookup(attribute_id, ATTR_DOMAIN_POINT);
/* Some attributes might not exist if they were builtin attributes on domains that don't
* have any elements, e.g. a face attribute on the output of the line primitive node. */
if (!mesh_attribute) {
if (!src) {
continue;
}
const eCustomDataType type = bke::cpp_type_to_custom_data_type(src.type());
/* Copy attribute based on the map for this curve. */
bke::attribute_math::convert_to_static_type(mesh_attribute.type(), [&](auto dummy) {
using T = decltype(dummy);
bke::SpanAttributeWriter<T> attribute =
curves_attributes.lookup_or_add_for_write_only_span<T>(attribute_id, ATTR_DOMAIN_POINT);
array_utils::gather<T>(mesh_attribute.typed<T>(), vert_indices, attribute.span);
attribute.finish();
});
bke::GSpanAttributeWriter dst = curves_attributes.lookup_or_add_for_write_only_span(
attribute_id, ATTR_DOMAIN_POINT, type);
bke::attribute_math::gather(src, vert_indices, dst.span);
dst.finish();
}
return curves;

View File

@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BLI_kdtree.h"
#include "BLI_offset_indices.hh"
#include "BLI_task.hh"
#include "DNA_pointcloud_types.h"
@ -80,27 +81,25 @@ PointCloud *point_merge_by_distance(const PointCloud &src_points,
}
/* This array stores an offset into `merge_map` for every result point. */
Array<int> map_offsets(dst_size + 1);
Array<int> map_offsets_data(dst_size + 1);
int offset = 0;
for (const int i : IndexRange(dst_size)) {
map_offsets[i] = offset;
map_offsets_data[i] = offset;
offset += point_merge_counts[i];
}
map_offsets.last() = offset;
map_offsets_data.last() = offset;
OffsetIndices<int> map_offsets(map_offsets_data);
point_merge_counts.fill(0);
/* This array stores all of the source indices for every result point. The size is the source
* size because every input point is either merged with another or copied directly. */
Array<int> merge_map(src_size);
Array<int> merge_map_indices(src_size);
for (const int i : IndexRange(src_size)) {
const int merge_index = merge_indices[i];
const int dst_index = src_to_dst_indices[merge_index];
const IndexRange points(map_offsets[dst_index],
map_offsets[dst_index + 1] - map_offsets[dst_index]);
MutableSpan<int> point_merge_indices = merge_map.as_mutable_span().slice(points);
point_merge_indices[point_merge_counts[dst_index]] = i;
merge_map_indices[map_offsets[dst_index].first() + point_merge_counts[dst_index]] = i;
point_merge_counts[dst_index]++;
}
@ -114,8 +113,7 @@ PointCloud *point_merge_by_distance(const PointCloud &src_points,
threading::parallel_for(IndexRange(dst_size), 1024, [&](IndexRange range) {
for (const int i_dst : range) {
const IndexRange points(map_offsets[i_dst], map_offsets[i_dst + 1] - map_offsets[i_dst]);
dst.span[i_dst] = src[points.first()];
dst.span[i_dst] = src[map_offsets[i_dst].first()];
}
});
@ -143,9 +141,7 @@ PointCloud *point_merge_by_distance(const PointCloud &src_points,
* in the mixer the size of the result point cloud and to improve memory locality. */
bke::attribute_math::DefaultMixer<T> mixer{dst_attribute.span.slice(i_dst, 1)};
const IndexRange points(map_offsets[i_dst],
map_offsets[i_dst + 1] - map_offsets[i_dst]);
Span<int> src_merge_indices = merge_map.as_span().slice(points);
Span<int> src_merge_indices = merge_map_indices.as_span().slice(map_offsets[i_dst]);
for (const int i_src : src_merge_indices) {
mixer.mix_in(0, src[i_src]);
}
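`OffsetIndices` wraps a monotonically increasing offsets array of size `n + 1` and returns the `IndexRange` of group `i` on subscript, replacing the manual `map_offsets[i + 1] - map_offsets[i]` arithmetic used before. A small sketch under that assumption:

/* Counts {2, 1, 3} become offsets {0, 2, 3, 6}. */
Array<int> offsets_data = {0, 2, 3, 6};
OffsetIndices<int> offsets(offsets_data);
IndexRange group = offsets[1]; /* group.first() == 2, group.size() == 1. */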

View File

@ -6,6 +6,8 @@
#include "GEO_uv_pack.hh"
#include "BKE_global.h"
#include "BLI_array.hh"
#include "BLI_bounds.hh"
#include "BLI_boxpack_2d.h"
@ -329,7 +331,7 @@ UVPackIsland_Params::UVPackIsland_Params()
only_selected_faces = false;
use_seams = false;
correct_aspect = false;
pin_method = ED_UVPACK_PIN_NORMAL;
pin_method = ED_UVPACK_PIN_DEFAULT;
pin_unselected = false;
merge_overlap = false;
margin = 0.001f;
@ -338,6 +340,9 @@ UVPackIsland_Params::UVPackIsland_Params()
udim_base_offset[1] = 0.0f;
target_aspect_y = 1.0f;
shape_method = ED_UVPACK_SHAPE_AABB;
stop = nullptr;
do_update = nullptr;
progress = nullptr;
}
/* Compact representation for AABB packers. */
@ -1200,6 +1205,13 @@ static int64_t pack_island_xatlas(const Span<UVAABBIsland *> island_indices,
while (i < island_indices.size()) {
if (params.stop && G.is_break) {
*params.stop = true;
}
if (params.isCancelled()) {
break;
}
while (traced_islands < i) {
/* Trace an island that's been solved. (Greedy.) */
const int64_t island_index = island_indices[traced_islands]->index;
@ -1308,17 +1320,26 @@ static int64_t pack_island_xatlas(const Span<UVAABBIsland *> island_indices,
else {
scan_line = std::max(0, scan_line - 25); /* `-25` must be odd. */
}
if (params.progress) {
/* We don't (yet) have a good model for how long the pack operation is going
* to take, so just update the progress a little bit. */
const float previous_progress = *params.progress;
*params.do_update = true;
const float reduction = island_indices.size() / (island_indices.size() + 0.5f);
*params.progress = 1.0f - (1.0f - previous_progress) * reduction;
}
}
if (!is_larger(*r_extent, extent, params)) {
return 0;
}
*r_extent = extent;
for (const int64_t i : phis.index_range()) {
const int64_t island_index = island_indices[i]->index;
for (int64_t j = 0; j < i; j++) {
const int64_t island_index = island_indices[j]->index;
r_phis[island_index] = phis[island_index];
}
return phis.size();
return i;
}
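The progress update above is a geometric relaxation: every pass multiplies the remaining distance to 1.0 by `reduction = n / (n + 0.5)`, so the bar always advances without overshooting. As a worked example (the arithmetic is illustrative, not from the source): with `n = 100` islands, `reduction ≈ 0.995`, so one update moves the bar from 0.0 to about 0.005, and one hundred updates reach `1 - 0.995^100 ≈ 0.39`; since the scan-line loop runs many times per island, the bar keeps creeping toward 1.0 for the remainder of the pack.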
/**
@ -1434,7 +1455,7 @@ static float pack_islands_scale_margin(const Span<PackIsland *> islands,
alpaca_cutoff = alpaca_cutoff_fast;
}
}
const int64_t max_box_pack = std::min(alpaca_cutoff, islands.size());
int64_t max_box_pack = std::min(alpaca_cutoff, islands.size());
rctf extent = {0.0f, 1e30f, 0.0f, 1e30f};
@ -1460,13 +1481,13 @@ static float pack_islands_scale_margin(const Span<PackIsland *> islands,
switch (params.shape_method) {
case ED_UVPACK_SHAPE_CONVEX:
case ED_UVPACK_SHAPE_CONCAVE:
pack_island_xatlas(aabbs.as_span().take_front(max_box_pack),
islands,
scale,
margin,
params,
r_phis,
&extent);
max_box_pack = pack_island_xatlas(aabbs.as_span().take_front(max_box_pack),
islands,
scale,
margin,
params,
r_phis,
&extent);
break;
default:
break;

View File

@ -212,14 +212,17 @@ set(VULKAN_SRC
vulkan/vk_drawlist.cc
vulkan/vk_fence.cc
vulkan/vk_framebuffer.cc
vulkan/vk_immediate.cc
vulkan/vk_index_buffer.cc
vulkan/vk_memory.cc
vulkan/vk_memory_layout.cc
vulkan/vk_pipeline_state.cc
vulkan/vk_pipeline.cc
vulkan/vk_pixel_buffer.cc
vulkan/vk_push_constants.cc
vulkan/vk_query.cc
vulkan/vk_resource_tracker.cc
vulkan/vk_sampler.cc
vulkan/vk_shader.cc
vulkan/vk_shader_interface.cc
vulkan/vk_shader_log.cc
@ -227,6 +230,7 @@ set(VULKAN_SRC
vulkan/vk_storage_buffer.cc
vulkan/vk_texture.cc
vulkan/vk_uniform_buffer.cc
vulkan/vk_vertex_attribute_object.cc
vulkan/vk_vertex_buffer.cc
vulkan/vk_backend.hh
@ -243,14 +247,17 @@ set(VULKAN_SRC
vulkan/vk_drawlist.hh
vulkan/vk_fence.hh
vulkan/vk_framebuffer.hh
vulkan/vk_immediate.hh
vulkan/vk_index_buffer.hh
vulkan/vk_memory.hh
vulkan/vk_memory_layout.hh
vulkan/vk_pipeline_state.hh
vulkan/vk_pipeline.hh
vulkan/vk_pixel_buffer.hh
vulkan/vk_push_constants.hh
vulkan/vk_query.hh
vulkan/vk_resource_tracker.hh
vulkan/vk_sampler.hh
vulkan/vk_shader.hh
vulkan/vk_shader_interface.hh
vulkan/vk_shader_log.hh
@ -258,6 +265,7 @@ set(VULKAN_SRC
vulkan/vk_storage_buffer.hh
vulkan/vk_texture.hh
vulkan/vk_uniform_buffer.hh
vulkan/vk_vertex_attribute_object.hh
vulkan/vk_vertex_buffer.hh
)
@ -848,6 +856,7 @@ if(WITH_GTESTS)
tests/state_blend_test.cc
tests/storage_buffer_test.cc
tests/texture_test.cc
tests/vertex_buffer_test.cc
tests/gpu_testing.hh
)

View File

@ -0,0 +1,116 @@
#include "testing/testing.h"
#include "GPU_framebuffer.h"
#include "GPU_immediate.h"
#include "GPU_shader.h"
#include "GPU_vertex_buffer.h"
#include "GPU_vertex_format.h"
#include "BLI_index_range.hh"
#include "BLI_math_vector_types.hh"
#include "gpu_testing.hh"
namespace blender::gpu::tests {
static constexpr int Size = 256;
template<GPUVertCompType comp_type, GPUVertFetchMode fetch_mode, typename ColorType>
static void vertex_buffer_fetch_mode(ColorType color)
{
GPUOffScreen *offscreen = GPU_offscreen_create(Size,
Size,
false,
GPU_RGBA16F,
GPU_TEXTURE_USAGE_ATTACHMENT |
GPU_TEXTURE_USAGE_HOST_READ,
nullptr);
BLI_assert(offscreen != nullptr);
GPU_offscreen_bind(offscreen, false);
GPUTexture *color_texture = GPU_offscreen_color_texture(offscreen);
GPU_texture_clear(color_texture, GPU_DATA_FLOAT, float4(0.0f));
GPUVertFormat format = {0};
GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
GPU_vertformat_attr_add(&format, "color", comp_type, 4, fetch_mode);
GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
GPU_vertbuf_data_alloc(vbo, 4);
struct Vert {
float2 pos;
ColorType color;
};
Vert data[4] = {
{float2(-1.0, -1.0), color},
{float2(1.0, -1.0), color},
{float2(1.0, 1.0), color},
{float2(-1.0, 1.0), color},
};
for (int i : IndexRange(4)) {
GPU_vertbuf_vert_set(vbo, i, &data[i]);
}
GPUBatch *batch = GPU_batch_create(GPU_PRIM_TRI_FAN, vbo, NULL);
GPU_batch_program_set_builtin(batch, GPU_SHADER_3D_FLAT_COLOR);
GPU_batch_draw(batch);
GPU_offscreen_unbind(offscreen, false);
GPU_flush();
/* Read back data and perform some basic tests. */
float read_data[4 * Size * Size];
GPU_offscreen_read_color(offscreen, GPU_DATA_FLOAT, &read_data);
for (int pixel_index = 0; pixel_index < Size * Size; pixel_index++) {
float4 read_color = float4(&read_data[pixel_index * 4]);
EXPECT_EQ(read_color, float4(color));
}
GPU_batch_discard(batch);
GPU_vertbuf_discard(vbo);
GPU_offscreen_free(offscreen);
}
static void test_vertex_buffer_fetch_mode__GPU_COMP_I8__GPU_FETCH_INT_TO_FLOAT()
{
vertex_buffer_fetch_mode<GPU_COMP_I8, GPU_FETCH_INT_TO_FLOAT, char4>(char4(4, 5, 6, 1));
}
GPU_TEST(vertex_buffer_fetch_mode__GPU_COMP_I8__GPU_FETCH_INT_TO_FLOAT);
static void test_vertex_buffer_fetch_mode__GPU_COMP_U8__GPU_FETCH_INT_TO_FLOAT()
{
vertex_buffer_fetch_mode<GPU_COMP_U8, GPU_FETCH_INT_TO_FLOAT, uchar4>(uchar4(4, 5, 6, 1));
}
GPU_TEST(vertex_buffer_fetch_mode__GPU_COMP_U8__GPU_FETCH_INT_TO_FLOAT);
static void test_vertex_buffer_fetch_mode__GPU_COMP_I16__GPU_FETCH_INT_TO_FLOAT()
{
vertex_buffer_fetch_mode<GPU_COMP_I16, GPU_FETCH_INT_TO_FLOAT, short4>(short4(4, 5, 6, 1));
}
GPU_TEST(vertex_buffer_fetch_mode__GPU_COMP_I16__GPU_FETCH_INT_TO_FLOAT);
static void test_vertex_buffer_fetch_mode__GPU_COMP_U16__GPU_FETCH_INT_TO_FLOAT()
{
vertex_buffer_fetch_mode<GPU_COMP_U16, GPU_FETCH_INT_TO_FLOAT, ushort4>(ushort4(4, 5, 6, 1));
}
GPU_TEST(vertex_buffer_fetch_mode__GPU_COMP_U16__GPU_FETCH_INT_TO_FLOAT);
static void test_vertex_buffer_fetch_mode__GPU_COMP_I32__GPU_FETCH_INT_TO_FLOAT()
{
vertex_buffer_fetch_mode<GPU_COMP_I32, GPU_FETCH_INT_TO_FLOAT, int4>(int4(4, 5, 6, 1));
}
GPU_TEST(vertex_buffer_fetch_mode__GPU_COMP_I32__GPU_FETCH_INT_TO_FLOAT);
static void test_vertex_buffer_fetch_mode__GPU_COMP_U32__GPU_FETCH_INT_TO_FLOAT()
{
vertex_buffer_fetch_mode<GPU_COMP_U32, GPU_FETCH_INT_TO_FLOAT, uint4>(uint4(4, 5, 6, 1));
}
GPU_TEST(vertex_buffer_fetch_mode__GPU_COMP_U32__GPU_FETCH_INT_TO_FLOAT);
static void test_vertex_buffer_fetch_mode__GPU_COMP_F32__GPU_FETCH_FLOAT()
{
vertex_buffer_fetch_mode<GPU_COMP_F32, GPU_FETCH_FLOAT, float4>(float4(4, 5, 6, 1));
}
GPU_TEST(vertex_buffer_fetch_mode__GPU_COMP_F32__GPU_FETCH_FLOAT);
} // namespace blender::gpu::tests

View File

@ -64,17 +64,8 @@ void VKBackend::samplers_update() {}
void VKBackend::compute_dispatch(int groups_x_len, int groups_y_len, int groups_z_len)
{
VKContext &context = *VKContext::get();
VKShader *shader = static_cast<VKShader *>(context.shader);
context.bind_compute_pipeline();
VKCommandBuffer &command_buffer = context.command_buffer_get();
VKPipeline &pipeline = shader->pipeline_get();
VKDescriptorSetTracker &descriptor_set = pipeline.descriptor_set_get();
VKPushConstants &push_constants = pipeline.push_constants_get();
push_constants.update(context);
descriptor_set.update(context);
command_buffer.bind(*descriptor_set.active_descriptor_set(),
shader->vk_pipeline_layout_get(),
VK_PIPELINE_BIND_POINT_COMPUTE);
command_buffer.dispatch(groups_x_len, groups_y_len, groups_z_len);
}

View File

@ -7,9 +7,45 @@
#include "vk_batch.hh"
#include "vk_context.hh"
#include "vk_index_buffer.hh"
#include "vk_vertex_attribute_object.hh"
#include "vk_vertex_buffer.hh"
namespace blender::gpu {
void VKBatch::draw(int /*v_first*/, int /*v_count*/, int /*i_first*/, int /*i_count*/) {}
void VKBatch::draw(int vertex_first, int vertex_count, int instance_first, int instance_count)
{
/* Currently the pipeline is rebuilt on each draw command. Clear the dirty flag for
* consistency with the internals of the GPU module. */
flag &= ~GPU_BATCH_DIRTY;
/* Finalize graphics pipeline */
VKContext &context = *VKContext::get();
context.state_manager->apply_state();
VKVertexAttributeObject vao;
vao.update_bindings(context, *this);
context.bind_graphics_pipeline(prim_type, vao);
/* Bind geometry resources. */
vao.bind(context);
VKIndexBuffer *index_buffer = index_buffer_get();
const bool draw_indexed = index_buffer != nullptr;
if (draw_indexed) {
index_buffer->upload_data();
index_buffer->bind(context);
context.command_buffer_get().draw(index_buffer->index_len_get(),
instance_count,
index_buffer->index_start_get(),
vertex_first,
instance_first);
}
else {
context.command_buffer_get().draw(vertex_first, vertex_count, instance_first, instance_count);
}
context.command_buffer_get().submit();
}
void VKBatch::draw_indirect(GPUStorageBuf * /*indirect_buf*/, intptr_t /*offset*/) {}
@ -20,4 +56,19 @@ void VKBatch::multi_draw_indirect(GPUStorageBuf * /*indirect_buf*/,
{
}
VKVertexBuffer *VKBatch::vertex_buffer_get(int index)
{
return unwrap(verts_(index));
}
VKVertexBuffer *VKBatch::instance_buffer_get(int index)
{
return unwrap(inst_(index));
}
VKIndexBuffer *VKBatch::index_buffer_get()
{
return unwrap(unwrap(elem));
}
} // namespace blender::gpu

View File

@ -10,15 +10,21 @@
#include "gpu_batch_private.hh"
namespace blender::gpu {
class VKVertexBuffer;
class VKIndexBuffer;
class VKBatch : public Batch {
public:
void draw(int v_first, int v_count, int i_first, int i_count) override;
void draw(int vertex_first, int vertex_count, int instance_first, int instance_count) override;
void draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset) override;
void multi_draw_indirect(GPUStorageBuf *indirect_buf,
int count,
intptr_t offset,
intptr_t stride) override;
VKVertexBuffer *vertex_buffer_get(int index);
VKVertexBuffer *instance_buffer_get(int index);
VKIndexBuffer *index_buffer_get();
};
} // namespace blender::gpu

View File

@ -61,4 +61,17 @@ class VKBuffer {
void unmap();
};
/**
* Helper struct to enable buffers to be bound with an offset.
*
* VKImmediate mode uses a single VKBuffer with multiple vertex layouts. Those layouts are sent to
* the command buffer together with an offset.
*
* VKIndexBuffer uses this when it is a subrange of another buffer.
*/
struct VKBufferWithOffset {
VKBuffer &buffer;
VkDeviceSize offset;
};
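A minimal sketch of how a subrange binds through this struct, mirroring `VKIndexBuffer::buffer_with_offset` later in this commit (`buffer` and `command_buffer` are stand-ins):

/* Bind the second half of an existing buffer as an index buffer. */
VKBufferWithOffset subrange{buffer, buffer.size_in_bytes() / 2};
command_buffer.bind(subrange, VK_INDEX_TYPE_UINT32);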
} // namespace blender::gpu

View File

@ -38,6 +38,13 @@ void VKCommandBuffer::init(const VkDevice vk_device,
submission_id_.reset();
state.stage = Stage::Initial;
/* When the last GHOST context is destroyed, the device is deallocated. A moment later the GPU
* context is destroyed; the first step of that is to activate it. Activating would then retrieve
* the device from GHOST, which at that point is a VK_NULL_HANDLE. */
if (vk_device == VK_NULL_HANDLE) {
return;
}
if (vk_fence_ == VK_NULL_HANDLE) {
VK_ALLOCATION_CALLBACKS;
VkFenceCreateInfo fenceInfo{};
@ -95,6 +102,11 @@ void VKCommandBuffer::bind(const uint32_t binding,
bind(binding, vertex_buffer.vk_handle(), offset);
}
void VKCommandBuffer::bind(const uint32_t binding, const VKBufferWithOffset &vertex_buffer)
{
bind(binding, vertex_buffer.buffer.vk_handle(), vertex_buffer.offset);
}
void VKCommandBuffer::bind(const uint32_t binding,
const VkBuffer &vk_vertex_buffer,
const VkDeviceSize offset)
@ -104,12 +116,12 @@ void VKCommandBuffer::bind(const uint32_t binding,
vkCmdBindVertexBuffers(vk_command_buffer_, binding, 1, &vk_vertex_buffer, &offset);
}
void VKCommandBuffer::bind(const VKIndexBuffer &index_buffer, VkIndexType index_type)
void VKCommandBuffer::bind(const VKBufferWithOffset &index_buffer, VkIndexType index_type)
{
validate_framebuffer_exists();
ensure_active_framebuffer();
VkBuffer vk_buffer = index_buffer.vk_handle();
vkCmdBindIndexBuffer(vk_command_buffer_, vk_buffer, 0, index_type);
vkCmdBindIndexBuffer(
vk_command_buffer_, index_buffer.buffer.vk_handle(), index_buffer.offset, index_type);
}
void VKCommandBuffer::begin_render_pass(const VKFrameBuffer &framebuffer)

View File

@ -14,6 +14,7 @@
namespace blender::gpu {
class VKBuffer;
struct VKBufferWithOffset;
class VKDescriptorSet;
class VKFrameBuffer;
class VKIndexBuffer;
@ -141,8 +142,10 @@ class VKCommandBuffer : NonCopyable, NonMovable {
const VKVertexBuffer &vertex_buffer,
const VkDeviceSize offset);
/* Bind the given buffer as a vertex buffer. */
void bind(const uint32_t binding, const VKBufferWithOffset &vertex_buffer);
void bind(const uint32_t binding, const VkBuffer &vk_vertex_buffer, const VkDeviceSize offset);
void bind(const VKIndexBuffer &index_buffer, VkIndexType index_type);
/* Bind the given buffer as an index buffer. */
void bind(const VKBufferWithOffset &index_buffer, VkIndexType index_type);
void begin_render_pass(const VKFrameBuffer &framebuffer);
void end_render_pass(const VKFrameBuffer &framebuffer);

View File

@ -241,7 +241,7 @@ VkFormat to_vk_format(const eGPUTextureFormat format)
return VK_FORMAT_UNDEFINED;
}
VkFormat to_vk_format(const GPUVertCompType type, const uint32_t size)
static VkFormat to_vk_format_norm(const GPUVertCompType type, const uint32_t size)
{
switch (type) {
case GPU_COMP_I8:
@ -254,8 +254,11 @@ VkFormat to_vk_format(const GPUVertCompType type, const uint32_t size)
return VK_FORMAT_R8G8B8_SNORM;
case 4:
return VK_FORMAT_R8G8B8A8_SNORM;
case 16:
return VK_FORMAT_R8G8B8A8_SNORM;
default:
break;
BLI_assert_unreachable();
return VK_FORMAT_R8_SNORM;
}
break;
@ -269,8 +272,11 @@ VkFormat to_vk_format(const GPUVertCompType type, const uint32_t size)
return VK_FORMAT_R8G8B8_UNORM;
case 4:
return VK_FORMAT_R8G8B8A8_UNORM;
case 16:
return VK_FORMAT_R8G8B8A8_UNORM;
default:
break;
BLI_assert_unreachable();
return VK_FORMAT_R8_UNORM;
}
break;
@ -285,7 +291,8 @@ VkFormat to_vk_format(const GPUVertCompType type, const uint32_t size)
case 8:
return VK_FORMAT_R16G16B16A16_SNORM;
default:
break;
BLI_assert_unreachable();
return VK_FORMAT_R16_SNORM;
}
break;
@ -300,39 +307,102 @@ VkFormat to_vk_format(const GPUVertCompType type, const uint32_t size)
case 8:
return VK_FORMAT_R16G16B16A16_UNORM;
default:
break;
BLI_assert_unreachable();
return VK_FORMAT_R16_UNORM;
}
break;
case GPU_COMP_I10:
BLI_assert(size == 4);
return VK_FORMAT_A2B10G10R10_SNORM_PACK32;
case GPU_COMP_I32:
switch (size) {
case 4:
return VK_FORMAT_R32_SINT;
case 8:
return VK_FORMAT_R32G32_SINT;
case 12:
return VK_FORMAT_R32G32B32_SINT;
case 16:
return VK_FORMAT_R32G32B32A32_SINT;
default:
break;
}
break;
case GPU_COMP_U32:
case GPU_COMP_F32:
default:
break;
}
BLI_assert_unreachable();
return VK_FORMAT_R32_SFLOAT;
}
static VkFormat to_vk_format_float(const GPUVertCompType type, const uint32_t size)
{
switch (type) {
case GPU_COMP_I8:
switch (size) {
case 1:
return VK_FORMAT_R8_SSCALED;
case 2:
return VK_FORMAT_R8G8_SSCALED;
case 3:
return VK_FORMAT_R8G8B8_SSCALED;
case 4:
return VK_FORMAT_R8G8B8A8_SSCALED;
default:
BLI_assert_unreachable();
return VK_FORMAT_R8_SSCALED;
}
case GPU_COMP_U8:
switch (size) {
case 1:
return VK_FORMAT_R8_USCALED;
case 2:
return VK_FORMAT_R8G8_USCALED;
case 3:
return VK_FORMAT_R8G8B8_USCALED;
case 4:
return VK_FORMAT_R8G8B8A8_USCALED;
default:
BLI_assert_unreachable();
return VK_FORMAT_R8_USCALED;
}
case GPU_COMP_I16:
switch (size) {
case 2:
return VK_FORMAT_R16_SSCALED;
case 4:
return VK_FORMAT_R16G16_SSCALED;
case 6:
return VK_FORMAT_R16G16B16_SSCALED;
case 8:
return VK_FORMAT_R16G16B16A16_SSCALED;
default:
BLI_assert_unreachable();
return VK_FORMAT_R16_SSCALED;
}
case GPU_COMP_U16:
switch (size) {
case 2:
return VK_FORMAT_R16_USCALED;
case 4:
return VK_FORMAT_R16G16_USCALED;
case 6:
return VK_FORMAT_R16G16B16_USCALED;
case 8:
return VK_FORMAT_R16G16B16A16_USCALED;
default:
BLI_assert_unreachable();
return VK_FORMAT_R16_USCALED;
}
case GPU_COMP_I32:
case GPU_COMP_U32:
/* NOTE: GPU_COMP_I32/U32 using GPU_FETCH_INT_TO_FLOAT isn't natively supported. These are
* converted to floats on the host side. */
switch (size) {
case 4:
return VK_FORMAT_R32_UINT;
return VK_FORMAT_R32_SFLOAT;
case 8:
return VK_FORMAT_R32G32_UINT;
return VK_FORMAT_R32G32_SFLOAT;
case 12:
return VK_FORMAT_R32G32B32_UINT;
return VK_FORMAT_R32G32B32_SFLOAT;
case 16:
return VK_FORMAT_R32G32B32A32_UINT;
return VK_FORMAT_R32G32B32A32_SFLOAT;
default:
break;
BLI_assert_unreachable();
return VK_FORMAT_R32_SFLOAT;
}
break;
case GPU_COMP_F32:
switch (size) {
@ -347,17 +417,165 @@ VkFormat to_vk_format(const GPUVertCompType type, const uint32_t size)
case 64:
return VK_FORMAT_R32G32B32A32_SFLOAT;
default:
break;
BLI_assert_unreachable();
return VK_FORMAT_R32_SFLOAT;
}
case GPU_COMP_I10:
BLI_assert(size == 4);
return VK_FORMAT_A2B10G10R10_SSCALED_PACK32;
default:
break;
}
BLI_assert_unreachable();
return VK_FORMAT_R32_SFLOAT;
}
static VkFormat to_vk_format_int(const GPUVertCompType type, const uint32_t size)
{
switch (type) {
case GPU_COMP_I8:
switch (size) {
case 1:
return VK_FORMAT_R8_SINT;
case 2:
return VK_FORMAT_R8G8_SINT;
case 3:
return VK_FORMAT_R8G8B8_SINT;
case 4:
return VK_FORMAT_R8G8B8A8_SINT;
default:
BLI_assert_unreachable();
return VK_FORMAT_R8_SINT;
}
break;
case GPU_COMP_U8:
switch (size) {
case 1:
return VK_FORMAT_R8_USCALED;
case 2:
return VK_FORMAT_R8G8_USCALED;
case 3:
return VK_FORMAT_R8G8B8_USCALED;
case 4:
return VK_FORMAT_R8G8B8A8_USCALED;
default:
BLI_assert_unreachable();
return VK_FORMAT_R8_USCALED;
}
break;
case GPU_COMP_I16:
switch (size) {
case 2:
return VK_FORMAT_R16_SINT;
case 4:
return VK_FORMAT_R16G16_SINT;
case 6:
return VK_FORMAT_R16G16B16_SINT;
case 8:
return VK_FORMAT_R16G16B16A16_SINT;
default:
BLI_assert_unreachable();
return VK_FORMAT_R16_SINT;
}
break;
case GPU_COMP_U16:
switch (size) {
case 2:
return VK_FORMAT_R16_USCALED;
case 4:
return VK_FORMAT_R16G16_USCALED;
case 6:
return VK_FORMAT_R16G16B16_USCALED;
case 8:
return VK_FORMAT_R16G16B16A16_USCALED;
default:
BLI_assert_unreachable();
return VK_FORMAT_R16_USCALED;
}
break;
case GPU_COMP_I32:
switch (size) {
case 4:
return VK_FORMAT_R32_SINT;
case 8:
return VK_FORMAT_R32G32_SINT;
case 12:
return VK_FORMAT_R32G32B32_SINT;
case 16:
return VK_FORMAT_R32G32B32A32_SINT;
default:
BLI_assert_unreachable();
return VK_FORMAT_R32_SINT;
}
break;
case GPU_COMP_U32:
switch (size) {
case 4:
return VK_FORMAT_R32_UINT;
case 8:
return VK_FORMAT_R32G32_UINT;
case 12:
return VK_FORMAT_R32G32B32_UINT;
case 16:
return VK_FORMAT_R32G32B32A32_UINT;
default:
BLI_assert_unreachable();
return VK_FORMAT_R32_UINT;
}
break;
case GPU_COMP_F32:
switch (size) {
case 4:
return VK_FORMAT_R32_SINT;
case 8:
return VK_FORMAT_R32G32_SINT;
case 12:
return VK_FORMAT_R32G32B32_SINT;
case 16:
return VK_FORMAT_R32G32B32A32_SINT;
default:
BLI_assert_unreachable();
return VK_FORMAT_R32_SINT;
}
break;
case GPU_COMP_I10:
BLI_assert(size == 4);
return VK_FORMAT_A2B10G10R10_UNORM_PACK32;
return VK_FORMAT_A2B10G10R10_SINT_PACK32;
default:
break;
}
BLI_assert_unreachable();
return VK_FORMAT_R32_SFLOAT;
}
VkFormat to_vk_format(const GPUVertCompType type, const uint32_t size, GPUVertFetchMode fetch_mode)
{
switch (fetch_mode) {
case GPU_FETCH_FLOAT:
case GPU_FETCH_INT_TO_FLOAT:
return to_vk_format_float(type, size);
break;
case GPU_FETCH_INT:
return to_vk_format_int(type, size);
break;
case GPU_FETCH_INT_TO_FLOAT_UNIT:
return to_vk_format_norm(type, size);
break;
default:
break;
}
BLI_assert_unreachable();
return VK_FORMAT_R32_SFLOAT;
}
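The split by fetch mode means the same attribute can resolve to different formats. For example (`size` is in bytes, so four 16-bit components give 8):

VkFormat scaled = to_vk_format(GPU_COMP_I16, 8, GPU_FETCH_INT_TO_FLOAT); /* R16G16B16A16_SSCALED */
VkFormat integer = to_vk_format(GPU_COMP_I16, 8, GPU_FETCH_INT); /* R16G16B16A16_SINT */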

View File

@ -24,7 +24,9 @@ namespace blender::gpu {
VkImageAspectFlagBits to_vk_image_aspect_flag_bits(const eGPUTextureFormat format);
VkFormat to_vk_format(const eGPUTextureFormat format);
VkFormat to_vk_format(const GPUVertCompType type, const uint32_t size);
VkFormat to_vk_format(const GPUVertCompType type,
const uint32_t size,
const GPUVertFetchMode fetch_mode);
VkComponentMapping to_vk_component_mapping(const eGPUTextureFormat format);
VkImageViewType to_vk_image_view_type(const eGPUTextureType type);
VkImageType to_vk_image_type(const eGPUTextureType type);
@ -95,4 +97,6 @@ template<typename T> VkObjectType to_vk_object_type(T /*vk_obj*/)
return VK_OBJECT_TYPE_UNKNOWN;
}
#define NOT_YET_IMPLEMENTED printf("%s not implemented yet\n", __func__);
} // namespace blender::gpu

View File

@ -9,8 +9,11 @@
#include "vk_backend.hh"
#include "vk_framebuffer.hh"
#include "vk_immediate.hh"
#include "vk_memory.hh"
#include "vk_shader.hh"
#include "vk_state_manager.hh"
#include "vk_texture.hh"
#include "GHOST_C-api.h"
@ -29,6 +32,7 @@ VKContext::VKContext(void *ghost_window, void *ghost_context)
}
state_manager = new VKStateManager();
imm = new VKImmediate();
/* For off-screen contexts. Default frame-buffer is empty. */
VKFrameBuffer *framebuffer = new VKFrameBuffer("back_left");
@ -36,7 +40,11 @@ VKContext::VKContext(void *ghost_window, void *ghost_context)
active_fb = framebuffer;
}
VKContext::~VKContext() {}
VKContext::~VKContext()
{
delete imm;
imm = nullptr;
}
void VKContext::sync_backbuffer()
{
@ -85,9 +93,15 @@ void VKContext::activate()
is_active_ = true;
sync_backbuffer();
immActivate();
}
void VKContext::deactivate() {}
void VKContext::deactivate()
{
immDeactivate();
is_active_ = false;
}
void VKContext::begin_frame()
{
@ -106,9 +120,6 @@ void VKContext::flush()
void VKContext::finish()
{
if (has_active_framebuffer()) {
deactivate_framebuffer();
}
command_buffer_.submit();
}
@ -123,8 +134,17 @@ const VKStateManager &VKContext::state_manager_get() const
return *static_cast<const VKStateManager *>(state_manager);
}
VKStateManager &VKContext::state_manager_get()
{
return *static_cast<VKStateManager *>(state_manager);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Framebuffer
* \{ */
void VKContext::activate_framebuffer(VKFrameBuffer &framebuffer)
{
if (has_active_framebuffer()) {
@ -148,12 +168,47 @@ bool VKContext::has_active_framebuffer() const
void VKContext::deactivate_framebuffer()
{
BLI_assert(active_fb != nullptr);
VKFrameBuffer *framebuffer = active_framebuffer_get();
BLI_assert(framebuffer != nullptr);
if (framebuffer->is_valid()) {
command_buffer_.end_render_pass(*framebuffer);
}
active_fb = nullptr;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Compute pipeline
* \{ */
void VKContext::bind_compute_pipeline()
{
VKShader *shader = unwrap(this->shader);
BLI_assert(shader);
VKPipeline &pipeline = shader->pipeline_get();
pipeline.update_and_bind(
*this, shader->vk_pipeline_layout_get(), VK_PIPELINE_BIND_POINT_COMPUTE);
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Graphics pipeline
* \{ */
void VKContext::bind_graphics_pipeline(const GPUPrimType prim_type,
const VKVertexAttributeObject &vertex_attribute_object)
{
VKShader *shader = unwrap(this->shader);
BLI_assert(shader);
shader->update_graphics_pipeline(*this, prim_type, vertex_attribute_object);
VKPipeline &pipeline = shader->pipeline_get();
pipeline.update_and_bind(
*this, shader->vk_pipeline_layout_get(), VK_PIPELINE_BIND_POINT_GRAPHICS);
}
/** \} */
} // namespace blender::gpu

View File

@ -15,6 +15,8 @@
namespace blender::gpu {
class VKFrameBuffer;
class VKVertexAttributeObject;
class VKBatch;
class VKStateManager;
class VKContext : public Context, NonCopyable {
@ -50,6 +52,9 @@ class VKContext : public Context, NonCopyable {
void deactivate_framebuffer();
VKFrameBuffer *active_framebuffer_get() const;
void bind_compute_pipeline();
void bind_graphics_pipeline(const GPUPrimType prim_type,
const VKVertexAttributeObject &vertex_attribute_object);
void sync_backbuffer();
static VKContext *get(void)
@ -63,6 +68,7 @@ class VKContext : public Context, NonCopyable {
}
const VKStateManager &state_manager_get() const;
VKStateManager &state_manager_get();
};
} // namespace blender::gpu

View File

@ -882,4 +882,56 @@ void convert_device_to_host(void *dst_buffer,
/* \} */
/* -------------------------------------------------------------------- */
/** \name Vertex Attributes
* \{ */
static bool conversion_needed(const GPUVertAttr &vertex_attribute)
{
return (vertex_attribute.fetch_mode == GPU_FETCH_INT_TO_FLOAT &&
ELEM(vertex_attribute.comp_type, GPU_COMP_I32, GPU_COMP_U32));
}
bool conversion_needed(const GPUVertFormat &vertex_format)
{
for (int attr_index : IndexRange(vertex_format.attr_len)) {
const GPUVertAttr &vert_attr = vertex_format.attrs[attr_index];
if (conversion_needed(vert_attr)) {
return true;
}
}
return false;
}
void convert_in_place(void *data, const GPUVertFormat &vertex_format, const uint vertex_len)
{
BLI_assert(vertex_format.deinterleaved == false);
for (int attr_index : IndexRange(vertex_format.attr_len)) {
const GPUVertAttr &vert_attr = vertex_format.attrs[attr_index];
if (!conversion_needed(vert_attr)) {
continue;
}
void *row_data = static_cast<uint8_t *>(data) + vert_attr.offset;
for (int vert_index = 0; vert_index < vertex_len; vert_index++) {
if (vert_attr.comp_type == GPU_COMP_I32) {
for (int component : IndexRange(vert_attr.comp_len)) {
int32_t *component_in = static_cast<int32_t *>(row_data) + component;
float *component_out = static_cast<float *>(row_data) + component;
*component_out = float(*component_in);
}
}
else if (vert_attr.comp_type == GPU_COMP_U32) {
for (int component : IndexRange(vert_attr.comp_len)) {
uint32_t *component_in = static_cast<uint32_t *>(row_data) + component;
float *component_out = static_cast<float *>(row_data) + component;
*component_out = float(*component_in);
}
}
row_data = static_cast<uint8_t *>(row_data) + vertex_format.stride;
}
}
}
/* \} */
} // namespace blender::gpu

View File

@ -72,4 +72,28 @@ void convert_device_to_host(void *dst_buffer,
eGPUDataFormat host_format,
eGPUTextureFormat device_format);
/**
* Check whether all attributes of the given vertex format are natively supported, or whether
* conversion needs to happen on the host.
*
* \param vertex_format: the vertex format to check for attributes that require host-side
* conversion of an associated buffer.
*/
bool conversion_needed(const GPUVertFormat &vertex_format);
/**
* Convert the given `data` to natively supported Vulkan data formats.
*
* When the fetch mode of a vertex attribute is GPU_FETCH_INT_TO_FLOAT and the attribute is an
* int32_t or uint32_t, the conversion is done here on the host. Attributes of 8 or 16 bits are
* supported natively and are converted by Vulkan itself.
*
* \param data: Buffer to convert. Data will be converted in place.
* \param vertex_format: Vertex format of the given data. Attributes that aren't supported will be
* converted to a supported one.
* \param vertex_len: Number of vertices in the given data buffer, i.e. the number of vertices to
* convert.
*/
void convert_in_place(void *data, const GPUVertFormat &vertex_format, const uint vertex_len);
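A sketch of the host-side conversion for a single attribute, assuming `vertex_format` declares one GPU_COMP_U32 x3 attribute with GPU_FETCH_INT_TO_FLOAT:

/* One vertex; the same 12 bytes are reinterpreted as floats in place. */
uint32_t data[3] = {1, 2, 3};
convert_in_place(data, vertex_format, 1);
float *as_float = reinterpret_cast<float *>(data); /* {1.0f, 2.0f, 3.0f} */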
}; // namespace blender::gpu

View File

@ -118,6 +118,7 @@ void object_label(VkObjectType vk_object_type, uint64_t object_handle, const cha
const VKDevice &device = VKBackend::get().device_get();
const VKDebuggingTools &debugging_tools = device.debugging_tools_get();
if (debugging_tools.enabled) {
const VKDevice &device = VKBackend::get().device_get();
VkDebugUtilsObjectNameInfoEXT info = {};
info.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT;
info.objectType = vk_object_type;

View File

@ -81,6 +81,7 @@ bool VKDescriptorPools::is_last_pool_active()
std::unique_ptr<VKDescriptorSet> VKDescriptorPools::allocate(
const VkDescriptorSetLayout &descriptor_set_layout)
{
BLI_assert(descriptor_set_layout != VK_NULL_HANDLE);
VkDescriptorSetAllocateInfo allocate_info = {};
VkDescriptorPool pool = active_pool_get();
allocate_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;

View File

@ -7,6 +7,7 @@
#include "vk_descriptor_set.hh"
#include "vk_index_buffer.hh"
#include "vk_sampler.hh"
#include "vk_shader.hh"
#include "vk_storage_buffer.hh"
#include "vk_texture.hh"
@ -27,7 +28,6 @@ VKDescriptorSet::~VKDescriptorSet()
{
if (vk_descriptor_set_ != VK_NULL_HANDLE) {
/* Handle should be given back to the pool. */
BLI_assert(VKContext::get());
VKDevice &device = VKBackend::get().device_;
device.descriptor_pools_get().free(*this);
BLI_assert(vk_descriptor_set_ == VK_NULL_HANDLE);
@ -81,7 +81,17 @@ void VKDescriptorSetTracker::image_bind(VKTexture &texture,
{
Binding &binding = ensure_location(location);
binding.type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
binding.vk_image_view = texture.vk_image_view_handle();
binding.texture = &texture;
}
void VKDescriptorSetTracker::bind(VKTexture &texture,
const VKDescriptorSet::Location location,
VKSampler &sampler)
{
Binding &binding = ensure_location(location);
binding.type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
binding.texture = &texture;
binding.vk_sampler = sampler.vk_handle();
}
VKDescriptorSetTracker::Binding &VKDescriptorSetTracker::ensure_location(
@ -101,6 +111,7 @@ VKDescriptorSetTracker::Binding &VKDescriptorSetTracker::ensure_location(
void VKDescriptorSetTracker::update(VKContext &context)
{
BLI_assert(layout_ != VK_NULL_HANDLE);
tracked_resource_for(context, !bindings_.is_empty());
std::unique_ptr<VKDescriptorSet> &descriptor_set = active_descriptor_set();
VkDescriptorSet vk_descriptor_set = descriptor_set->vk_handle();
@ -132,9 +143,12 @@ void VKDescriptorSetTracker::update(VKContext &context)
if (!binding.is_image()) {
continue;
}
/* The texture layout must be correct before the descriptor sets are written;
* `layout_ensure` transitions it here if needed. */
binding.texture->layout_ensure(context, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
VkDescriptorImageInfo image_info = {};
image_info.imageView = binding.vk_image_view;
image_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
image_info.sampler = binding.vk_sampler;
image_info.imageView = binding.texture->vk_image_view_handle();
image_info.imageLayout = binding.texture->current_layout_get();
image_infos.append(image_info);
VkWriteDescriptorSet write_descriptor = {};
@ -150,7 +164,6 @@ void VKDescriptorSetTracker::update(VKContext &context)
BLI_assert_msg(image_infos.size() + buffer_infos.size() == descriptor_writes.size(),
"Not all changes have been converted to a write descriptor. Check "
"`Binding::is_buffer` and `Binding::is_image`.");
const VKDevice &device = VKBackend::get().device_get();
vkUpdateDescriptorSets(
device.device_get(), descriptor_writes.size(), descriptor_writes.data(), 0, nullptr);
@ -158,7 +171,7 @@ void VKDescriptorSetTracker::update(VKContext &context)
bindings_.clear();
}
std::unique_ptr<VKDescriptorSet> VKDescriptorSetTracker::create_resource(VKContext &context)
std::unique_ptr<VKDescriptorSet> VKDescriptorSetTracker::create_resource(VKContext & /*context*/)
{
VKDevice &device = VKBackend::get().device_;
return device.descriptor_pools_get().allocate(layout_);

View File

@ -25,6 +25,7 @@ class VKTexture;
class VKUniformBuffer;
class VKVertexBuffer;
class VKDescriptorSetTracker;
class VKSampler;
/**
* In vulkan shader resources (images and buffers) are grouped in descriptor sets.
@ -117,7 +118,8 @@ class VKDescriptorSetTracker : protected VKResourceTracker<VKDescriptorSet> {
VkBuffer vk_buffer = VK_NULL_HANDLE;
VkDeviceSize buffer_size = 0;
VkImageView vk_image_view = VK_NULL_HANDLE;
VKTexture *texture = nullptr;
VkSampler vk_sampler = VK_NULL_HANDLE;
Binding()
{
@ -131,14 +133,17 @@ class VKDescriptorSetTracker : protected VKResourceTracker<VKDescriptorSet> {
bool is_image() const
{
return ELEM(type, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
return ELEM(type,
VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) &&
texture != nullptr;
}
};
private:
/** A list of bindings that needs to be updated. */
Vector<Binding> bindings_;
VkDescriptorSetLayout layout_;
VkDescriptorSetLayout layout_ = VK_NULL_HANDLE;
public:
VKDescriptorSetTracker() {}
@ -149,7 +154,20 @@ class VKDescriptorSetTracker : protected VKResourceTracker<VKDescriptorSet> {
void bind_as_ssbo(VKIndexBuffer &buffer, VKDescriptorSet::Location location);
void bind(VKStorageBuffer &buffer, VKDescriptorSet::Location location);
void bind(VKUniformBuffer &buffer, VKDescriptorSet::Location location);
/* TODO: bind as image */
void image_bind(VKTexture &texture, VKDescriptorSet::Location location);
void bind(VKTexture &texture, VKDescriptorSet::Location location, VKSampler &sampler);
/**
* Some shaders don't need any descriptor sets, so we don't need to bind them.
*
* The result of this function tells whether the descriptor set has a layout assigned.
* TODO: we might want to make descriptor sets optional for pipelines.
*/
bool has_layout() const
{
return layout_ != VK_NULL_HANDLE;
}
/**
* Update the descriptor set on the device.

View File

@ -5,11 +5,16 @@
* \ingroup gpu
*/
#include "GPU_batch.h"
#include "vk_drawlist.hh"
namespace blender::gpu {
void VKDrawList::append(GPUBatch * /*batch*/, int /*i_first*/, int /*i_count*/) {}
void VKDrawList::append(GPUBatch *batch, int instance_first, int instance_count)
{
GPU_batch_draw_advanced(batch, 0, 0, instance_first, instance_count);
}
void VKDrawList::submit() {}

View File

@ -13,7 +13,7 @@ namespace blender::gpu {
class VKDrawList : public DrawList {
public:
void append(GPUBatch *batch, int i_first, int i_count) override;
void append(GPUBatch *batch, int instance_first, int instance_count) override;
void submit() override;
};

View File

@ -503,8 +503,10 @@ void VKFrameBuffer::render_pass_free()
VK_ALLOCATION_CALLBACKS
const VKDevice &device = VKBackend::get().device_get();
vkDestroyRenderPass(device.device_get(), vk_render_pass_, vk_allocation_callbacks);
vkDestroyFramebuffer(device.device_get(), vk_framebuffer_, vk_allocation_callbacks);
if (device.is_initialized()) {
vkDestroyRenderPass(device.device_get(), vk_render_pass_, vk_allocation_callbacks);
vkDestroyFramebuffer(device.device_get(), vk_framebuffer_, vk_allocation_callbacks);
}
vk_render_pass_ = VK_NULL_HANDLE;
vk_framebuffer_ = VK_NULL_HANDLE;
}

View File

@ -0,0 +1,88 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation */
/** \file
* \ingroup gpu
*
* Mimics old style OpenGL immediate mode drawing.
*/
#include "vk_immediate.hh"
#include "vk_data_conversion.hh"
namespace blender::gpu {
VKImmediate::VKImmediate() {}
VKImmediate::~VKImmediate() {}
uchar *VKImmediate::begin()
{
VKContext &context = *VKContext::get();
const size_t bytes_needed = vertex_buffer_size(&vertex_format, vertex_len);
const bool new_buffer_needed = !has_active_resource() || buffer_bytes_free() < bytes_needed;
std::unique_ptr<VKBuffer> &buffer = tracked_resource_for(context, new_buffer_needed);
current_subbuffer_len_ = bytes_needed;
uchar *data = static_cast<uchar *>(buffer->mapped_memory_get());
return data + subbuffer_offset_get();
}
void VKImmediate::end()
{
BLI_assert_msg(prim_type != GPU_PRIM_NONE, "Illegal state: not between an immBegin/End pair.");
if (vertex_len == 0) {
return;
}
if (conversion_needed(vertex_format)) {
/* Slow path: determine the start of the subbuffer. The `vertex_data` attribute changes when
* new vertices are loaded. */
uchar *data = static_cast<uchar *>(active_resource()->mapped_memory_get()) +
subbuffer_offset_get();
convert_in_place(data, vertex_format, vertex_len);
}
VKContext &context = *VKContext::get();
BLI_assert(context.shader == unwrap(shader));
context.state_manager->apply_state();
vertex_attributes_.update_bindings(*this);
context.bind_graphics_pipeline(prim_type, vertex_attributes_);
vertex_attributes_.bind(context);
context.command_buffer_get().draw(0, vertex_len, 0, 1);
buffer_offset_ += current_subbuffer_len_;
current_subbuffer_len_ = 0;
}
VkDeviceSize VKImmediate::subbuffer_offset_get()
{
return buffer_offset_;
}
VkDeviceSize VKImmediate::buffer_bytes_free()
{
return active_resource()->size_in_bytes() - subbuffer_offset_get();
}
static VkDeviceSize new_buffer_size(size_t sub_buffer_size)
{
return max_ii(sub_buffer_size, DEFAULT_INTERNAL_BUFFER_SIZE);
}
std::unique_ptr<VKBuffer> VKImmediate::create_resource(VKContext & /*context*/)
{
const size_t bytes_needed = vertex_buffer_size(&vertex_format, vertex_len);
std::unique_ptr<VKBuffer> result = std::make_unique<VKBuffer>();
result->create(new_buffer_size(bytes_needed),
GPU_USAGE_DYNAMIC,
static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
VK_BUFFER_USAGE_TRANSFER_DST_BIT));
debug::object_label(result->vk_handle(), "Immediate");
buffer_offset_ = 0;
return result;
}
} // namespace blender::gpu
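A note for reviewers on the suballocation scheme above: `begin()` carves consecutive sub-ranges out of one mapped buffer and only requests a fresh buffer when the remaining space is too small, while `create_resource()` resets the write offset. A minimal standalone model of that bookkeeping (all names here are illustrative, not backend API):
#include <algorithm>
#include <cstddef>
#include <cstdio>
/* Models VKImmediate's sub-allocation: consecutive sub-ranges from one
 * large buffer, with a fresh buffer when a request no longer fits. */
struct SubAllocator {
  static constexpr size_t default_size = 4 * 1024 * 1024;
  size_t buffer_size = 0; /* Size of the active buffer. */
  size_t offset = 0;      /* First free byte in the active buffer. */
  /* Returns the offset of the new sub-range. Mirrors begin()'s
   * `new_buffer_needed` test and end()'s `buffer_offset_ +=` advance. */
  size_t begin(size_t bytes)
  {
    if (buffer_size == 0 || buffer_size - offset < bytes) {
      buffer_size = std::max(bytes, default_size);
      offset = 0; /* As in create_resource(). */
    }
    const size_t result = offset;
    offset += bytes;
    return result;
  }
};
int main()
{
  SubAllocator immediate;
  printf("%zu\n", immediate.begin(1024));            /* 0 */
  printf("%zu\n", immediate.begin(2048));            /* 1024 */
  printf("%zu\n", immediate.begin(8 * 1024 * 1024)); /* 0: new, larger buffer */
  return 0;
}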

View File

@@ -0,0 +1,51 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation */
/** \file
* \ingroup gpu
*
* Mimics old style OpenGL immediate mode drawing.
*/
#pragma once
#include "MEM_guardedalloc.h"
#include "gpu_immediate_private.hh"
#include "gpu_vertex_format_private.h"
#include "vk_buffer.hh"
#include "vk_context.hh"
#include "vk_mem_alloc.h"
#include "vk_resource_tracker.hh"
#include "vk_vertex_attribute_object.hh"
namespace blender::gpu {
/* Size of internal buffer. */
constexpr size_t DEFAULT_INTERNAL_BUFFER_SIZE = (4 * 1024 * 1024);
class VKImmediate : public Immediate, VKResourceTracker<VKBuffer> {
private:
VKVertexAttributeObject vertex_attributes_;
VkDeviceSize buffer_offset_ = 0;
VkDeviceSize current_subbuffer_len_ = 0;
public:
VKImmediate();
virtual ~VKImmediate();
uchar *begin(void) override;
void end(void) override;
friend class VKVertexAttributeObject;
private:
VkDeviceSize subbuffer_offset_get();
VkDeviceSize buffer_bytes_free();
std::unique_ptr<VKBuffer> create_resource(VKContext &context) override;
};
} // namespace blender::gpu

View File

@@ -35,7 +35,7 @@ void VKIndexBuffer::upload_data()
void VKIndexBuffer::bind(VKContext &context)
{
context.command_buffer_get().bind(*this, to_vk_index_type(index_type_));
context.command_buffer_get().bind(buffer_with_offset(), to_vk_index_type(index_type_));
}
void VKIndexBuffer::bind_as_ssbo(uint binding)
@@ -61,9 +61,15 @@ void VKIndexBuffer::read(uint32_t *data) const
buffer_.read(data);
}
void VKIndexBuffer::update_sub(uint /*start*/, uint /*len*/, const void * /*data*/) {}
void VKIndexBuffer::update_sub(uint /*start*/, uint /*len*/, const void * /*data*/)
{
NOT_YET_IMPLEMENTED
}
void VKIndexBuffer::strip_restart_indices() {}
void VKIndexBuffer::strip_restart_indices()
{
NOT_YET_IMPLEMENTED
}
void VKIndexBuffer::allocate()
{
@@ -75,4 +81,16 @@ void VKIndexBuffer::allocate()
debug::object_label(buffer_.vk_handle(), "IndexBuffer");
}
VKBufferWithOffset VKIndexBuffer::buffer_with_offset()
{
VKIndexBuffer *src = unwrap(src_);
VKBufferWithOffset result{is_subrange_ ? src->buffer_ : buffer_, index_start_};
BLI_assert_msg(is_subrange_ || result.offset == 0,
"According to design index_start should always be zero when index buffer isn't "
"a subrange");
return result;
}
} // namespace blender::gpu
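For context on `buffer_with_offset()` above: GPU index buffers can be subranges of another index buffer, so a Vulkan bind must pair a `VkBuffer` with an offset. A reduced model of that selection logic (standalone C++ with stand-in types, not the real VKBuffer API):
#include <cassert>
#include <cstdint>
struct Buffer { int id; }; /* Stand-in for VKBuffer. */
struct BufferWithOffset {  /* Stand-in for VKBufferWithOffset. */
  Buffer &buffer;
  uint32_t offset;
};
/* Subranges bind the *source* buffer at index_start; regular index
 * buffers bind their own buffer, whose start must be zero. */
BufferWithOffset buffer_with_offset(const bool is_subrange,
                                    Buffer &own,
                                    Buffer &source,
                                    const uint32_t index_start)
{
  BufferWithOffset result{is_subrange ? source : own, index_start};
  assert(is_subrange || result.offset == 0);
  return result;
}
int main()
{
  Buffer own{1}, source{2};
  const BufferWithOffset bound = buffer_with_offset(true, own, source, 64);
  return bound.buffer.id == 2 ? 0 : 1; /* The source buffer was selected. */
}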

View File

@@ -35,6 +35,7 @@ class VKIndexBuffer : public IndexBuf {
void strip_restart_indices() override;
void allocate();
void ensure_updated();
VKBufferWithOffset buffer_with_offset();
};
static inline VKIndexBuffer *unwrap(IndexBuf *index_buffer)

View File

@@ -7,26 +7,39 @@
#include "vk_pipeline.hh"
#include "vk_backend.hh"
#include "vk_batch.hh"
#include "vk_context.hh"
#include "vk_framebuffer.hh"
#include "vk_memory.hh"
#include "vk_state_manager.hh"
#include "vk_vertex_attribute_object.hh"
namespace blender::gpu {
VKPipeline::VKPipeline(VkDescriptorSetLayout vk_descriptor_set_layout,
VKPushConstants &&push_constants)
: active_vk_pipeline_(VK_NULL_HANDLE),
descriptor_set_(vk_descriptor_set_layout),
push_constants_(std::move(push_constants))
{
}
VKPipeline::VKPipeline(VkPipeline vk_pipeline,
VkDescriptorSetLayout vk_descriptor_set_layout,
VKPushConstants &&push_constants)
: vk_pipeline_(vk_pipeline),
: active_vk_pipeline_(vk_pipeline),
descriptor_set_(vk_descriptor_set_layout),
push_constants_(std::move(push_constants))
{
vk_pipelines_.append(vk_pipeline);
}
VKPipeline::~VKPipeline()
{
VK_ALLOCATION_CALLBACKS
const VKDevice &device = VKBackend::get().device_get();
if (vk_pipeline_ != VK_NULL_HANDLE) {
vkDestroyPipeline(device.device_get(), vk_pipeline_, vk_allocation_callbacks);
for (VkPipeline vk_pipeline : vk_pipelines_) {
vkDestroyPipeline(device.device_get(), vk_pipeline, vk_allocation_callbacks);
}
}
@@ -64,14 +77,137 @@ VKPipeline VKPipeline::create_compute_pipeline(
return VKPipeline(vk_pipeline, descriptor_set_layout, std::move(push_constants));
}
VKPipeline VKPipeline::create_graphics_pipeline(
VkDescriptorSetLayout &descriptor_set_layout,
const VKPushConstants::Layout &push_constants_layout)
{
VKPushConstants push_constants(&push_constants_layout);
return VKPipeline(descriptor_set_layout, std::move(push_constants));
}
VkPipeline VKPipeline::vk_handle() const
{
return vk_pipeline_;
return active_vk_pipeline_;
}
bool VKPipeline::is_valid() const
{
return vk_pipeline_ != VK_NULL_HANDLE;
return active_vk_pipeline_ != VK_NULL_HANDLE;
}
void VKPipeline::finalize(VKContext &context,
VkShaderModule vertex_module,
VkShaderModule geometry_module,
VkShaderModule fragment_module,
VkPipelineLayout &pipeline_layout,
const GPUPrimType prim_type,
const VKVertexAttributeObject &vertex_attribute_object)
{
BLI_assert(vertex_module != VK_NULL_HANDLE);
VK_ALLOCATION_CALLBACKS
Vector<VkPipelineShaderStageCreateInfo> pipeline_stages;
VkPipelineShaderStageCreateInfo vertex_stage_info = {};
vertex_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
vertex_stage_info.stage = VK_SHADER_STAGE_VERTEX_BIT;
vertex_stage_info.module = vertex_module;
vertex_stage_info.pName = "main";
pipeline_stages.append(vertex_stage_info);
if (geometry_module != VK_NULL_HANDLE) {
VkPipelineShaderStageCreateInfo geometry_stage_info = {};
geometry_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
geometry_stage_info.stage = VK_SHADER_STAGE_GEOMETRY_BIT;
geometry_stage_info.module = geometry_module;
geometry_stage_info.pName = "main";
pipeline_stages.append(geometry_stage_info);
}
if (fragment_module != VK_NULL_HANDLE) {
VkPipelineShaderStageCreateInfo fragment_stage_info = {};
fragment_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
fragment_stage_info.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
fragment_stage_info.module = fragment_module;
fragment_stage_info.pName = "main";
pipeline_stages.append(fragment_stage_info);
}
VKFrameBuffer &framebuffer = *context.active_framebuffer_get();
VkGraphicsPipelineCreateInfo pipeline_create_info = {};
pipeline_create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipeline_create_info.stageCount = pipeline_stages.size();
pipeline_create_info.pStages = pipeline_stages.data();
pipeline_create_info.layout = pipeline_layout;
pipeline_create_info.renderPass = framebuffer.vk_render_pass_get();
pipeline_create_info.subpass = 0;
/* Vertex input state. */
VkPipelineVertexInputStateCreateInfo vertex_input_state = {};
vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertex_input_state.vertexBindingDescriptionCount = vertex_attribute_object.bindings.size();
vertex_input_state.pVertexBindingDescriptions = vertex_attribute_object.bindings.data();
vertex_input_state.vertexAttributeDescriptionCount = vertex_attribute_object.attributes.size();
vertex_input_state.pVertexAttributeDescriptions = vertex_attribute_object.attributes.data();
pipeline_create_info.pVertexInputState = &vertex_input_state;
/* Input assembly state. */
VkPipelineInputAssemblyStateCreateInfo pipeline_input_assembly = {};
pipeline_input_assembly.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
pipeline_input_assembly.topology = to_vk_primitive_topology(prim_type);
pipeline_create_info.pInputAssemblyState = &pipeline_input_assembly;
/* Viewport state. */
VkPipelineViewportStateCreateInfo viewport_state = {};
viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
VkViewport viewport = framebuffer.vk_viewport_get();
viewport_state.pViewports = &viewport;
viewport_state.viewportCount = 1;
VkRect2D scissor = framebuffer.vk_render_area_get();
viewport_state.pScissors = &scissor;
viewport_state.scissorCount = 1;
pipeline_create_info.pViewportState = &viewport_state;
/* Multisample state. */
VkPipelineMultisampleStateCreateInfo multisample_state = {};
multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
multisample_state.minSampleShading = 1.0f;
pipeline_create_info.pMultisampleState = &multisample_state;
/* States from the state manager. */
const VKPipelineStateManager &state_manager = state_manager_get();
pipeline_create_info.pColorBlendState = &state_manager.pipeline_color_blend_state;
pipeline_create_info.pRasterizationState = &state_manager.rasterization_state;
pipeline_create_info.pDepthStencilState = &state_manager.depth_stencil_state;
const VKDevice &device = VKBackend::get().device_get();
vkCreateGraphicsPipelines(device.device_get(),
VK_NULL_HANDLE,
1,
&pipeline_create_info,
vk_allocation_callbacks,
&active_vk_pipeline_);
/* TODO: We should cache several pipeline instances and detect pipelines we can reuse. This might
* also be done using a VkPipelineCache. For now we keep track of every created pipeline so it
* isn't destroyed while it may still be in flight. */
vk_pipelines_.append(active_vk_pipeline_);
debug::object_label(active_vk_pipeline_, "GraphicsPipeline");
}
void VKPipeline::update_and_bind(VKContext &context,
VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint vk_pipeline_bind_point)
{
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.bind(*this, vk_pipeline_bind_point);
push_constants_.update(context);
if (descriptor_set_.has_layout()) {
descriptor_set_.update(context);
command_buffer.bind(
*descriptor_set_.active_descriptor_set(), vk_pipeline_layout, vk_pipeline_bind_point);
}
}
} // namespace blender::gpu
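Regarding the VkPipelineCache mentioned in the TODO above: wiring one in would only touch the create call. A hedged sketch of the standard Vulkan usage (plain Vulkan API, assuming a valid `device`; error handling omitted):
#include <vulkan/vulkan.h>
/* Create one cache per device; identical pipeline states created later can
 * then be served from it. */
VkPipelineCache create_pipeline_cache(VkDevice device)
{
  VkPipelineCacheCreateInfo cache_info = {};
  cache_info.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
  /* initialDataSize/pInitialData may be filled with data previously
   * retrieved via vkGetPipelineCacheData() to persist across runs. */
  VkPipelineCache cache = VK_NULL_HANDLE;
  vkCreatePipelineCache(device, &cache_info, nullptr, &cache);
  return cache;
}
/* The only change in finalize() would then be passing the cache instead of
 * VK_NULL_HANDLE:
 *   vkCreateGraphicsPipelines(device.device_get(), cache, 1,
 *                             &pipeline_create_info, vk_allocation_callbacks,
 *                             &active_vk_pipeline_);
 */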

View File

@@ -14,29 +14,49 @@
#include "vk_common.hh"
#include "vk_descriptor_set.hh"
#include "vk_pipeline_state.hh"
#include "vk_push_constants.hh"
namespace blender::gpu {
class VKContext;
class VKShader;
class VKVertexAttributeObject;
class VKBatch;
/**
* A pipeline can be a compute pipeline or a graphics pipeline.
*
* Compute pipelines can be constructed early on, but graphics
* pipelines depend on the actual GPU state/context.
*
* - TODO: we should sanitize the interface. Then we can also
* use late construction for compute pipelines.
*/
class VKPipeline : NonCopyable {
VkPipeline vk_pipeline_ = VK_NULL_HANDLE;
/* Active pipeline handle. */
VkPipeline active_vk_pipeline_ = VK_NULL_HANDLE;
/** Keep track of all pipelines as they can still be in flight. */
Vector<VkPipeline> vk_pipelines_;
VKDescriptorSetTracker descriptor_set_;
VKPushConstants push_constants_;
VKPipelineStateManager state_manager_;
public:
VKPipeline() = default;
virtual ~VKPipeline();
VKPipeline(VkDescriptorSetLayout vk_descriptor_set_layout, VKPushConstants &&push_constants);
VKPipeline(VkPipeline vk_pipeline,
VkDescriptorSetLayout vk_descriptor_set_layout,
VKPushConstants &&push_constants);
VKPipeline &operator=(VKPipeline &&other)
{
vk_pipeline_ = other.vk_pipeline_;
other.vk_pipeline_ = VK_NULL_HANDLE;
active_vk_pipeline_ = other.active_vk_pipeline_;
other.active_vk_pipeline_ = VK_NULL_HANDLE;
descriptor_set_ = std::move(other.descriptor_set_);
push_constants_ = std::move(other.push_constants_);
vk_pipelines_ = std::move(other.vk_pipelines_);
other.vk_pipelines_.clear();
return *this;
}
@@ -44,6 +64,8 @@ class VKPipeline : NonCopyable {
VkDescriptorSetLayout &descriptor_set_layout,
VkPipelineLayout &pipeline_layouts,
const VKPushConstants::Layout &push_constants_layout);
static VKPipeline create_graphics_pipeline(VkDescriptorSetLayout &descriptor_set_layout,
const VKPushConstants::Layout &push_constants_layout);
VKDescriptorSetTracker &descriptor_set_get()
{
@@ -55,8 +77,28 @@ class VKPipeline : NonCopyable {
return push_constants_;
}
VKPipelineStateManager &state_manager_get()
{
return state_manager_;
}
VkPipeline vk_handle() const;
bool is_valid() const;
void finalize(VKContext &context,
VkShaderModule vertex_module,
VkShaderModule geometry_module,
VkShaderModule fragment_module,
VkPipelineLayout &pipeline_layout,
const GPUPrimType prim_type,
const VKVertexAttributeObject &vertex_attribute_object);
/**
* Update PushConstants, DescriptorSets and bind pipeline to command buffer.
*/
void update_and_bind(VKContext &context,
VkPipelineLayout vk_pipeline_layout,
VkPipelineBindPoint vk_pipeline_bind_point);
};
} // namespace blender::gpu
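For orientation, the per-draw call order these files establish (all names taken from the diffs in this commit; a recap of the patch, not new API):
/* Per-draw sequence in the Vulkan backend after this commit:
 *
 *   context.state_manager->apply_state();            (GPUState -> VKPipelineStateManager)
 *   vertex_attributes_.update_bindings(*this);       (build VkVertexInput* descriptions)
 *   context.bind_graphics_pipeline(prim_type,
 *                                  vertex_attributes_); (VKPipeline::finalize + update_and_bind)
 *   vertex_attributes_.bind(context);                (bind VBOs / immediate buffers)
 *   context.command_buffer_get().draw(...);          (record the draw)
 */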

View File

@@ -0,0 +1,367 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation */
/** \file
* \ingroup gpu
*/
#include "vk_pipeline_state.hh"
namespace blender::gpu {
VKPipelineStateManager::VKPipelineStateManager()
{
rasterization_state = {};
rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization_state.lineWidth = 1.0f;
pipeline_color_blend_state = {};
pipeline_color_blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
depth_stencil_state = {};
depth_stencil_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
/* TODO: this should be extracted from the current framebuffer and not be done here. When the
* attachments differ, the state should be forced. */
VkPipelineColorBlendAttachmentState color_blend_attachment = {};
color_blend_attachment.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
color_blend_attachments.append(color_blend_attachment);
pipeline_color_blend_state.attachmentCount = color_blend_attachments.size();
pipeline_color_blend_state.pAttachments = color_blend_attachments.data();
}
void VKPipelineStateManager::set_state(const GPUState &state, const GPUStateMutable &mutable_state)
{
GPUState changed = state ^ current_;
if (changed.blend) {
set_blend(static_cast<eGPUBlend>(state.blend));
}
if (changed.write_mask != 0) {
set_write_mask((eGPUWriteMask)state.write_mask);
}
if (changed.depth_test != 0) {
set_depth_test((eGPUDepthTest)state.depth_test);
}
if (changed.stencil_test != 0 || changed.stencil_op != 0) {
set_stencil_test((eGPUStencilTest)state.stencil_test, (eGPUStencilOp)state.stencil_op);
set_stencil_mask((eGPUStencilTest)state.stencil_test, mutable_state);
}
if (changed.clip_distances != 0) {
set_clip_distances(state.clip_distances, current_.clip_distances);
}
if (changed.culling_test != 0) {
set_backface_culling((eGPUFaceCullTest)state.culling_test);
}
if (changed.logic_op_xor != 0) {
set_logic_op(state.logic_op_xor);
}
if (changed.invert_facing != 0) {
set_facing(state.invert_facing);
}
if (changed.provoking_vert != 0) {
set_provoking_vert((eGPUProvokingVertex)state.provoking_vert);
}
if (changed.shadow_bias != 0) {
set_shadow_bias(state.shadow_bias);
}
current_ = state;
}
void VKPipelineStateManager::force_state(const GPUState &state,
const GPUStateMutable &mutable_state)
{
current_ = ~state;
set_state(state, mutable_state);
}
void VKPipelineStateManager::set_blend(const eGPUBlend blend)
{
VkPipelineColorBlendStateCreateInfo &cb = pipeline_color_blend_state;
VkPipelineColorBlendAttachmentState &att_state = color_blend_attachments.last();
att_state.blendEnable = VK_TRUE;
att_state.alphaBlendOp = VK_BLEND_OP_ADD;
att_state.colorBlendOp = VK_BLEND_OP_ADD;
att_state.srcColorBlendFactor = VK_BLEND_FACTOR_DST_ALPHA;
att_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE;
att_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
cb.blendConstants[0] = 1.0f;
cb.blendConstants[1] = 1.0f;
cb.blendConstants[2] = 1.0f;
cb.blendConstants[3] = 1.0f;
switch (blend) {
default:
case GPU_BLEND_ALPHA:
att_state.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
att_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
att_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
break;
case GPU_BLEND_ALPHA_PREMULT:
att_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
att_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
att_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
break;
case GPU_BLEND_ADDITIVE:
att_state.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
att_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE;
att_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
break;
case GPU_BLEND_SUBTRACT:
case GPU_BLEND_ADDITIVE_PREMULT:
att_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
att_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE;
att_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
break;
case GPU_BLEND_MULTIPLY:
att_state.srcColorBlendFactor = VK_BLEND_FACTOR_DST_COLOR;
att_state.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
att_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_DST_ALPHA;
att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
break;
case GPU_BLEND_INVERT:
att_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
att_state.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
att_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
break;
case GPU_BLEND_OIT:
att_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
att_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE;
att_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
break;
case GPU_BLEND_BACKGROUND:
att_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
att_state.dstColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
att_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ZERO;
att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
break;
case GPU_BLEND_ALPHA_UNDER_PREMUL:
att_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
att_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE;
att_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
break;
case GPU_BLEND_CUSTOM:
att_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
att_state.dstColorBlendFactor = VK_BLEND_FACTOR_SRC1_COLOR;
att_state.srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE;
att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_SRC1_ALPHA;
break;
}
if (blend == GPU_BLEND_SUBTRACT) {
att_state.alphaBlendOp = VK_BLEND_OP_REVERSE_SUBTRACT;
att_state.colorBlendOp = VK_BLEND_OP_REVERSE_SUBTRACT;
}
else {
att_state.alphaBlendOp = VK_BLEND_OP_ADD;
att_state.colorBlendOp = VK_BLEND_OP_ADD;
}
if (blend != GPU_BLEND_NONE) {
att_state.blendEnable = VK_TRUE;
}
else {
att_state.blendEnable = VK_FALSE;
}
}
void VKPipelineStateManager::set_write_mask(const eGPUWriteMask write_mask)
{
depth_stencil_state.depthWriteEnable = (write_mask & GPU_WRITE_DEPTH) ? VK_TRUE : VK_FALSE;
VkPipelineColorBlendAttachmentState &att_state = color_blend_attachments.last();
att_state.colorWriteMask = 0;
if ((write_mask & GPU_WRITE_RED) != 0) {
att_state.colorWriteMask |= VK_COLOR_COMPONENT_R_BIT;
}
if ((write_mask & GPU_WRITE_GREEN) != 0) {
att_state.colorWriteMask |= VK_COLOR_COMPONENT_G_BIT;
}
if ((write_mask & GPU_WRITE_BLUE) != 0) {
att_state.colorWriteMask |= VK_COLOR_COMPONENT_B_BIT;
}
if ((write_mask & GPU_WRITE_ALPHA) != 0) {
att_state.colorWriteMask |= VK_COLOR_COMPONENT_A_BIT;
}
}
void VKPipelineStateManager::set_depth_test(const eGPUDepthTest value)
{
switch (value) {
case GPU_DEPTH_LESS:
depth_stencil_state.depthCompareOp = VK_COMPARE_OP_LESS;
break;
case GPU_DEPTH_LESS_EQUAL:
depth_stencil_state.depthCompareOp = VK_COMPARE_OP_LESS_OR_EQUAL;
break;
case GPU_DEPTH_EQUAL:
depth_stencil_state.depthCompareOp = VK_COMPARE_OP_EQUAL;
break;
case GPU_DEPTH_GREATER:
depth_stencil_state.depthCompareOp = VK_COMPARE_OP_GREATER;
break;
case GPU_DEPTH_GREATER_EQUAL:
depth_stencil_state.depthCompareOp = VK_COMPARE_OP_GREATER_OR_EQUAL;
break;
case GPU_DEPTH_ALWAYS:
default:
depth_stencil_state.depthCompareOp = VK_COMPARE_OP_ALWAYS;
break;
}
if (value != GPU_DEPTH_NONE) {
depth_stencil_state.depthTestEnable = VK_TRUE;
}
else {
depth_stencil_state.depthTestEnable = VK_FALSE;
depth_stencil_state.depthCompareOp = VK_COMPARE_OP_NEVER;
}
depth_stencil_state.depthBoundsTestEnable = VK_TRUE;
}
void VKPipelineStateManager::set_stencil_test(const eGPUStencilTest test,
const eGPUStencilOp operation)
{
depth_stencil_state.front.compareOp = VK_COMPARE_OP_ALWAYS;
depth_stencil_state.front.compareMask = 0;
depth_stencil_state.front.reference = 0;
switch (operation) {
case GPU_STENCIL_OP_REPLACE:
depth_stencil_state.front.failOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.front.passOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.front.depthFailOp = VK_STENCIL_OP_REPLACE;
depth_stencil_state.back = depth_stencil_state.front;
break;
case GPU_STENCIL_OP_COUNT_DEPTH_PASS:
depth_stencil_state.front.failOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.front.passOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.front.depthFailOp = VK_STENCIL_OP_DECREMENT_AND_WRAP;
depth_stencil_state.back = depth_stencil_state.front;
depth_stencil_state.back.depthFailOp = VK_STENCIL_OP_INCREMENT_AND_WRAP;
break;
case GPU_STENCIL_OP_COUNT_DEPTH_FAIL:
depth_stencil_state.front.failOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.front.passOp = VK_STENCIL_OP_INCREMENT_AND_WRAP;
depth_stencil_state.front.depthFailOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.back = depth_stencil_state.front;
depth_stencil_state.back.depthFailOp = VK_STENCIL_OP_DECREMENT_AND_WRAP;
break;
case GPU_STENCIL_OP_NONE:
default:
depth_stencil_state.front.failOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.front.passOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.front.depthFailOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.back = depth_stencil_state.front;
break;
}
if (test != GPU_STENCIL_NONE) {
depth_stencil_state.stencilTestEnable = VK_TRUE;
}
else {
depth_stencil_state.stencilTestEnable = VK_FALSE;
}
}
void VKPipelineStateManager::set_stencil_mask(const eGPUStencilTest test,
const GPUStateMutable &mutable_state)
{
depth_stencil_state.front.writeMask = static_cast<uint32_t>(mutable_state.stencil_write_mask);
depth_stencil_state.front.reference = static_cast<uint32_t>(mutable_state.stencil_reference);
depth_stencil_state.front.compareOp = VK_COMPARE_OP_ALWAYS;
depth_stencil_state.front.compareMask = static_cast<uint32_t>(
mutable_state.stencil_compare_mask);
switch (test) {
case GPU_STENCIL_NEQUAL:
depth_stencil_state.front.compareOp = VK_COMPARE_OP_NOT_EQUAL;
break;
case GPU_STENCIL_EQUAL:
depth_stencil_state.front.compareOp = VK_COMPARE_OP_EQUAL;
break;
case GPU_STENCIL_ALWAYS:
depth_stencil_state.front.compareOp = VK_COMPARE_OP_ALWAYS;
break;
case GPU_STENCIL_NONE:
default:
depth_stencil_state.front.compareMask = 0x00;
depth_stencil_state.front.compareOp = VK_COMPARE_OP_ALWAYS;
return;
}
depth_stencil_state.back = depth_stencil_state.front;
}
void VKPipelineStateManager::set_clip_distances(const int /*new_dist_len*/,
const int /*old_dist_len*/)
{
/* TODO: needs to be implemented. */
}
void VKPipelineStateManager::set_logic_op(const bool enable)
{
if (enable) {
pipeline_color_blend_state.logicOpEnable = VK_TRUE;
pipeline_color_blend_state.logicOp = VK_LOGIC_OP_XOR;
}
else {
pipeline_color_blend_state.logicOpEnable = VK_FALSE;
}
}
void VKPipelineStateManager::set_facing(const bool invert)
{
rasterization_state.frontFace = invert ? VK_FRONT_FACE_COUNTER_CLOCKWISE :
VK_FRONT_FACE_CLOCKWISE;
}
void VKPipelineStateManager::set_backface_culling(const eGPUFaceCullTest cull_test)
{
rasterization_state.cullMode = to_vk_cull_mode_flags(cull_test);
}
void VKPipelineStateManager::set_provoking_vert(const eGPUProvokingVertex /*vert*/)
{
/* TODO: Requires VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME, See:
* https://registry.khronos.org/vulkan/specs/1.3-extensions/man/html/VkPipelineRasterizationProvokingVertexStateCreateInfoEXT.html
*/
}
void VKPipelineStateManager::set_shadow_bias(const bool enable)
{
if (enable) {
rasterization_state.depthBiasEnable = VK_TRUE;
rasterization_state.depthBiasSlopeFactor = 2.f;
rasterization_state.depthBiasConstantFactor = 1.f;
rasterization_state.depthBiasClamp = 0.f;
}
else {
rasterization_state.depthBiasEnable = VK_FALSE;
}
}
} // namespace blender::gpu
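A note on the change-detection idiom above: because GPUState is a packed bitfield struct, `state ^ current_` yields a value whose non-zero fields are exactly the fields that changed, and `force_state()` seeds `current_` with `~state` so every field compares as changed. A standalone model with a hypothetical two-field state:
#include <cstdint>
#include <cstdio>
/* Hypothetical packed state; GPUState packs many more fields. */
struct State {
  uint32_t blend : 4;
  uint32_t depth_test : 4;
  State operator^(const State &other) const
  {
    State changed{};
    changed.blend = blend ^ other.blend;
    changed.depth_test = depth_test ^ other.depth_test;
    return changed;
  }
};
int main()
{
  const State current{1, 2}; /* Alpha blend, depth less. */
  const State wanted{1, 5};  /* Alpha blend, depth greater. */
  const State changed = wanted ^ current;
  if (changed.blend) {
    printf("update blend state\n"); /* Not reached: field unchanged. */
  }
  if (changed.depth_test) {
    printf("update depth state\n"); /* Reached: field differs. */
  }
  return 0;
}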

View File

@@ -0,0 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation */
/** \file
* \ingroup gpu
*/
#include "gpu_state_private.hh"
#include "vk_common.hh"
#include "BLI_vector.hh"
namespace blender::gpu {
class VKPipelineStateManager {
private:
GPUState current_;
GPUStateMutable current_mutable_;
public:
VkPipelineColorBlendStateCreateInfo pipeline_color_blend_state;
Vector<VkPipelineColorBlendAttachmentState> color_blend_attachments;
VkPipelineRasterizationStateCreateInfo rasterization_state;
VkPipelineDepthStencilStateCreateInfo depth_stencil_state;
VKPipelineStateManager();
void set_state(const GPUState &state, const GPUStateMutable &mutable_state);
void force_state(const GPUState &state, const GPUStateMutable &mutable_state);
private:
void set_blend(eGPUBlend blend);
void set_write_mask(eGPUWriteMask write_mask);
void set_depth_test(eGPUDepthTest value);
void set_stencil_test(eGPUStencilTest test, eGPUStencilOp operation);
void set_stencil_mask(eGPUStencilTest test, const GPUStateMutable &mutable_state);
void set_clip_distances(int new_dist_len, int old_dist_len);
void set_logic_op(bool enable);
void set_facing(bool invert);
void set_backface_culling(eGPUFaceCullTest test);
void set_provoking_vert(eGPUProvokingVertex vert);
void set_shadow_bias(bool enable);
};
} // namespace blender::gpu

View File

@@ -158,6 +158,14 @@ template<typename Resource> class VKResourceTracker : NonCopyable {
*/
virtual std::unique_ptr<Resource> create_resource(VKContext &context) = 0;
/**
* Does this instance have an active resource?
*/
bool has_active_resource()
{
return !tracked_resources_.is_empty();
}
/**
* Return the active resource of the tracker.
*/

View File

@@ -0,0 +1,45 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation */
/** \file
* \ingroup gpu
*/
#include "vk_sampler.hh"
#include "vk_backend.hh"
#include "vk_context.hh"
#include "vk_memory.hh"
namespace blender::gpu {
VKSampler::~VKSampler()
{
free();
}
void VKSampler::create()
{
BLI_assert(vk_sampler_ == VK_NULL_HANDLE);
VK_ALLOCATION_CALLBACKS
VkSamplerCreateInfo sampler_info = {};
sampler_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
const VKDevice &device = VKBackend::get().device_get();
vkCreateSampler(device.device_get(), &sampler_info, vk_allocation_callbacks, &vk_sampler_);
debug::object_label(vk_sampler_, "DummySampler");
}
void VKSampler::free()
{
VK_ALLOCATION_CALLBACKS
if (vk_sampler_ != VK_NULL_HANDLE) {
const VKDevice &device = VKBackend::get().device_get();
if (device.device_get() != VK_NULL_HANDLE) {
vkDestroySampler(device.device_get(), vk_sampler_, vk_allocation_callbacks);
}
vk_sampler_ = VK_NULL_HANDLE;
}
}
} // namespace blender::gpu
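Since the sampler above is created with an all-default VkSamplerCreateInfo (nearest filtering, repeat addressing), it is labeled `DummySampler`. For reference, a non-dummy configuration would fill the standard fields, e.g. (plain Vulkan API, assuming a valid `device`; error handling omitted):
#include <vulkan/vulkan.h>
VkSampler create_linear_clamp_sampler(VkDevice device)
{
  VkSamplerCreateInfo info = {};
  info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
  info.magFilter = VK_FILTER_LINEAR;
  info.minFilter = VK_FILTER_LINEAR;
  info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
  info.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
  info.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
  info.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
  info.maxLod = VK_LOD_CLAMP_NONE; /* Do not clamp away mip levels. */
  VkSampler sampler = VK_NULL_HANDLE;
  vkCreateSampler(device, &info, nullptr, &sampler);
  return sampler;
}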

View File

@@ -0,0 +1,34 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation */
/** \file
* \ingroup gpu
*/
#pragma once
#include "gpu_shader_private.hh"
#include "vk_common.hh"
#include "BLI_utility_mixins.hh"
namespace blender::gpu {
class VKContext;
class VKSampler : public NonCopyable {
VkSampler vk_sampler_ = VK_NULL_HANDLE;
public:
virtual ~VKSampler();
void create();
void free();
VkSampler vk_handle()
{
BLI_assert(vk_sampler_ != VK_NULL_HANDLE);
return vk_sampler_;
}
};
} // namespace blender::gpu

View File

@@ -578,7 +578,6 @@ VKShader::VKShader(const char *name) : Shader(name)
VKShader::~VKShader()
{
VK_ALLOCATION_CALLBACKS
const VKDevice &device = VKBackend::get().device_get();
if (vertex_module_ != VK_NULL_HANDLE) {
vkDestroyShaderModule(device.device_get(), vertex_module_, vk_allocation_callbacks);
@@ -667,16 +666,18 @@ bool VKShader::finalize(const shader::ShaderCreateInfo *info)
BLI_assert((fragment_module_ != VK_NULL_HANDLE && info->tf_type_ == GPU_SHADER_TFB_NONE) ||
(fragment_module_ == VK_NULL_HANDLE && info->tf_type_ != GPU_SHADER_TFB_NONE));
BLI_assert(compute_module_ == VK_NULL_HANDLE);
result = finalize_graphics_pipeline(device.device_get());
pipeline_ = VKPipeline::create_graphics_pipeline(layout_,
vk_interface->push_constants_layout_get());
result = true;
}
else {
BLI_assert(vertex_module_ == VK_NULL_HANDLE);
BLI_assert(geometry_module_ == VK_NULL_HANDLE);
BLI_assert(fragment_module_ == VK_NULL_HANDLE);
BLI_assert(compute_module_ != VK_NULL_HANDLE);
compute_pipeline_ = VKPipeline::create_compute_pipeline(
pipeline_ = VKPipeline::create_compute_pipeline(
compute_module_, layout_, pipeline_layout_, vk_interface->push_constants_layout_get());
result = compute_pipeline_.is_valid();
result = pipeline_.is_valid();
}
if (result) {
@@ -688,36 +689,6 @@ bool VKShader::finalize(const shader::ShaderCreateInfo *info)
return result;
}
bool VKShader::finalize_graphics_pipeline(VkDevice /*vk_device */)
{
Vector<VkPipelineShaderStageCreateInfo> pipeline_stages;
VkPipelineShaderStageCreateInfo vertex_stage_info = {};
vertex_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
vertex_stage_info.stage = VK_SHADER_STAGE_VERTEX_BIT;
vertex_stage_info.module = vertex_module_;
vertex_stage_info.pName = "main";
pipeline_stages.append(vertex_stage_info);
if (geometry_module_ != VK_NULL_HANDLE) {
VkPipelineShaderStageCreateInfo geo_stage_info = {};
geo_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
geo_stage_info.stage = VK_SHADER_STAGE_GEOMETRY_BIT;
geo_stage_info.module = geometry_module_;
geo_stage_info.pName = "main";
pipeline_stages.append(geo_stage_info);
}
if (fragment_module_ != VK_NULL_HANDLE) {
VkPipelineShaderStageCreateInfo fragment_stage_info = {};
fragment_stage_info.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
fragment_stage_info.stage = VK_SHADER_STAGE_FRAGMENT_BIT;
fragment_stage_info.module = fragment_module_;
fragment_stage_info.pName = "main";
pipeline_stages.append(fragment_stage_info);
}
return true;
}
bool VKShader::finalize_pipeline_layout(VkDevice vk_device,
const VKShaderInterface &shader_interface)
{
@@ -958,26 +929,27 @@ bool VKShader::transform_feedback_enable(GPUVertBuf *)
void VKShader::transform_feedback_disable() {}
void VKShader::update_graphics_pipeline(VKContext &context,
const GPUPrimType prim_type,
const VKVertexAttributeObject &vertex_attribute_object)
{
BLI_assert(is_graphics_shader());
pipeline_get().finalize(context,
vertex_module_,
geometry_module_,
fragment_module_,
pipeline_layout_,
prim_type,
vertex_attribute_object);
}
void VKShader::bind()
{
VKContext *context = VKContext::get();
if (is_compute_shader()) {
context->command_buffer_get().bind(compute_pipeline_, VK_PIPELINE_BIND_POINT_COMPUTE);
}
else {
BLI_assert_unreachable();
}
/* Intentionally empty. Binding of the pipeline is done just before drawing/dispatching.
* See #VKPipeline::update_and_bind. */
}
void VKShader::unbind()
{
if (is_compute_shader()) {
}
else {
BLI_assert_unreachable();
}
}
void VKShader::unbind() {}
void VKShader::uniform_float(int location, int comp_len, int array_size, const float *data)
{
@@ -1217,7 +1189,7 @@ int VKShader::program_handle_get() const
VKPipeline &VKShader::pipeline_get()
{
return compute_pipeline_;
return pipeline_;
}
const VKShaderInterface &VKShader::interface_get() const

View File

@@ -28,7 +28,7 @@ class VKShader : public Shader {
bool compilation_failed_ = false;
VkDescriptorSetLayout layout_ = VK_NULL_HANDLE;
VkPipelineLayout pipeline_layout_ = VK_NULL_HANDLE;
VKPipeline compute_pipeline_;
VKPipeline pipeline_;
public:
VKShader(const char *name);
@@ -70,6 +70,10 @@ class VKShader : public Shader {
const VKShaderInterface &interface_get() const;
void update_graphics_pipeline(VKContext &context,
const GPUPrimType prim_type,
const VKVertexAttributeObject &vertex_attribute_object);
private:
Vector<uint32_t> compile_glsl_to_spirv(Span<const char *> sources, shaderc_shader_kind kind);
void build_shader_module(Span<uint32_t> spirv_module, VkShaderModule *r_shader_module);
@@ -80,7 +84,6 @@ class VKShader : public Shader {
const VKShaderInterface &shader_interface,
const shader::ShaderCreateInfo &info);
bool finalize_pipeline_layout(VkDevice vk_device, const VKShaderInterface &shader_interface);
bool finalize_graphics_pipeline(VkDevice vk_device);
bool is_graphics_shader() const
{
@@ -93,4 +96,14 @@ class VKShader : public Shader {
}
};
static inline VKShader &unwrap(Shader &shader)
{
return static_cast<VKShader &>(shader);
}
static inline VKShader *unwrap(Shader *shader)
{
return static_cast<VKShader *>(shader);
}
} // namespace blender::gpu

View File

@@ -6,12 +6,67 @@
*/
#include "vk_state_manager.hh"
#include "vk_context.hh"
#include "vk_pipeline.hh"
#include "vk_shader.hh"
#include "vk_texture.hh"
namespace blender::gpu {
void VKStateManager::apply_state() {}
#include "GPU_capabilities.h"
void VKStateManager::force_state() {}
namespace blender::gpu {
VKStateManager::VKStateManager()
{
sampler_.create();
constexpr int max_bindings = 16;
image_bindings_ = Array<ImageBinding>(max_bindings);
image_bindings_.fill(ImageBinding());
texture_bindings_ = Array<ImageBinding>(max_bindings);
texture_bindings_.fill(ImageBinding());
uniform_buffer_bindings_ = Array<UniformBufferBinding>(max_bindings);
uniform_buffer_bindings_.fill(UniformBufferBinding());
}
void VKStateManager::apply_state()
{
VKContext &context = *VKContext::get();
if (context.shader) {
VKShader &shader = unwrap(*context.shader);
VKPipeline &pipeline = shader.pipeline_get();
pipeline.state_manager_get().set_state(state, mutable_state);
for (int binding : IndexRange(image_bindings_.size())) {
if (image_bindings_[binding].texture == nullptr) {
continue;
}
image_bindings_[binding].texture->image_bind(binding);
}
for (int binding : IndexRange(texture_bindings_.size())) {
if (texture_bindings_[binding].texture == nullptr) {
continue;
}
texture_bindings_[binding].texture->bind(binding, sampler_);
}
for (int binding : IndexRange(uniform_buffer_bindings_.size())) {
if (uniform_buffer_bindings_[binding].buffer == nullptr) {
continue;
}
uniform_buffer_bindings_[binding].buffer->bind(
binding, shader::ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER);
}
}
}
void VKStateManager::force_state()
{
VKContext &context = *VKContext::get();
BLI_assert(context.shader);
VKShader &shader = unwrap(*context.shader);
VKPipeline &pipeline = shader.pipeline_get();
pipeline.state_manager_get().force_state(state, mutable_state);
}
void VKStateManager::issue_barrier(eGPUBarrier /*barrier_bits*/)
{
@@ -22,21 +77,69 @@ void VKStateManager::issue_barrier(eGPUBarrier /*barrier_bits*/)
command_buffer.submit();
}
void VKStateManager::texture_bind(Texture * /*tex*/, GPUSamplerState /*sampler*/, int /*unit*/) {}
void VKStateManager::texture_bind(Texture *tex, GPUSamplerState /*sampler*/, int unit)
{
VKTexture *texture = unwrap(tex);
texture_bindings_[unit].texture = texture;
}
void VKStateManager::texture_unbind(Texture * /*tex*/) {}
void VKStateManager::texture_unbind(Texture *tex)
{
VKTexture *texture = unwrap(tex);
for (ImageBinding &binding : texture_bindings_) {
if (binding.texture == texture) {
binding.texture = nullptr;
}
}
}
void VKStateManager::texture_unbind_all() {}
void VKStateManager::texture_unbind_all()
{
for (ImageBinding &binding : texture_bindings_) {
if (binding.texture != nullptr) {
binding.texture = nullptr;
}
}
}
void VKStateManager::image_bind(Texture *tex, int binding)
{
VKTexture *texture = unwrap(tex);
texture->image_bind(binding);
image_bindings_[binding].texture = texture;
}
void VKStateManager::image_unbind(Texture * /*tex*/) {}
void VKStateManager::image_unbind(Texture *tex)
{
VKTexture *texture = unwrap(tex);
for (ImageBinding &binding : image_bindings_) {
if (binding.texture == texture) {
binding.texture = nullptr;
}
}
}
void VKStateManager::image_unbind_all() {}
void VKStateManager::image_unbind_all()
{
for (ImageBinding &binding : texture_bindings_) {
if (binding.texture != nullptr) {
binding.texture = nullptr;
}
}
}
void VKStateManager::uniform_buffer_bind(VKUniformBuffer *uniform_buffer, int slot)
{
uniform_buffer_bindings_[slot].buffer = uniform_buffer;
}
void VKStateManager::uniform_buffer_unbind(VKUniformBuffer *uniform_buffer)
{
for (UniformBufferBinding &binding : uniform_buffer_bindings_) {
if (binding.buffer == uniform_buffer) {
binding.buffer = nullptr;
}
}
}
void VKStateManager::texture_unpack_row_length_set(uint len)
{

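A note on the state-manager pattern above: the bind/unbind overrides only record into fixed-size slot arrays, and `apply_state()` replays every occupied slot against the current shader right before a draw or dispatch. A standalone model of the record/replay split (all names illustrative):
#include <array>
#include <cstdio>
struct Texture { const char *name; };
struct BindingTable {
  static constexpr int max_bindings = 16;
  std::array<Texture *, max_bindings> slots{};
  /* Record phase: remember what is bound where; touch no GPU state. */
  void bind(Texture *texture, int unit) { slots[unit] = texture; }
  void unbind(Texture *texture)
  {
    for (Texture *&slot : slots) {
      if (slot == texture) {
        slot = nullptr;
      }
    }
  }
  /* Replay phase: stands in for the descriptor-set updates performed in
   * VKStateManager::apply_state(). */
  void apply() const
  {
    for (int unit = 0; unit < max_bindings; unit++) {
      if (slots[unit]) {
        printf("bind %s -> unit %d\n", slots[unit]->name, unit);
      }
    }
  }
};
int main()
{
  Texture diffuse{"diffuse"};
  BindingTable table;
  table.bind(&diffuse, 3);
  table.apply(); /* bind diffuse -> unit 3 */
  return 0;
}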
View File

@@ -9,11 +9,33 @@
#include "gpu_state_private.hh"
#include "BLI_array.hh"
#include "vk_sampler.hh"
namespace blender::gpu {
class VKTexture;
class VKUniformBuffer;
class VKStateManager : public StateManager {
uint texture_unpack_row_length_;
/* Dummy sampler for now. */
VKSampler sampler_;
uint texture_unpack_row_length_ = 0;
struct ImageBinding {
VKTexture *texture = nullptr;
};
struct UniformBufferBinding {
VKUniformBuffer *buffer = nullptr;
};
Array<ImageBinding> image_bindings_;
Array<ImageBinding> texture_bindings_;
Array<UniformBufferBinding> uniform_buffer_bindings_;
public:
VKStateManager();
void apply_state() override;
void force_state() override;
@@ -27,6 +49,9 @@ class VKStateManager : public StateManager {
void image_unbind(Texture *tex) override;
void image_unbind_all() override;
void uniform_buffer_bind(VKUniformBuffer *uniform_buffer, int slot);
void uniform_buffer_unbind(VKUniformBuffer *uniform_buffer);
void texture_unpack_row_length_set(uint len) override;
/**

View File

@@ -229,6 +229,16 @@ static VkImageUsageFlagBits to_vk_image_usage(const eGPUTextureUsage usage,
result = static_cast<VkImageUsageFlagBits>(result | VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
}
/* Disable some usages based on the given format flag to support more devices. */
if (format_flag & GPU_FORMAT_SRGB) {
/* NVIDIA devices don't create SRGB textures when the storage usage bit is set. */
result = static_cast<VkImageUsageFlagBits>(result & ~VK_IMAGE_USAGE_STORAGE_BIT);
}
if (format_flag & (GPU_FORMAT_DEPTH | GPU_FORMAT_STENCIL)) {
/* NVIDIA devices don't create depth textures when the storage usage bit is set. */
result = static_cast<VkImageUsageFlagBits>(result & ~VK_IMAGE_USAGE_STORAGE_BIT);
}
return result;
}
@@ -311,6 +321,24 @@ bool VKTexture::allocate()
return result == VK_SUCCESS;
}
/* TODO: move texture/image bindings to shader. */
void VKTexture::bind(int unit, VKSampler &sampler)
{
if (!is_allocated()) {
allocate();
}
VKContext &context = *VKContext::get();
VKShader *shader = static_cast<VKShader *>(context.shader);
const VKShaderInterface &shader_interface = shader->interface_get();
const std::optional<VKDescriptorSet::Location> location =
shader_interface.descriptor_set_location(
shader::ShaderCreateInfo::Resource::BindType::SAMPLER, unit);
if (location) {
VKDescriptorSetTracker &descriptor_set = shader->pipeline_get().descriptor_set_get();
descriptor_set.bind(*this, *location, sampler);
}
}
void VKTexture::image_bind(int binding)
{
if (!is_allocated()) {

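`VKTexture::bind()` above shows the backend's lookup-then-bind idiom: the GPU-module unit is translated through the shader interface into an optional descriptor-set location, and the bind is silently skipped when the bound shader never samples that unit. A reduced model (std::optional stands in for VKDescriptorSet::Location; the mapping is hypothetical):
#include <cstdio>
#include <optional>
/* Stand-in for VKShaderInterface::descriptor_set_location(): empty when
 * the shader does not reference the given unit. */
static std::optional<int> descriptor_set_location(int unit)
{
  return unit == 0 ? std::optional<int>(7) : std::nullopt;
}
static void bind_texture(int unit)
{
  if (const std::optional<int> location = descriptor_set_location(unit)) {
    printf("texture unit %d -> descriptor location %d\n", unit, *location);
  }
  /* No location: nothing to do for this shader. */
}
int main()
{
  bind_texture(0); /* Bound. */
  bind_texture(1); /* Skipped. */
  return 0;
}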
View File

@@ -12,6 +12,8 @@
namespace blender::gpu {
class VKSampler;
class VKTexture : public Texture {
VkImage vk_image_ = VK_NULL_HANDLE;
VkImageView vk_image_view_ = VK_NULL_HANDLE;
@@ -25,6 +27,7 @@ class VKTexture : public Texture {
public:
VKTexture(const char *name) : Texture(name) {}
virtual ~VKTexture() override;
void init(VkImage vk_image, VkImageLayout layout);
@@ -46,7 +49,9 @@ class VKTexture : public Texture {
/* TODO(fclem): Legacy. Should be removed at some point. */
uint gl_bindcode_get() const override;
void bind(int unit, VKSampler &sampler);
void image_bind(int location);
VkImage vk_image_handle() const
{
BLI_assert(vk_image_ != VK_NULL_HANDLE);

View File

@@ -9,6 +9,7 @@
#include "vk_context.hh"
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
#include "vk_state_manager.hh"
namespace blender::gpu {
@@ -54,7 +55,9 @@ void VKUniformBuffer::bind(int slot, shader::ShaderCreateInfo::Resource::BindTyp
void VKUniformBuffer::bind(int slot)
{
bind(slot, shader::ShaderCreateInfo::Resource::BindType::UNIFORM_BUFFER);
/* Uniform buffers can be bound without a shader. */
VKContext &context = *VKContext::get();
context.state_manager_get().uniform_buffer_bind(this, slot);
}
void VKUniformBuffer::bind_as_ssbo(int slot)
@@ -62,6 +65,10 @@ void VKUniformBuffer::bind_as_ssbo(int slot)
bind(slot, shader::ShaderCreateInfo::Resource::BindType::STORAGE_BUFFER);
}
void VKUniformBuffer::unbind() {}
void VKUniformBuffer::unbind()
{
VKContext &context = *VKContext::get();
context.state_manager_get().uniform_buffer_unbind(this);
}
} // namespace blender::gpu

View File

@@ -25,6 +25,7 @@ class VKUniformBuffer : public UniformBuf, NonCopyable {
void clear_to_zero() override;
void bind(int slot) override;
void bind_as_ssbo(int slot) override;
void bind(int slot, shader::ShaderCreateInfo::Resource::BindType bind_type);
void unbind() override;
VkBuffer vk_handle() const
@@ -39,7 +40,6 @@ class VKUniformBuffer : public UniformBuf, NonCopyable {
private:
void allocate();
void bind(int slot, shader::ShaderCreateInfo::Resource::BindType bind_type);
};
} // namespace blender::gpu

View File

@@ -0,0 +1,196 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
#include "vk_vertex_attribute_object.hh"
#include "vk_batch.hh"
#include "vk_context.hh"
#include "vk_immediate.hh"
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
#include "vk_vertex_buffer.hh"
#include "BLI_array.hh"
namespace blender::gpu {
VKVertexAttributeObject::VKVertexAttributeObject()
{
clear();
}
void VKVertexAttributeObject::clear()
{
is_valid = false;
info.pNext = nullptr;
bindings.clear();
attributes.clear();
vbos.clear();
buffers.clear();
}
VKVertexAttributeObject &VKVertexAttributeObject::operator=(const VKVertexAttributeObject &other)
{
if (this == &other) {
return *this;
}
is_valid = other.is_valid;
info = other.info;
bindings.clear();
bindings.extend(other.bindings);
attributes.clear();
attributes.extend(other.attributes);
vbos.clear();
vbos.extend(other.vbos);
buffers.clear();
buffers.extend(other.buffers);
return *this;
}
void VKVertexAttributeObject::bind(VKContext &context)
{
Array<bool> visited_bindings(bindings.size());
visited_bindings.fill(false);
for (VkVertexInputAttributeDescription attribute : attributes) {
if (visited_bindings[attribute.binding]) {
continue;
}
visited_bindings[attribute.binding] = true;
/* Bind VBOs from batches. */
if (attribute.binding < vbos.size()) {
BLI_assert(vbos[attribute.binding]);
VKVertexBuffer &vbo = *vbos[attribute.binding];
vbo.upload();
context.command_buffer_get().bind(attribute.binding, vbo, 0);
}
/* Bind dynamic buffers from immediate mode. */
if (attribute.binding < buffers.size()) {
VKBufferWithOffset &buffer = buffers[attribute.binding];
context.command_buffer_get().bind(attribute.binding, buffer);
}
}
}
void VKVertexAttributeObject::update_bindings(const VKContext &context, VKBatch &batch)
{
clear();
const VKShaderInterface &interface = unwrap(context.shader)->interface_get();
AttributeMask occupied_attributes = 0;
for (int v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
VKVertexBuffer *vbo = batch.instance_buffer_get(v);
if (vbo) {
update_bindings(
vbo->format, vbo, nullptr, vbo->vertex_len, interface, occupied_attributes, true);
}
}
for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
VKVertexBuffer *vbo = batch.vertex_buffer_get(v);
if (vbo) {
update_bindings(
vbo->format, vbo, nullptr, vbo->vertex_len, interface, occupied_attributes, false);
}
}
is_valid = true;
}
void VKVertexAttributeObject::update_bindings(VKImmediate &immediate)
{
clear();
const VKShaderInterface &interface = unwrap(unwrap(immediate.shader))->interface_get();
AttributeMask occupied_attributes = 0;
VKBufferWithOffset immediate_buffer = {*immediate.active_resource(),
immediate.subbuffer_offset_get()};
update_bindings(immediate.vertex_format,
nullptr,
&immediate_buffer,
immediate.vertex_len,
interface,
occupied_attributes,
false);
is_valid = true;
BLI_assert(interface.enabled_attr_mask_ == occupied_attributes);
}
void VKVertexAttributeObject::update_bindings(const GPUVertFormat &vertex_format,
VKVertexBuffer *vertex_buffer,
VKBufferWithOffset *immediate_vertex_buffer,
const int64_t vertex_len,
const VKShaderInterface &interface,
AttributeMask &r_occupied_attributes,
const bool use_instancing)
{
BLI_assert(vertex_buffer || immediate_vertex_buffer);
BLI_assert(!(vertex_buffer && immediate_vertex_buffer));
if (vertex_format.attr_len <= 0) {
return;
}
uint32_t offset = 0;
uint32_t stride = vertex_format.stride;
for (uint32_t attribute_index = 0; attribute_index < vertex_format.attr_len; attribute_index++) {
const GPUVertAttr &attribute = vertex_format.attrs[attribute_index];
if (vertex_format.deinterleaved) {
offset += ((attribute_index == 0) ? 0 : vertex_format.attrs[attribute_index - 1].size) *
vertex_len;
stride = attribute.size;
}
else {
offset = attribute.offset;
}
const uint32_t binding = bindings.size();
bool attribute_used_by_shader = false;
for (uint32_t name_index = 0; name_index < attribute.name_len; name_index++) {
const char *name = GPU_vertformat_attr_name_get(&vertex_format, &attribute, name_index);
const ShaderInput *shader_input = interface.attr_get(name);
if (shader_input == nullptr || shader_input->location == -1) {
continue;
}
/* Don't overwrite attributes that are already occupied. */
AttributeMask attribute_mask = 1 << shader_input->location;
if (r_occupied_attributes & attribute_mask) {
continue;
}
r_occupied_attributes |= attribute_mask;
attribute_used_by_shader = true;
VkVertexInputAttributeDescription attribute_description = {};
attribute_description.binding = binding;
attribute_description.location = shader_input->location;
attribute_description.offset = offset;
attribute_description.format = to_vk_format(
static_cast<GPUVertCompType>(attribute.comp_type),
attribute.size,
static_cast<GPUVertFetchMode>(attribute.fetch_mode));
attributes.append(attribute_description);
}
if (attribute_used_by_shader) {
VkVertexInputBindingDescription vk_binding_descriptor = {};
vk_binding_descriptor.binding = binding;
vk_binding_descriptor.stride = stride;
vk_binding_descriptor.inputRate = use_instancing ? VK_VERTEX_INPUT_RATE_INSTANCE :
VK_VERTEX_INPUT_RATE_VERTEX;
bindings.append(vk_binding_descriptor);
if (vertex_buffer) {
vbos.append(vertex_buffer);
}
if (immediate_vertex_buffer) {
buffers.append(*immediate_vertex_buffer);
}
}
}
}
} // namespace blender::gpu
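To make the offset/stride bookkeeping above concrete: an interleaved format with a vec3 float position and a 4 x uchar color produces one binding and two attributes. A hedged worked example (the locations and the unorm fetch are assumptions for illustration, not taken from the diff):
/* Interleaved vertex { float pos[3]; uchar col[4]; } => stride = 16 bytes.
 *
 *   binding 0:  stride 16, inputRate VK_VERTEX_INPUT_RATE_VERTEX
 *   attr "pos": location 0, binding 0, offset 0,
 *               format VK_FORMAT_R32G32B32_SFLOAT
 *   attr "col": location 1, binding 0, offset 12,
 *               format VK_FORMAT_R8G8B8A8_UNORM
 *
 * With a deinterleaved format, the loop above instead advances the offset by
 * `attribute.size * vertex_len` per attribute and the stride becomes the
 * attribute's own size. */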

View File

@@ -0,0 +1,59 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#include "vk_buffer.hh"
#include "vk_common.hh"
#include "BLI_vector.hh"
#pragma once
namespace blender::gpu {
class VKVertexBuffer;
class VKContext;
class VKBatch;
class VKShaderInterface;
class VKImmediate;
using AttributeMask = uint16_t;
class VKVertexAttributeObject {
public:
bool is_valid = false;
VkPipelineVertexInputStateCreateInfo info = {
VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, nullptr};
Vector<VkVertexInputBindingDescription> bindings;
Vector<VkVertexInputAttributeDescription> attributes;
/* Used for batches. */
Vector<VKVertexBuffer *> vbos;
/* Used for immediate mode. */
Vector<VKBufferWithOffset> buffers;
VKVertexAttributeObject();
void clear();
void bind(VKContext &context);
/* Copy assignment operator. */
VKVertexAttributeObject &operator=(const VKVertexAttributeObject &other);
void update_bindings(const VKContext &context, VKBatch &batch);
void update_bindings(VKImmediate &immediate);
private:
void update_bindings(const GPUVertFormat &vertex_format,
VKVertexBuffer *vertex_buffer,
VKBufferWithOffset *immediate_vertex_buffer,
const int64_t vertex_len,
const VKShaderInterface &interface,
AttributeMask &r_occupied_attributes,
const bool use_instancing);
};
} // namespace blender::gpu

View File

@@ -7,6 +7,7 @@
#include "MEM_guardedalloc.h"
#include "vk_data_conversion.hh"
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
#include "vk_vertex_buffer.hh"
@@ -34,11 +35,20 @@ void VKVertexBuffer::bind_as_ssbo(uint binding)
shader->pipeline_get().descriptor_set_get().bind_as_ssbo(*this, *location);
}
void VKVertexBuffer::bind_as_texture(uint /*binding*/) {}
void VKVertexBuffer::bind_as_texture(uint /*binding*/)
{
NOT_YET_IMPLEMENTED
}
void VKVertexBuffer::wrap_handle(uint64_t /*handle*/) {}
void VKVertexBuffer::wrap_handle(uint64_t /*handle*/)
{
NOT_YET_IMPLEMENTED
}
void VKVertexBuffer::update_sub(uint /*start*/, uint /*len*/, const void * /*data*/) {}
void VKVertexBuffer::update_sub(uint /*start*/, uint /*len*/, const void * /*data*/)
{
NOT_YET_IMPLEMENTED
}
void VKVertexBuffer::read(void *data) const
{
@@ -55,24 +65,73 @@ void VKVertexBuffer::acquire_data()
}
/* Discard previous data if any. */
/* TODO: Use mapped memory. */
MEM_SAFE_FREE(data);
data = (uchar *)MEM_mallocN(sizeof(uchar) * this->size_alloc_get(), __func__);
}
void VKVertexBuffer::resize_data() {}
void VKVertexBuffer::resize_data()
{
if (usage_ == GPU_USAGE_DEVICE_ONLY) {
return;
}
data = (uchar *)MEM_reallocN(data, sizeof(uchar) * this->size_alloc_get());
}
void VKVertexBuffer::release_data()
{
MEM_SAFE_FREE(data);
}
void VKVertexBuffer::upload_data() {}
static bool inplace_conversion_supported(const GPUUsageType &usage)
{
return ELEM(usage, GPU_USAGE_STATIC, GPU_USAGE_STREAM);
}
void VKVertexBuffer::duplicate_data(VertBuf * /*dst*/) {}
void *VKVertexBuffer::convert() const
{
void *out_data = data;
if (!inplace_conversion_supported(usage_)) {
out_data = MEM_dupallocN(out_data);
}
BLI_assert(format.deinterleaved);
convert_in_place(out_data, format, vertex_len);
return out_data;
}
void VKVertexBuffer::upload_data()
{
if (!buffer_.is_allocated()) {
allocate();
}
if (flag & GPU_VERTBUF_DATA_DIRTY) {
void *data_to_upload = data;
if (conversion_needed(format)) {
data_to_upload = convert();
}
buffer_.update(data_to_upload);
if (data_to_upload != data) {
MEM_SAFE_FREE(data_to_upload);
}
if (usage_ == GPU_USAGE_STATIC) {
MEM_SAFE_FREE(data);
}
flag &= ~GPU_VERTBUF_DATA_DIRTY;
flag |= GPU_VERTBUF_DATA_UPLOADED;
}
}
void VKVertexBuffer::duplicate_data(VertBuf * /*dst*/)
{
NOT_YET_IMPLEMENTED
}
void VKVertexBuffer::allocate()
{
buffer_.create(size_used_get(),
buffer_.create(size_alloc_get(),
usage_,
static_cast<VkBufferUsageFlagBits>(VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));

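The upload path above follows the usual dirty-flag scheme: convert only when the vertex format requires it, upload, then free the host data for GPU_USAGE_STATIC buffers that will not be re-uploaded. A reduced model of the flag handling (illustrative constants, not the real GPU_VERTBUF_* values):
#include <cstdint>
#include <cstdio>
enum : uint8_t { DATA_DIRTY = 1 << 0, DATA_UPLOADED = 1 << 1 };
struct VertBufModel {
  uint8_t flag = DATA_DIRTY;
  void upload()
  {
    if ((flag & DATA_DIRTY) == 0) {
      return; /* Nothing changed since the last upload. */
    }
    /* ...convert when needed, then copy to the GPU buffer... */
    flag &= ~DATA_DIRTY;
    flag |= DATA_UPLOADED;
  }
};
int main()
{
  VertBufModel vbo;
  vbo.upload();             /* Performs the copy. */
  vbo.upload();             /* Early-out: no longer dirty. */
  printf("%u\n", vbo.flag); /* 2 = uploaded, not dirty. */
  return 0;
}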
View File

@@ -28,6 +28,7 @@ class VKVertexBuffer : public VertBuf {
VkBuffer vk_handle() const
{
BLI_assert(buffer_.is_allocated());
return buffer_.vk_handle();
}
@@ -40,6 +41,12 @@ class VKVertexBuffer : public VertBuf {
private:
void allocate();
void *convert() const;
};
static inline VKVertexBuffer *unwrap(VertBuf *vertex_buffer)
{
return static_cast<VKVertexBuffer *>(vertex_buffer);
}
} // namespace blender::gpu

View File

@@ -615,6 +615,20 @@ static void rna_float_print(FILE *f, float num)
}
}
static const char *rna_ui_scale_type_string(const PropertyScaleType type)
{
switch (type) {
case PROP_SCALE_LINEAR:
return "PROP_SCALE_LINEAR";
case PROP_SCALE_LOG:
return "PROP_SCALE_LOG";
case PROP_SCALE_CUBIC:
return "PROP_SCALE_CUBIC";
}
BLI_assert_unreachable();
return "";
}
static void rna_int_print(FILE *f, int64_t num)
{
if (num == INT_MIN) {
@@ -4164,8 +4178,7 @@ static void rna_generate_property(FILE *f, StructRNA *srna, const char *nest, Pr
rna_function_string(fprop->getarray_ex),
rna_function_string(fprop->setarray_ex),
rna_function_string(fprop->range_ex));
rna_float_print(f, fprop->ui_scale_type);
fprintf(f, ", ");
fprintf(f, "%s, ", rna_ui_scale_type_string(fprop->ui_scale_type));
rna_float_print(f, fprop->softmin);
fprintf(f, ", ");
rna_float_print(f, fprop->softmax);

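Context for the makesrna change above: the generated C source previously printed `ui_scale_type` through rna_float_print(), i.e. as a float literal; it now emits the named constant. A hypothetical before/after of one generated initializer fragment (values illustrative):
/* Hypothetical generated output for a float property:
 *   before: ..., rna_range_ex, 0.0, -10000.0f, 10000.0f, ...
 *   after:  ..., rna_range_ex, PROP_SCALE_LINEAR, -10000.0f, 10000.0f, ...
 * Emitting the enum identifier keeps the generated code type-correct instead
 * of round-tripping the enum through a float. */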
View File

@@ -630,7 +630,7 @@ const EnumPropertyItem rna_enum_transform_orientation_items[] = {
"Align the transformation axes to the 3D cursor"},
{V3D_ORIENT_PARENT,
"PARENT",
ICON_BLANK1,
ICON_ORIENTATION_PARENT,
"Parent",
"Align the transformation axes to the object's parent space"},
// {V3D_ORIENT_CUSTOM, "CUSTOM", 0, "Custom", "Use a custom transform orientation"},

View File

@@ -109,9 +109,17 @@ static void rna_Operator_enum_search_invoke(bContext *C, wmOperator *op)
WM_enum_search_invoke(C, op, NULL);
}
static bool rna_event_modal_handler_add(struct bContext *C, struct wmOperator *operator)
static bool rna_event_modal_handler_add(
struct bContext *C, ReportList *reports, struct wmOperator *operator)
{
return WM_event_add_modal_handler(C, operator) != NULL;
wmWindow *win = CTX_wm_window(C);
if (win == NULL) {
BKE_report(reports, RPT_ERROR, "No active window in context!");
return false;
}
ScrArea *area = CTX_wm_area(C);
ARegion *region = CTX_wm_region(C);
return WM_event_add_modal_handler_ex(win, area, region, operator) != NULL;
}
/* XXX, need a way for python to know event types, 0x0110 is hard coded */
@@ -776,7 +784,7 @@ void RNA_api_wm(StructRNA *srna)
func,
"Add a modal handler to the window manager, for the given modal operator "
"(called by invoke() with self, just before returning {'RUNNING_MODAL'})");
RNA_def_function_flag(func, FUNC_NO_SELF | FUNC_USE_CONTEXT);
RNA_def_function_flag(func, FUNC_NO_SELF | FUNC_USE_CONTEXT | FUNC_USE_REPORTS);
parm = RNA_def_pointer(func, "operator", "Operator", "", "Operator to call");
RNA_def_parameter_flags(parm, 0, PARM_REQUIRED);
RNA_def_function_return(

View File

@@ -349,6 +349,7 @@ DefNode(GeometryNode, GEO_NODE_INPUT_RADIUS, 0, "INPUT_RADIUS", InputRadius, "Ra
DefNode(GeometryNode, GEO_NODE_INPUT_SCENE_TIME, 0, "INPUT_SCENE_TIME", InputSceneTime, "Scene Time", "Retrieve the current time in the scene's animation in units of seconds or frames")
DefNode(GeometryNode, GEO_NODE_INPUT_SHADE_SMOOTH, 0, "INPUT_SHADE_SMOOTH", InputShadeSmooth, "Is Shade Smooth", "Retrieve whether each face is marked for smooth shading")
DefNode(GeometryNode, GEO_NODE_INPUT_SHORTEST_EDGE_PATHS, 0, "SHORTEST_EDGE_PATHS", InputShortestEdgePaths, "Shortest Edge Paths", "")
DefNode(GeometryNode, GEO_NODE_INPUT_SIGNED_DISTANCE, 0, "SIGNED_DISTANCE", InputSignedDistance, "Signed Distance", "Retrieve the signed distance field grid called 'distance' from a volume")
DefNode(GeometryNode, GEO_NODE_INPUT_SPLINE_CYCLIC, 0, "INPUT_SPLINE_CYCLIC",InputSplineCyclic, "Is Spline Cyclic", "Retrieve whether each spline endpoint connects to the beginning")
DefNode(GeometryNode, GEO_NODE_INPUT_SPLINE_LENGTH, 0, "SPLINE_LENGTH", SplineLength, "Spline Length", "Retrieve the total length of each spline, as a distance or as a number of points")
DefNode(GeometryNode, GEO_NODE_INPUT_SPLINE_RESOLUTION, 0, "INPUT_SPLINE_RESOLUTION", InputSplineResolution, "Spline Resolution", "Retrieve the number of evaluated points that will be generated for every control point on curves")

View File

@@ -102,6 +102,7 @@ set(SRC
nodes/node_geo_input_scene_time.cc
nodes/node_geo_input_shade_smooth.cc
nodes/node_geo_input_shortest_edge_paths.cc
nodes/node_geo_input_signed_distance.cc
nodes/node_geo_input_spline_cyclic.cc
nodes/node_geo_input_spline_length.cc
nodes/node_geo_input_spline_resolution.cc

View File

@@ -86,6 +86,7 @@ void register_geometry_nodes()
register_node_type_geo_input_scene_time();
register_node_type_geo_input_shade_smooth();
register_node_type_geo_input_shortest_edge_paths();
register_node_type_geo_input_signed_distance();
register_node_type_geo_input_spline_cyclic();
register_node_type_geo_input_spline_length();
register_node_type_geo_input_spline_resolution();

View File

@@ -83,6 +83,7 @@ void register_node_type_geo_input_radius();
void register_node_type_geo_input_scene_time();
void register_node_type_geo_input_shade_smooth();
void register_node_type_geo_input_shortest_edge_paths();
void register_node_type_geo_input_signed_distance();
void register_node_type_geo_input_spline_cyclic();
void register_node_type_geo_input_spline_length();
void register_node_type_geo_input_spline_resolution();

View File

@@ -334,10 +334,8 @@ class SampleCurveFunction : public mf::MultiFunction {
sampled_normals.fill_indices(mask.indices(), float3(0));
}
if (!sampled_values.is_empty()) {
bke::attribute_math::convert_to_static_type(source_data_->type(), [&](auto dummy) {
using T = decltype(dummy);
sampled_values.typed<T>().fill_indices(mask.indices(), {});
});
const CPPType &type = sampled_values.type();
type.fill_construct_indices(type.default_value(), sampled_values.data(), mask);
}
};

View File

@@ -149,52 +149,58 @@ static void transfer_attributes(
});
for (const AttributeIDRef &id : attribute_ids) {
GAttributeReader src_attribute = src_attributes.lookup(id);
GAttributeReader src = src_attributes.lookup(id);
eAttrDomain out_domain;
if (src_attribute.domain == ATTR_DOMAIN_FACE) {
if (src.domain == ATTR_DOMAIN_FACE) {
out_domain = ATTR_DOMAIN_POINT;
}
else if (src_attribute.domain == ATTR_DOMAIN_POINT) {
else if (src.domain == ATTR_DOMAIN_POINT) {
out_domain = ATTR_DOMAIN_FACE;
}
else {
/* Edges and Face Corners. */
out_domain = src_attribute.domain;
out_domain = src.domain;
}
const eCustomDataType data_type = bke::cpp_type_to_custom_data_type(
src_attribute.varray.type());
GSpanAttributeWriter dst_attribute = dst_attributes.lookup_or_add_for_write_only_span(
const eCustomDataType data_type = bke::cpp_type_to_custom_data_type(src.varray.type());
GSpanAttributeWriter dst = dst_attributes.lookup_or_add_for_write_only_span(
id, out_domain, data_type);
if (!dst_attribute) {
if (!dst) {
continue;
}
bke::attribute_math::convert_to_static_type(data_type, [&](auto dummy) {
using T = decltype(dummy);
VArraySpan<T> span{src_attribute.varray.typed<T>()};
MutableSpan<T> dst_span = dst_attribute.span.typed<T>();
switch (src_attribute.domain) {
case ATTR_DOMAIN_POINT:
copy_data_based_on_vertex_types(span, dst_span, vertex_types, keep_boundaries);
break;
case ATTR_DOMAIN_EDGE:
array_utils::gather(span, new_to_old_edges_map, dst_span);
break;
case ATTR_DOMAIN_FACE:
dst_span.take_front(span.size()).copy_from(span);
if (keep_boundaries) {
copy_data_based_on_pairs(span, dst_span, boundary_vertex_to_relevant_face_map);
}
break;
case ATTR_DOMAIN_CORNER:
array_utils::gather(span, new_to_old_face_corners_map, dst_span);
break;
default:
BLI_assert_unreachable();
switch (src.domain) {
case ATTR_DOMAIN_POINT: {
const GVArraySpan src_span(*src);
bke::attribute_math::convert_to_static_type(data_type, [&](auto dummy) {
using T = decltype(dummy);
copy_data_based_on_vertex_types(
src_span.typed<T>(), dst.span.typed<T>(), vertex_types, keep_boundaries);
});
break;
}
});
dst_attribute.finish();
case ATTR_DOMAIN_EDGE:
bke::attribute_math::gather(*src, new_to_old_edges_map, dst.span);
break;
case ATTR_DOMAIN_FACE: {
const GVArraySpan src_span(*src);
dst.span.take_front(src_span.size()).copy_from(src_span);
bke::attribute_math::convert_to_static_type(data_type, [&](auto dummy) {
using T = decltype(dummy);
if (keep_boundaries) {
copy_data_based_on_pairs(
src_span.typed<T>(), dst.span.typed<T>(), boundary_vertex_to_relevant_face_map);
}
});
break;
}
case ATTR_DOMAIN_CORNER:
bke::attribute_math::gather(*src, new_to_old_face_corners_map, dst.span);
break;
default:
BLI_assert_unreachable();
}
dst.finish();
}
}

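The refactor above routes the edge and corner domains through a generic gather, dst[i] = src[map[i]], instead of per-type dispatch. A minimal typed model of that core operation (standalone C++, illustrative data):
#include <cstdio>
#include <vector>
/* dst[i] = src[new_to_old[i]] -- the essence of attribute gathering. */
template<typename T>
void gather(const std::vector<T> &src, const std::vector<int> &new_to_old, std::vector<T> &dst)
{
  for (size_t i = 0; i < new_to_old.size(); i++) {
    dst[i] = src[new_to_old[i]];
  }
}
int main()
{
  const std::vector<float> src = {0.5f, 1.5f, 2.5f};
  const std::vector<int> new_to_old = {2, 0, 1};
  std::vector<float> dst(new_to_old.size());
  gather(src, new_to_old, dst);
  printf("%g %g %g\n", dst[0], dst[1], dst[2]); /* 2.5 0.5 1.5 */
  return 0;
}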
Some files were not shown because too many files have changed in this diff.