Compare commits

..

1 Commit

Author SHA1 Message Date
e94cd5eeed T94900: Sequence drawing errors on selected platforms.
The VSE only uses an overlay texture to draw to the screen. The
GPUViewport assumes that drivers would clear textures when created, but
they do not on selected platforms, which could lead to drawing from
uncleared memory.

Selecting which layers contain correct data allows us to use
dummy textures in these cases, which eliminates the drawing
artifacts.
2022-01-26 13:36:52 +01:00
252 changed files with 2939 additions and 7060 deletions

View File

@@ -31,7 +31,7 @@ ExternalProject_Add(external_python_site_packages
CONFIGURE_COMMAND ${PIP_CONFIGURE_COMMAND}
BUILD_COMMAND ""
PREFIX ${BUILD_DIR}/site_packages
INSTALL_COMMAND ${PYTHON_BINARY} -m pip install --no-cache-dir ${SITE_PACKAGES_EXTRA} cython==${CYTHON_VERSION} idna==${IDNA_VERSION} charset-normalizer==${CHARSET_NORMALIZER_VERSION} urllib3==${URLLIB3_VERSION} certifi==${CERTIFI_VERSION} requests==${REQUESTS_VERSION} zstandard==${ZSTANDARD_VERSION} --no-binary :all:
INSTALL_COMMAND ${PYTHON_BINARY} -m pip install ${SITE_PACKAGES_EXTRA} cython==${CYTHON_VERSION} idna==${IDNA_VERSION} charset-normalizer==${CHARSET_NORMALIZER_VERSION} urllib3==${URLLIB3_VERSION} certifi==${CERTIFI_VERSION} requests==${REQUESTS_VERSION} zstandard==${ZSTANDARD_VERSION} --no-binary :all:
)
if(USE_PIP_NUMPY)

View File

@@ -189,11 +189,11 @@ set(OSL_HASH 1abd7ce40481771a9fa937f19595d2f2)
set(OSL_HASH_TYPE MD5)
set(OSL_FILE OpenShadingLanguage-${OSL_VERSION}.tar.gz)
set(PYTHON_VERSION 3.10.2)
set(PYTHON_SHORT_VERSION 3.10)
set(PYTHON_SHORT_VERSION_NO_DOTS 310)
set(PYTHON_VERSION 3.9.7)
set(PYTHON_SHORT_VERSION 3.9)
set(PYTHON_SHORT_VERSION_NO_DOTS 39)
set(PYTHON_URI https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tar.xz)
set(PYTHON_HASH 14e8c22458ed7779a1957b26cde01db9)
set(PYTHON_HASH fddb060b483bc01850a3f412eea1d954)
set(PYTHON_HASH_TYPE MD5)
set(PYTHON_FILE Python-${PYTHON_VERSION}.tar.xz)
@@ -215,20 +215,18 @@ set(NANOVDB_HASH e7b9e863ec2f3b04ead171dec2322807)
set(NANOVDB_HASH_TYPE MD5)
set(NANOVDB_FILE nano-vdb-${NANOVDB_GIT_UID}.tar.gz)
set(IDNA_VERSION 3.3)
set(CHARSET_NORMALIZER_VERSION 2.0.10)
set(URLLIB3_VERSION 1.26.8)
set(IDNA_VERSION 3.2)
set(CHARSET_NORMALIZER_VERSION 2.0.6)
set(URLLIB3_VERSION 1.26.7)
set(CERTIFI_VERSION 2021.10.8)
set(REQUESTS_VERSION 2.27.1)
set(CYTHON_VERSION 0.29.26)
# The version of the zstd library used to build the Python package should match ZSTD_VERSION defined below.
# At this time of writing, 0.17.0 was already released, but built against zstd 1.5.1, while we use 1.5.0.
set(ZSTANDARD_VERSION 0.16.0)
set(REQUESTS_VERSION 2.26.0)
set(CYTHON_VERSION 0.29.24)
set(ZSTANDARD_VERSION 0.15.2 )
set(NUMPY_VERSION 1.22.0)
set(NUMPY_SHORT_VERSION 1.22)
set(NUMPY_VERSION 1.21.2)
set(NUMPY_SHORT_VERSION 1.21)
set(NUMPY_URI https://github.com/numpy/numpy/releases/download/v${NUMPY_VERSION}/numpy-${NUMPY_VERSION}.zip)
set(NUMPY_HASH 252de134862a27bd66705d29622edbfe)
set(NUMPY_HASH 5638d5dae3ca387be562912312db842e)
set(NUMPY_HASH_TYPE MD5)
set(NUMPY_FILE numpy-${NUMPY_VERSION}.zip)

View File

@@ -379,27 +379,27 @@ USE_CXX11=true
CLANG_FORMAT_VERSION_MIN="6.0"
CLANG_FORMAT_VERSION_MEX="10.0"
PYTHON_VERSION="3.10.2"
PYTHON_VERSION_SHORT="3.10"
PYTHON_VERSION_MIN="3.9"
PYTHON_VERSION_MEX="3.12"
PYTHON_VERSION="3.9.7"
PYTHON_VERSION_SHORT="3.9"
PYTHON_VERSION_MIN="3.7"
PYTHON_VERSION_MEX="3.11"
PYTHON_VERSION_INSTALLED=$PYTHON_VERSION_SHORT
PYTHON_FORCE_BUILD=false
PYTHON_FORCE_REBUILD=false
PYTHON_SKIP=false
# Additional Python modules.
PYTHON_IDNA_VERSION="3.3"
PYTHON_IDNA_VERSION="3.2"
PYTHON_IDNA_VERSION_MIN="2.0"
PYTHON_IDNA_VERSION_MEX="4.0"
PYTHON_IDNA_NAME="idna"
PYTHON_CHARSET_NORMALIZER_VERSION="2.0.10"
PYTHON_CHARSET_NORMALIZER_VERSION="2.0.6"
PYTHON_CHARSET_NORMALIZER_VERSION_MIN="2.0.6"
PYTHON_CHARSET_NORMALIZER_VERSION_MEX="2.1.0" # requests uses `charset_normalizer~=2.0.0`
PYTHON_CHARSET_NORMALIZER_NAME="charset-normalizer"
PYTHON_URLLIB3_VERSION="1.26.8"
PYTHON_URLLIB3_VERSION="1.26.7"
PYTHON_URLLIB3_VERSION_MIN="1.0"
PYTHON_URLLIB3_VERSION_MEX="2.0"
PYTHON_URLLIB3_NAME="urllib3"
@@ -409,17 +409,17 @@ PYTHON_CERTIFI_VERSION_MIN="2021.0"
PYTHON_CERTIFI_VERSION_MEX="2023.0"
PYTHON_CERTIFI_NAME="certifi"
PYTHON_REQUESTS_VERSION="2.27.1"
PYTHON_REQUESTS_VERSION="2.23.0"
PYTHON_REQUESTS_VERSION_MIN="2.0"
PYTHON_REQUESTS_VERSION_MEX="3.0"
PYTHON_REQUESTS_NAME="requests"
PYTHON_ZSTANDARD_VERSION="0.16.0"
PYTHON_ZSTANDARD_VERSION="0.15.2"
PYTHON_ZSTANDARD_VERSION_MIN="0.15.2"
PYTHON_ZSTANDARD_VERSION_MEX="0.20.0"
PYTHON_ZSTANDARD_VERSION_MEX="0.16.0"
PYTHON_ZSTANDARD_NAME="zstandard"
PYTHON_NUMPY_VERSION="1.22.0"
PYTHON_NUMPY_VERSION="1.21.2"
PYTHON_NUMPY_VERSION_MIN="1.14"
PYTHON_NUMPY_VERSION_MEX="2.0"
PYTHON_NUMPY_NAME="numpy"
@@ -499,7 +499,7 @@ LLVM_FORCE_REBUILD=false
LLVM_SKIP=false
# OSL needs to be compiled for now!
OSL_VERSION="1.11.17.0"
OSL_VERSION="1.11.14.1"
OSL_VERSION_SHORT="1.11"
OSL_VERSION_MIN="1.11"
OSL_VERSION_MEX="2.0"

View File

@@ -128,20 +128,25 @@ if(WITH_CODEC_SNDFILE)
endif()
if(WITH_PYTHON)
# Use precompiled libraries by default.
set(PYTHON_VERSION 3.10)
# we use precompiled libraries for py 3.9 and up by default
set(PYTHON_VERSION 3.9)
if(NOT WITH_PYTHON_MODULE AND NOT WITH_PYTHON_FRAMEWORK)
# Normally cached but not since we include them with blender.
# normally cached but not since we include them with blender
set(PYTHON_INCLUDE_DIR "${LIBDIR}/python/include/python${PYTHON_VERSION}")
set(PYTHON_EXECUTABLE "${LIBDIR}/python/bin/python${PYTHON_VERSION}")
set(PYTHON_LIBRARY ${LIBDIR}/python/lib/libpython${PYTHON_VERSION}.a)
set(PYTHON_LIBPATH "${LIBDIR}/python/lib/python${PYTHON_VERSION}")
# set(PYTHON_LINKFLAGS "-u _PyMac_Error") # won't build with this enabled
else()
# Module must be compiled against Python framework.
# module must be compiled against Python framework
set(_py_framework "/Library/Frameworks/Python.framework/Versions/${PYTHON_VERSION}")
set(PYTHON_INCLUDE_DIR "${_py_framework}/include/python${PYTHON_VERSION}")
set(PYTHON_EXECUTABLE "${_py_framework}/bin/python${PYTHON_VERSION}")
set(PYTHON_LIBPATH "${_py_framework}/lib/python${PYTHON_VERSION}")
# set(PYTHON_LIBRARY python${PYTHON_VERSION})
# set(PYTHON_LINKFLAGS "-u _PyMac_Error -framework Python") # won't build with this enabled
unset(_py_framework)
endif()

View File

@@ -55,10 +55,6 @@ if(CMAKE_C_COMPILER_ID MATCHES "Clang")
message(WARNING "stripped pdb not supported with clang, disabling..")
set(WITH_WINDOWS_STRIPPED_PDB OFF)
endif()
else()
if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 19.28.29921) # MSVC 2019 16.9.16
message(FATAL_ERROR "Compiler is unsupported, MSVC 2019 16.9.16 or newer is required for building blender.")
endif()
endif()
if(NOT WITH_PYTHON_MODULE)
@@ -269,6 +265,12 @@ if(NOT DEFINED LIBDIR)
elseif(MSVC_VERSION GREATER 1919)
message(STATUS "Visual Studio 2019 detected.")
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc15)
elseif(MSVC_VERSION GREATER 1909)
message(STATUS "Visual Studio 2017 detected.")
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc15)
elseif(MSVC_VERSION EQUAL 1900)
message(STATUS "Visual Studio 2015 detected.")
set(LIBDIR ${CMAKE_SOURCE_DIR}/../lib/${LIBDIR_BASE}_vc15)
endif()
else()
message(STATUS "Using pre-compiled LIBDIR: ${LIBDIR}")
@@ -463,7 +465,7 @@ if(WITH_JACK)
endif()
if(WITH_PYTHON)
set(PYTHON_VERSION 3.10) # CACHE STRING)
set(PYTHON_VERSION 3.9) # CACHE STRING)
string(REPLACE "." "" _PYTHON_VERSION_NO_DOTS ${PYTHON_VERSION})
set(PYTHON_LIBRARY ${LIBDIR}/python/${_PYTHON_VERSION_NO_DOTS}/libs/python${_PYTHON_VERSION_NO_DOTS}.lib)

View File

@@ -3,6 +3,9 @@ echo No explicit msvc version requested, autodetecting version.
call "%~dp0\detect_msvc2019.cmd"
if %ERRORLEVEL% EQU 0 goto DetectionComplete
call "%~dp0\detect_msvc2017.cmd"
if %ERRORLEVEL% EQU 0 goto DetectionComplete
call "%~dp0\detect_msvc2022.cmd"
if %ERRORLEVEL% EQU 0 goto DetectionComplete

View File

@@ -1,3 +1,4 @@
if "%BUILD_VS_YEAR%"=="2017" set BUILD_VS_LIBDIRPOST=vc15
if "%BUILD_VS_YEAR%"=="2019" set BUILD_VS_LIBDIRPOST=vc15
if "%BUILD_VS_YEAR%"=="2022" set BUILD_VS_LIBDIRPOST=vc15

View File

@@ -19,6 +19,12 @@ if "%WITH_PYDEBUG%"=="1" (
set PYDEBUG_CMAKE_ARGS=-DWINDOWS_PYTHON_DEBUG=On
)
if "%BUILD_VS_YEAR%"=="2017" (
set BUILD_GENERATOR_POST=%WINDOWS_ARCH%
) else (
set BUILD_PLATFORM_SELECT=-A %MSBUILD_PLATFORM%
)
set BUILD_CMAKE_ARGS=%BUILD_CMAKE_ARGS% -G "Visual Studio %BUILD_VS_VER% %BUILD_VS_YEAR%%BUILD_GENERATOR_POST%" %BUILD_PLATFORM_SELECT% %TESTS_CMAKE_ARGS% %CLANG_CMAKE_ARGS% %ASAN_CMAKE_ARGS% %PYDEBUG_CMAKE_ARGS%
if NOT EXIST %BUILD_DIR%\nul (

View File

@@ -37,9 +37,15 @@ set LLVM_DIR=
:DetectionComplete
set CC=%LLVM_DIR%\bin\clang-cl
set CXX=%LLVM_DIR%\bin\clang-cl
rem build and tested against 2019 16.2
set CFLAGS=-m64 -fmsc-version=1922
set CXXFLAGS=-m64 -fmsc-version=1922
if "%BUILD_VS_YEAR%" == "2019" (
rem build and tested against 2019 16.2
set CFLAGS=-m64 -fmsc-version=1922
set CXXFLAGS=-m64 -fmsc-version=1922
) else (
rem build and tested against 2017 15.7
set CFLAGS=-m64 -fmsc-version=1914
set CXXFLAGS=-m64 -fmsc-version=1914
)
)
if "%WITH_ASAN%"=="1" (

View File

@@ -0,0 +1,3 @@
set BUILD_VS_VER=15
set BUILD_VS_YEAR=2017
call "%~dp0\detect_msvc_vswhere.cmd"

View File

@@ -50,6 +50,14 @@ if NOT "%1" == "" (
goto ERR
) else if "%1" == "x64" (
set BUILD_ARCH=x64
) else if "%1" == "2017" (
set BUILD_VS_YEAR=2017
) else if "%1" == "2017pre" (
set BUILD_VS_YEAR=2017
set VSWHERE_ARGS=-prerelease
) else if "%1" == "2017b" (
set BUILD_VS_YEAR=2017
set VSWHERE_ARGS=-products Microsoft.VisualStudio.Product.BuildTools
) else if "%1" == "2019" (
set BUILD_VS_YEAR=2019
) else if "%1" == "2019pre" (

View File

@@ -24,12 +24,12 @@ echo - nobuildinfo ^(disable buildinfo^)
echo - debug ^(Build an unoptimized debuggable build^)
echo - packagename [newname] ^(override default cpack package name^)
echo - builddir [newdir] ^(override default build folder^)
echo - 2017 ^(build with visual studio 2017^)
echo - 2017pre ^(build with visual studio 2017 pre-release^)
echo - 2017b ^(build with visual studio 2017 Build Tools^)
echo - 2019 ^(build with visual studio 2019^)
echo - 2019pre ^(build with visual studio 2019 pre-release^)
echo - 2019b ^(build with visual studio 2019 Build Tools^)
echo - 2022 ^(build with visual studio 2022^)
echo - 2022pre ^(build with visual studio 2022 pre-release^)
echo - 2022b ^(build with visual studio 2022 Build Tools^)
echo.
echo Documentation Targets ^(Not associated with building^)

View File

@@ -1,3 +1,4 @@
if "%BUILD_VS_YEAR%"=="2017" set BUILD_VS_LIBDIRPOST=vc15
if "%BUILD_VS_YEAR%"=="2019" set BUILD_VS_LIBDIRPOST=vc15
if "%BUILD_VS_YEAR%"=="2022" set BUILD_VS_LIBDIRPOST=vc15

View File

@@ -61,26 +61,6 @@ static_assert(Object::MAX_MOTION_STEPS == Geometry::MAX_MOTION_STEPS,
# define IS_HAIR(x) (x & 1)
/* This gets called by Embree at every valid ray/object intersection.
* Things like recording subsurface or shadow hits for later evaluation
* as well as filtering for volume objects happen here.
* Cycles' own BVH does that directly inside the traversal calls.
*/
static void rtc_filter_intersection_func(const RTCFilterFunctionNArguments *args)
{
/* Current implementation in Cycles assumes only single-ray intersection queries. */
assert(args->N == 1);
RTCHit *hit = (RTCHit *)args->hit;
CCLIntersectContext *ctx = ((IntersectContext *)args->context)->userRayExt;
const KernelGlobalsCPU *kg = ctx->kg;
const Ray *cray = ctx->ray;
if (kernel_embree_is_self_intersection(kg, hit, cray)) {
*args->valid = 0;
}
}
/* This gets called by Embree at every valid ray/object intersection.
* Things like recording subsurface or shadow hits for later evaluation
* as well as filtering for volume objects happen here.
@@ -95,16 +75,12 @@ static void rtc_filter_occluded_func(const RTCFilterFunctionNArguments *args)
RTCHit *hit = (RTCHit *)args->hit;
CCLIntersectContext *ctx = ((IntersectContext *)args->context)->userRayExt;
const KernelGlobalsCPU *kg = ctx->kg;
const Ray *cray = ctx->ray;
switch (ctx->type) {
case CCLIntersectContext::RAY_SHADOW_ALL: {
Intersection current_isect;
kernel_embree_convert_hit(kg, ray, hit, &current_isect);
if (intersection_skip_self_shadow(cray->self, current_isect.object, current_isect.prim)) {
*args->valid = 0;
return;
}
/* If no transparent shadows or max number of hits exceeded, all light is blocked. */
const int flags = intersection_get_shader_flags(kg, current_isect.prim, current_isect.type);
if (!(flags & (SD_HAS_TRANSPARENT_SHADOW)) || ctx->num_hits >= ctx->max_hits) {
@@ -184,10 +160,6 @@ static void rtc_filter_occluded_func(const RTCFilterFunctionNArguments *args)
break;
}
}
if (intersection_skip_self_local(cray->self, current_isect.prim)) {
*args->valid = 0;
return;
}
/* No intersection information requested, just return a hit. */
if (ctx->max_hits == 0) {
@@ -253,11 +225,6 @@ static void rtc_filter_occluded_func(const RTCFilterFunctionNArguments *args)
if (ctx->num_hits < ctx->max_hits) {
Intersection current_isect;
kernel_embree_convert_hit(kg, ray, hit, &current_isect);
if (intersection_skip_self(cray->self, current_isect.object, current_isect.prim)) {
*args->valid = 0;
return;
}
Intersection *isect = &ctx->isect_s[ctx->num_hits];
++ctx->num_hits;
*isect = current_isect;
@@ -269,15 +236,12 @@ static void rtc_filter_occluded_func(const RTCFilterFunctionNArguments *args)
}
/* This tells Embree to continue tracing. */
*args->valid = 0;
break;
}
break;
}
case CCLIntersectContext::RAY_REGULAR:
default:
if (kernel_embree_is_self_intersection(kg, hit, cray)) {
*args->valid = 0;
return;
}
/* Nothing to do here. */
break;
}
}
@@ -293,14 +257,6 @@ static void rtc_filter_func_backface_cull(const RTCFilterFunctionNArguments *arg
*args->valid = 0;
return;
}
CCLIntersectContext *ctx = ((IntersectContext *)args->context)->userRayExt;
const KernelGlobalsCPU *kg = ctx->kg;
const Ray *cray = ctx->ray;
if (kernel_embree_is_self_intersection(kg, hit, cray)) {
*args->valid = 0;
}
}
static void rtc_filter_occluded_func_backface_cull(const RTCFilterFunctionNArguments *args)
@@ -549,7 +505,6 @@ void BVHEmbree::add_triangles(const Object *ob, const Mesh *mesh, int i)
rtcSetGeometryUserData(geom_id, (void *)prim_offset);
rtcSetGeometryOccludedFilterFunction(geom_id, rtc_filter_occluded_func);
rtcSetGeometryIntersectFilterFunction(geom_id, rtc_filter_intersection_func);
rtcSetGeometryMask(geom_id, ob->visibility_for_tracing());
rtcCommitGeometry(geom_id);
@@ -812,7 +767,6 @@ void BVHEmbree::add_curves(const Object *ob, const Hair *hair, int i)
rtcSetGeometryUserData(geom_id, (void *)prim_offset);
if (hair->curve_shape == CURVE_RIBBON) {
rtcSetGeometryIntersectFilterFunction(geom_id, rtc_filter_intersection_func);
rtcSetGeometryOccludedFilterFunction(geom_id, rtc_filter_occluded_func);
}
else {

View File

@@ -226,7 +226,7 @@ bool OptiXDevice::load_kernels(const uint kernel_features)
pipeline_options.usesMotionBlur = false;
pipeline_options.traversableGraphFlags =
OPTIX_TRAVERSABLE_GRAPH_FLAG_ALLOW_SINGLE_LEVEL_INSTANCING;
pipeline_options.numPayloadValues = 8;
pipeline_options.numPayloadValues = 6;
pipeline_options.numAttributeValues = 2; /* u, v */
pipeline_options.exceptionFlags = OPTIX_EXCEPTION_FLAG_NONE;
pipeline_options.pipelineLaunchParamsVariableName = "__params"; /* See globals.h */

View File

@@ -173,16 +173,15 @@ ccl_device_intersect bool scene_intersect(KernelGlobals kg,
uint p3 = 0;
uint p4 = visibility;
uint p5 = PRIMITIVE_NONE;
uint p6 = ((uint64_t)ray) & 0xFFFFFFFF;
uint p7 = (((uint64_t)ray) >> 32) & 0xFFFFFFFF;
uint ray_mask = visibility & 0xFF;
uint ray_flags = OPTIX_RAY_FLAG_ENFORCE_ANYHIT;
uint ray_flags = OPTIX_RAY_FLAG_NONE;
if (0 == ray_mask && (visibility & ~0xFF) != 0) {
ray_mask = 0xFF;
ray_flags = OPTIX_RAY_FLAG_ENFORCE_ANYHIT;
}
else if (visibility & PATH_RAY_SHADOW_OPAQUE) {
ray_flags |= OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT;
ray_flags = OPTIX_RAY_FLAG_TERMINATE_ON_FIRST_HIT;
}
optixTrace(scene_intersect_valid(ray) ? kernel_data.bvh.scene : 0,
@@ -201,9 +200,7 @@ ccl_device_intersect bool scene_intersect(KernelGlobals kg,
p2,
p3,
p4,
p5,
p6,
p7);
p5);
isect->t = __uint_as_float(p0);
isect->u = __uint_as_float(p1);
@@ -245,7 +242,6 @@ ccl_device_intersect bool scene_intersect(KernelGlobals kg,
}
MetalRTIntersectionPayload payload;
payload.self = ray->self;
payload.u = 0.0f;
payload.v = 0.0f;
payload.visibility = visibility;
@@ -313,7 +309,6 @@ ccl_device_intersect bool scene_intersect(KernelGlobals kg,
CCLIntersectContext ctx(kg, CCLIntersectContext::RAY_REGULAR);
IntersectContext rtc_ctx(&ctx);
RTCRayHit ray_hit;
ctx.ray = ray;
kernel_embree_setup_rayhit(*ray, ray_hit, visibility);
rtcIntersect1(kernel_data.bvh.scene, &rtc_ctx.context, &ray_hit);
if (ray_hit.hit.geomID != RTC_INVALID_GEOMETRY_ID &&
@@ -361,9 +356,6 @@ ccl_device_intersect bool scene_intersect_local(KernelGlobals kg,
uint p2 = pointer_pack_to_uint_0(local_isect);
uint p3 = pointer_pack_to_uint_1(local_isect);
uint p4 = local_object;
uint p6 = ((uint64_t)ray) & 0xFFFFFFFF;
uint p7 = (((uint64_t)ray) >> 32) & 0xFFFFFFFF;
/* Is set to zero on miss or if ray is aborted, so can be used as return value. */
uint p5 = max_hits;
@@ -387,9 +379,7 @@ ccl_device_intersect bool scene_intersect_local(KernelGlobals kg,
p2,
p3,
p4,
p5,
p6,
p7);
p5);
return p5;
# elif defined(__METALRT__)
@@ -427,7 +417,6 @@ ccl_device_intersect bool scene_intersect_local(KernelGlobals kg,
}
MetalRTIntersectionLocalPayload payload;
payload.self = ray->self;
payload.local_object = local_object;
payload.max_hits = max_hits;
payload.local_isect.num_hits = 0;
@@ -471,7 +460,6 @@ ccl_device_intersect bool scene_intersect_local(KernelGlobals kg,
kg, has_bvh ? CCLIntersectContext::RAY_SSS : CCLIntersectContext::RAY_LOCAL);
ctx.lcg_state = lcg_state;
ctx.max_hits = max_hits;
ctx.ray = ray;
ctx.local_isect = local_isect;
if (local_isect) {
local_isect->num_hits = 0;
@@ -544,8 +532,6 @@ ccl_device_intersect bool scene_intersect_shadow_all(KernelGlobals kg,
uint p3 = max_hits;
uint p4 = visibility;
uint p5 = false;
uint p6 = ((uint64_t)ray) & 0xFFFFFFFF;
uint p7 = (((uint64_t)ray) >> 32) & 0xFFFFFFFF;
uint ray_mask = visibility & 0xFF;
if (0 == ray_mask && (visibility & ~0xFF) != 0) {
@@ -569,9 +555,7 @@ ccl_device_intersect bool scene_intersect_shadow_all(KernelGlobals kg,
p2,
p3,
p4,
p5,
p6,
p7);
p5);
*num_recorded_hits = uint16_unpack_from_uint_0(p2);
*throughput = __uint_as_float(p1);
@@ -604,7 +588,6 @@ ccl_device_intersect bool scene_intersect_shadow_all(KernelGlobals kg,
}
MetalRTIntersectionShadowPayload payload;
payload.self = ray->self;
payload.visibility = visibility;
payload.max_hits = max_hits;
payload.num_hits = 0;
@@ -651,7 +634,6 @@ ccl_device_intersect bool scene_intersect_shadow_all(KernelGlobals kg,
Intersection *isect_array = (Intersection *)state->shadow_isect;
ctx.isect_s = isect_array;
ctx.max_hits = max_hits;
ctx.ray = ray;
IntersectContext rtc_ctx(&ctx);
RTCRay rtc_ray;
kernel_embree_setup_ray(*ray, rtc_ray, visibility);
@@ -703,8 +685,6 @@ ccl_device_intersect bool scene_intersect_volume(KernelGlobals kg,
uint p3 = 0;
uint p4 = visibility;
uint p5 = PRIMITIVE_NONE;
uint p6 = ((uint64_t)ray) & 0xFFFFFFFF;
uint p7 = (((uint64_t)ray) >> 32) & 0xFFFFFFFF;
uint ray_mask = visibility & 0xFF;
if (0 == ray_mask && (visibility & ~0xFF) != 0) {
@@ -728,9 +708,7 @@ ccl_device_intersect bool scene_intersect_volume(KernelGlobals kg,
p2,
p3,
p4,
p5,
p6,
p7);
p5);
isect->t = __uint_as_float(p0);
isect->u = __uint_as_float(p1);
@@ -766,7 +744,6 @@ ccl_device_intersect bool scene_intersect_volume(KernelGlobals kg,
}
MetalRTIntersectionPayload payload;
payload.self = ray->self;
payload.visibility = visibility;
typename metalrt_intersector_type::result_type intersection;
@@ -843,7 +820,6 @@ ccl_device_intersect uint scene_intersect_volume_all(KernelGlobals kg,
ctx.isect_s = isect;
ctx.max_hits = max_hits;
ctx.num_hits = 0;
ctx.ray = ray;
IntersectContext rtc_ctx(&ctx);
RTCRay rtc_ray;
kernel_embree_setup_ray(*ray, rtc_ray, visibility);

View File

@@ -22,8 +22,6 @@
#include "kernel/device/cpu/compat.h"
#include "kernel/device/cpu/globals.h"
#include "kernel/bvh/util.h"
#include "util/vector.h"
CCL_NAMESPACE_BEGIN
@@ -40,9 +38,6 @@ struct CCLIntersectContext {
KernelGlobals kg;
RayType type;
/* For avoiding self intersections */
const Ray *ray;
/* for shadow rays */
Intersection *isect_s;
uint max_hits;
@@ -61,7 +56,6 @@ struct CCLIntersectContext {
{
kg = kg_;
type = type_;
ray = NULL;
max_hits = 1;
num_hits = 0;
num_recorded_hits = 0;
@@ -108,34 +102,7 @@ ccl_device_inline void kernel_embree_setup_rayhit(const Ray &ray,
{
kernel_embree_setup_ray(ray, rayhit.ray, visibility);
rayhit.hit.geomID = RTC_INVALID_GEOMETRY_ID;
rayhit.hit.instID[0] = RTC_INVALID_GEOMETRY_ID;
}
ccl_device_inline bool kernel_embree_is_self_intersection(const KernelGlobals kg,
const RTCHit *hit,
const Ray *ray)
{
bool status = false;
if (hit->instID[0] != RTC_INVALID_GEOMETRY_ID) {
const int oID = hit->instID[0] / 2;
if ((ray->self.object == oID) || (ray->self.light_object == oID)) {
RTCScene inst_scene = (RTCScene)rtcGetGeometryUserData(
rtcGetGeometry(kernel_data.bvh.scene, hit->instID[0]));
const int pID = hit->primID +
(intptr_t)rtcGetGeometryUserData(rtcGetGeometry(inst_scene, hit->geomID));
status = intersection_skip_self_shadow(ray->self, oID, pID);
}
}
else {
const int oID = hit->geomID / 2;
if ((ray->self.object == oID) || (ray->self.light_object == oID)) {
const int pID = hit->primID + (intptr_t)rtcGetGeometryUserData(
rtcGetGeometry(kernel_data.bvh.scene, hit->geomID));
status = intersection_skip_self_shadow(ray->self, oID, pID);
}
}
return status;
rayhit.hit.primID = RTC_INVALID_GEOMETRY_ID;
}
ccl_device_inline void kernel_embree_convert_hit(KernelGlobals kg,

View File

@@ -157,11 +157,7 @@ ccl_device_inline
}
}
/* Skip self intersection. */
const int prim = kernel_tex_fetch(__prim_index, prim_addr);
if (intersection_skip_self_local(ray->self, prim)) {
continue;
}
if (triangle_intersect_local(kg,
local_isect,
@@ -192,11 +188,7 @@ ccl_device_inline
}
}
/* Skip self intersection. */
const int prim = kernel_tex_fetch(__prim_index, prim_addr);
if (intersection_skip_self_local(ray->self, prim)) {
continue;
}
if (motion_triangle_intersect_local(kg,
local_isect,

View File

@@ -15,7 +15,6 @@
*/
struct MetalRTIntersectionPayload {
RaySelfPrimitives self;
uint visibility;
float u, v;
int prim;
@@ -26,7 +25,6 @@ struct MetalRTIntersectionPayload {
};
struct MetalRTIntersectionLocalPayload {
RaySelfPrimitives self;
uint local_object;
uint lcg_state;
short max_hits;
@@ -36,7 +34,6 @@ struct MetalRTIntersectionLocalPayload {
};
struct MetalRTIntersectionShadowPayload {
RaySelfPrimitives self;
uint visibility;
#if defined(__METALRT_MOTION__)
float time;

View File

@@ -160,9 +160,6 @@ ccl_device_inline
kernel_tex_fetch(__prim_object, prim_addr) :
object;
const int prim = kernel_tex_fetch(__prim_index, prim_addr);
if (intersection_skip_self_shadow(ray->self, prim_object, prim)) {
continue;
}
switch (type & PRIMITIVE_ALL) {
case PRIMITIVE_TRIANGLE: {

View File

@@ -133,29 +133,35 @@ ccl_device_noinline bool BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
--stack_ptr;
/* primitive intersection */
for (; prim_addr < prim_addr2; prim_addr++) {
kernel_assert(kernel_tex_fetch(__prim_type, prim_addr) == type);
switch (type & PRIMITIVE_ALL) {
case PRIMITIVE_TRIANGLE: {
for (; prim_addr < prim_addr2; prim_addr++) {
kernel_assert(kernel_tex_fetch(__prim_type, prim_addr) == type);
const int prim_object = (object == OBJECT_NONE) ?
kernel_tex_fetch(__prim_object, prim_addr) :
object;
const int prim = kernel_tex_fetch(__prim_index, prim_addr);
if (intersection_skip_self_shadow(ray->self, prim_object, prim)) {
continue;
}
const int prim_object = (object == OBJECT_NONE) ?
kernel_tex_fetch(__prim_object, prim_addr) :
object;
const int prim = kernel_tex_fetch(__prim_index, prim_addr);
switch (type & PRIMITIVE_ALL) {
case PRIMITIVE_TRIANGLE: {
if (triangle_intersect(
kg, isect, P, dir, isect->t, visibility, prim_object, prim, prim_addr)) {
/* shadow ray early termination */
if (visibility & PATH_RAY_SHADOW_OPAQUE)
return true;
}
break;
}
break;
}
#if BVH_FEATURE(BVH_MOTION)
case PRIMITIVE_MOTION_TRIANGLE: {
case PRIMITIVE_MOTION_TRIANGLE: {
for (; prim_addr < prim_addr2; prim_addr++) {
kernel_assert(kernel_tex_fetch(__prim_type, prim_addr) == type);
const int prim_object = (object == OBJECT_NONE) ?
kernel_tex_fetch(__prim_object, prim_addr) :
object;
const int prim = kernel_tex_fetch(__prim_index, prim_addr);
if (motion_triangle_intersect(kg,
isect,
P,
@@ -170,21 +176,28 @@ ccl_device_noinline bool BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
if (visibility & PATH_RAY_SHADOW_OPAQUE)
return true;
}
break;
}
break;
}
#endif /* BVH_FEATURE(BVH_MOTION) */
#if BVH_FEATURE(BVH_HAIR)
case PRIMITIVE_CURVE_THICK:
case PRIMITIVE_MOTION_CURVE_THICK:
case PRIMITIVE_CURVE_RIBBON:
case PRIMITIVE_MOTION_CURVE_RIBBON: {
case PRIMITIVE_CURVE_THICK:
case PRIMITIVE_MOTION_CURVE_THICK:
case PRIMITIVE_CURVE_RIBBON:
case PRIMITIVE_MOTION_CURVE_RIBBON: {
for (; prim_addr < prim_addr2; prim_addr++) {
if ((type & PRIMITIVE_MOTION) && kernel_data.bvh.use_bvh_steps) {
const float2 prim_time = kernel_tex_fetch(__prim_time, prim_addr);
if (ray->time < prim_time.x || ray->time > prim_time.y) {
break;
continue;
}
}
const int prim_object = (object == OBJECT_NONE) ?
kernel_tex_fetch(__prim_object, prim_addr) :
object;
const int prim = kernel_tex_fetch(__prim_index, prim_addr);
const int curve_type = kernel_tex_fetch(__prim_type, prim_addr);
const bool hit = curve_intersect(
kg, isect, P, dir, isect->t, prim_object, prim, ray->time, curve_type);
@@ -193,19 +206,26 @@ ccl_device_noinline bool BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
if (visibility & PATH_RAY_SHADOW_OPAQUE)
return true;
}
break;
}
break;
}
#endif /* BVH_FEATURE(BVH_HAIR) */
#if BVH_FEATURE(BVH_POINTCLOUD)
case PRIMITIVE_POINT:
case PRIMITIVE_MOTION_POINT: {
case PRIMITIVE_POINT:
case PRIMITIVE_MOTION_POINT: {
for (; prim_addr < prim_addr2; prim_addr++) {
if ((type & PRIMITIVE_MOTION) && kernel_data.bvh.use_bvh_steps) {
const float2 prim_time = kernel_tex_fetch(__prim_time, prim_addr);
if (ray->time < prim_time.x || ray->time > prim_time.y) {
break;
continue;
}
}
const int prim_object = (object == OBJECT_NONE) ?
kernel_tex_fetch(__prim_object, prim_addr) :
object;
const int prim = kernel_tex_fetch(__prim_index, prim_addr);
const int point_type = kernel_tex_fetch(__prim_type, prim_addr);
const bool hit = point_intersect(
kg, isect, P, dir, isect->t, prim_object, prim, ray->time, point_type);
@@ -214,10 +234,10 @@ ccl_device_noinline bool BVH_FUNCTION_FULL_NAME(BVH)(KernelGlobals kg,
if (visibility & PATH_RAY_SHADOW_OPAQUE)
return true;
}
break;
}
#endif /* BVH_FEATURE(BVH_POINTCLOUD) */
break;
}
#endif /* BVH_FEATURE(BVH_POINTCLOUD) */
}
}
else {

View File

@@ -21,22 +21,54 @@ CCL_NAMESPACE_BEGIN
/* Ray offset to avoid self intersection.
*
* This function should be used to compute a modified ray start position for
* rays leaving from a surface. This is from "A Fast and Robust Method for Avoiding
* Self-Intersection" see https://research.nvidia.com/publication/2019-03_A-Fast-and
*/
* rays leaving from a surface. */
ccl_device_inline float3 ray_offset(float3 P, float3 Ng)
{
const float int_scale = 256.0f;
int3 of_i = make_int3((int)(int_scale * Ng.x), (int)(int_scale * Ng.y), (int)(int_scale * Ng.z));
#ifdef __INTERSECTION_REFINE__
const float epsilon_f = 1e-5f;
/* ideally this should match epsilon_f, but instancing and motion blur
* precision makes it problematic */
const float epsilon_test = 1.0f;
const int epsilon_i = 32;
float3 p_i = make_float3(__int_as_float(__float_as_int(P.x) + ((P.x < 0) ? -of_i.x : of_i.x)),
__int_as_float(__float_as_int(P.y) + ((P.y < 0) ? -of_i.y : of_i.y)),
__int_as_float(__float_as_int(P.z) + ((P.z < 0) ? -of_i.z : of_i.z)));
const float origin = 1.0f / 32.0f;
const float float_scale = 1.0f / 65536.0f;
return make_float3(fabsf(P.x) < origin ? P.x + float_scale * Ng.x : p_i.x,
fabsf(P.y) < origin ? P.y + float_scale * Ng.y : p_i.y,
fabsf(P.z) < origin ? P.z + float_scale * Ng.z : p_i.z);
float3 res;
/* x component */
if (fabsf(P.x) < epsilon_test) {
res.x = P.x + Ng.x * epsilon_f;
}
else {
uint ix = __float_as_uint(P.x);
ix += ((ix ^ __float_as_uint(Ng.x)) >> 31) ? -epsilon_i : epsilon_i;
res.x = __uint_as_float(ix);
}
/* y component */
if (fabsf(P.y) < epsilon_test) {
res.y = P.y + Ng.y * epsilon_f;
}
else {
uint iy = __float_as_uint(P.y);
iy += ((iy ^ __float_as_uint(Ng.y)) >> 31) ? -epsilon_i : epsilon_i;
res.y = __uint_as_float(iy);
}
/* z component */
if (fabsf(P.z) < epsilon_test) {
res.z = P.z + Ng.z * epsilon_f;
}
else {
uint iz = __float_as_uint(P.z);
iz += ((iz ^ __float_as_uint(Ng.z)) >> 31) ? -epsilon_i : epsilon_i;
res.z = __uint_as_float(iz);
}
return res;
#else
const float epsilon_f = 1e-4f;
return P + epsilon_f * Ng;
#endif
}
#if defined(__KERNEL_CPU__)
@@ -195,25 +227,4 @@ ccl_device_inline float intersection_curve_shadow_transparency(KernelGlobals kg,
return (1.0f - u) * f0 + u * f1;
}
ccl_device_inline bool intersection_skip_self(ccl_private const RaySelfPrimitives &self,
const int object,
const int prim)
{
return (self.prim == prim) && (self.object == object);
}
ccl_device_inline bool intersection_skip_self_shadow(ccl_private const RaySelfPrimitives &self,
const int object,
const int prim)
{
return ((self.prim == prim) && (self.object == object)) ||
((self.light_prim == prim) && (self.light_object == object));
}
ccl_device_inline bool intersection_skip_self_local(ccl_private const RaySelfPrimitives &self,
const int prim)
{
return (self.prim == prim);
}
CCL_NAMESPACE_END

View File

@@ -144,9 +144,6 @@ ccl_device_inline
kernel_tex_fetch(__prim_object, prim_addr) :
object;
const int prim = kernel_tex_fetch(__prim_index, prim_addr);
if (intersection_skip_self(ray->self, prim_object, prim)) {
continue;
}
int object_flag = kernel_tex_fetch(__object_flag, prim_object);
if ((object_flag & SD_OBJECT_HAS_VOLUME) == 0) {
@@ -167,9 +164,6 @@ ccl_device_inline
kernel_tex_fetch(__prim_object, prim_addr) :
object;
const int prim = kernel_tex_fetch(__prim_index, prim_addr);
if (intersection_skip_self(ray->self, prim_object, prim)) {
continue;
}
int object_flag = kernel_tex_fetch(__object_flag, prim_object);
if ((object_flag & SD_OBJECT_HAS_VOLUME) == 0) {
continue;

View File

@@ -147,9 +147,6 @@ ccl_device_inline
kernel_tex_fetch(__prim_object, prim_addr) :
object;
const int prim = kernel_tex_fetch(__prim_index, prim_addr);
if (intersection_skip_self(ray->self, prim_object, prim)) {
continue;
}
int object_flag = kernel_tex_fetch(__object_flag, prim_object);
if ((object_flag & SD_OBJECT_HAS_VOLUME) == 0) {
continue;
@@ -191,9 +188,6 @@ ccl_device_inline
kernel_tex_fetch(__prim_object, prim_addr) :
object;
const int prim = kernel_tex_fetch(__prim_index, prim_addr);
if (intersection_skip_self(ray->self, prim_object, prim)) {
continue;
}
int object_flag = kernel_tex_fetch(__object_flag, prim_object);
if ((object_flag & SD_OBJECT_HAS_VOLUME) == 0) {
continue;

View File

@@ -40,27 +40,6 @@ struct TriangleIntersectionResult
enum { METALRT_HIT_TRIANGLE, METALRT_HIT_BOUNDING_BOX };
ccl_device_inline bool intersection_skip_self(ray_data const RaySelfPrimitives& self,
const int object,
const int prim)
{
return (self.prim == prim) && (self.object == object);
}
ccl_device_inline bool intersection_skip_self_shadow(ray_data const RaySelfPrimitives& self,
const int object,
const int prim)
{
return ((self.prim == prim) && (self.object == object)) ||
((self.light_prim == prim) && (self.light_object == object));
}
ccl_device_inline bool intersection_skip_self_local(ray_data const RaySelfPrimitives& self,
const int prim)
{
return (self.prim == prim);
}
template<typename TReturn, uint intersection_type>
TReturn metalrt_local_hit(constant KernelParamsMetal &launch_params_metal,
ray_data MetalKernelContext::MetalRTIntersectionLocalPayload &payload,
@@ -74,8 +53,8 @@ TReturn metalrt_local_hit(constant KernelParamsMetal &launch_params_metal,
#ifdef __BVH_LOCAL__
uint prim = primitive_id + kernel_tex_fetch(__object_prim_offset, object);
if ((object != payload.local_object) || intersection_skip_self_local(payload.self, prim)) {
/* Only intersect with matching object and skip self-intersecton. */
if (object != payload.local_object) {
/* Only intersect with matching object */
result.accept = false;
result.continue_search = true;
return result;
@@ -187,11 +166,6 @@ bool metalrt_shadow_all_hit(constant KernelParamsMetal &launch_params_metal,
}
# endif
if (intersection_skip_self_shadow(payload.self, object, prim)) {
/* continue search */
return true;
}
float u = 0.0f, v = 0.0f;
int type = 0;
if (intersection_type == METALRT_HIT_TRIANGLE) {
@@ -348,35 +322,21 @@ inline TReturnType metalrt_visibility_test(constant KernelParamsMetal &launch_pa
}
# endif
uint visibility = payload.visibility;
# ifdef __VISIBILITY_FLAG__
uint visibility = payload.visibility;
if ((kernel_tex_fetch(__objects, object).visibility & visibility) == 0) {
result.accept = false;
result.continue_search = true;
return result;
}
# endif
/* Shadow ray early termination. */
if (visibility & PATH_RAY_SHADOW_OPAQUE) {
if (intersection_skip_self_shadow(payload.self, object, prim)) {
result.accept = false;
result.continue_search = true;
return result;
}
else {
result.accept = true;
result.continue_search = false;
return result;
}
}
else {
if (intersection_skip_self(payload.self, object, prim)) {
result.accept = false;
result.continue_search = true;
return result;
}
result.accept = true;
result.continue_search = false;
return result;
}
# endif
result.accept = true;
result.continue_search = true;

View File

@@ -45,11 +45,6 @@ template<typename T> ccl_device_forceinline T *get_payload_ptr_2()
return pointer_unpack_from_uint<T>(optixGetPayload_2(), optixGetPayload_3());
}
template<typename T> ccl_device_forceinline T *get_payload_ptr_6()
{
return (T *)(((uint64_t)optixGetPayload_7() << 32) | optixGetPayload_6());
}
ccl_device_forceinline int get_object_id()
{
#ifdef __OBJECT_MOTION__
@@ -116,12 +111,6 @@ extern "C" __global__ void __anyhit__kernel_optix_local_hit()
return optixIgnoreIntersection();
}
const int prim = optixGetPrimitiveIndex();
ccl_private Ray *const ray = get_payload_ptr_6<Ray>();
if (intersection_skip_self_local(ray->self, prim)) {
return optixIgnoreIntersection();
}
const uint max_hits = optixGetPayload_5();
if (max_hits == 0) {
/* Special case for when no hit information is requested, just report that something was hit */
@@ -160,6 +149,8 @@ extern "C" __global__ void __anyhit__kernel_optix_local_hit()
local_isect->num_hits = 1;
}
const int prim = optixGetPrimitiveIndex();
Intersection *isect = &local_isect->hits[hit];
isect->t = optixGetRayTmax();
isect->prim = prim;
@@ -194,11 +185,6 @@ extern "C" __global__ void __anyhit__kernel_optix_shadow_all_hit()
}
# endif
ccl_private Ray *const ray = get_payload_ptr_6<Ray>();
if (intersection_skip_self_shadow(ray->self, object, prim)) {
return optixIgnoreIntersection();
}
float u = 0.0f, v = 0.0f;
int type = 0;
if (optixIsTriangleHit()) {
@@ -328,12 +314,6 @@ extern "C" __global__ void __anyhit__kernel_optix_volume_test()
if ((kernel_tex_fetch(__object_flag, object) & SD_OBJECT_HAS_VOLUME) == 0) {
return optixIgnoreIntersection();
}
const int prim = optixGetPrimitiveIndex();
ccl_private Ray *const ray = get_payload_ptr_6<Ray>();
if (intersection_skip_self(ray->self, object, prim)) {
return optixIgnoreIntersection();
}
}
extern "C" __global__ void __anyhit__kernel_optix_visibility_test()
@@ -350,31 +330,18 @@ extern "C" __global__ void __anyhit__kernel_optix_visibility_test()
# endif
#endif
#ifdef __VISIBILITY_FLAG__
const uint object = get_object_id();
const uint visibility = optixGetPayload_4();
#ifdef __VISIBILITY_FLAG__
if ((kernel_tex_fetch(__objects, object).visibility & visibility) == 0) {
return optixIgnoreIntersection();
}
#endif
const int prim = optixGetPrimitiveIndex();
ccl_private Ray *const ray = get_payload_ptr_6<Ray>();
/* Shadow ray early termination. */
if (visibility & PATH_RAY_SHADOW_OPAQUE) {
if (intersection_skip_self_shadow(ray->self, object, prim)) {
return optixIgnoreIntersection();
}
else {
/* Shadow ray early termination. */
return optixTerminateRay();
}
}
else {
if (intersection_skip_self(ray->self, object, prim)) {
return optixIgnoreIntersection();
}
return optixTerminateRay();
}
#endif
}
extern "C" __global__ void __closesthit__kernel_optix_hit()

View File

@@ -29,19 +29,46 @@
CCL_NAMESPACE_BEGIN
/**
* Use the barycentric coordinates to get the intersection location
/* Refine triangle intersection to more precise hit point. For rays that travel
* far the precision is often not so good, this reintersects the primitive from
* a closer distance.
*/
ccl_device_inline float3 motion_triangle_point_from_uv(KernelGlobals kg,
ccl_private ShaderData *sd,
const int isect_object,
const int isect_prim,
const float u,
const float v,
float3 verts[3])
ccl_device_inline float3 motion_triangle_refine(KernelGlobals kg,
ccl_private ShaderData *sd,
float3 P,
float3 D,
float t,
const int isect_object,
const int isect_prim,
float3 verts[3])
{
float w = 1.0f - u - v;
float3 P = u * verts[0] + v * verts[1] + w * verts[2];
#ifdef __INTERSECTION_REFINE__
if (!(sd->object_flag & SD_OBJECT_TRANSFORM_APPLIED)) {
if (UNLIKELY(t == 0.0f)) {
return P;
}
const Transform tfm = object_get_inverse_transform(kg, sd);
P = transform_point(&tfm, P);
D = transform_direction(&tfm, D * t);
D = normalize_len(D, &t);
}
P = P + D * t;
/* Compute refined intersection distance. */
const float3 e1 = verts[0] - verts[2];
const float3 e2 = verts[1] - verts[2];
const float3 s1 = cross(D, e2);
const float invdivisor = 1.0f / dot(s1, e1);
const float3 d = P - verts[2];
const float3 s2 = cross(d, e1);
float rt = dot(e2, s2) * invdivisor;
/* Compute refined position. */
P = P + D * rt;
if (!(sd->object_flag & SD_OBJECT_TRANSFORM_APPLIED)) {
const Transform tfm = object_get_transform(kg, sd);
@@ -49,8 +76,71 @@ ccl_device_inline float3 motion_triangle_point_from_uv(KernelGlobals kg,
}
return P;
#else
return P + D * t;
#endif
}
/* Same as above, except that t is assumed to be in object space
* for instancing.
*/
#ifdef __BVH_LOCAL__
# if defined(__KERNEL_CUDA__) && (defined(i386) || defined(_M_IX86))
ccl_device_noinline
# else
ccl_device_inline
# endif
float3
motion_triangle_refine_local(KernelGlobals kg,
ccl_private ShaderData *sd,
float3 P,
float3 D,
float t,
const int isect_object,
const int isect_prim,
float3 verts[3])
{
# if defined(__KERNEL_GPU_RAYTRACING__)
/* t is always in world space with OptiX and MetalRT. */
return motion_triangle_refine(kg, sd, P, D, t, isect_object, isect_prim, verts);
# else
# ifdef __INTERSECTION_REFINE__
if (!(sd->object_flag & SD_OBJECT_TRANSFORM_APPLIED)) {
const Transform tfm = object_get_inverse_transform(kg, sd);
P = transform_point(&tfm, P);
D = transform_direction(&tfm, D);
D = normalize(D);
}
P = P + D * t;
/* compute refined intersection distance */
const float3 e1 = verts[0] - verts[2];
const float3 e2 = verts[1] - verts[2];
const float3 s1 = cross(D, e2);
const float invdivisor = 1.0f / dot(s1, e1);
const float3 d = P - verts[2];
const float3 s2 = cross(d, e1);
float rt = dot(e2, s2) * invdivisor;
P = P + D * rt;
if (!(sd->object_flag & SD_OBJECT_TRANSFORM_APPLIED)) {
const Transform tfm = object_get_transform(kg, sd);
P = transform_point(&tfm, P);
}
return P;
# else /* __INTERSECTION_REFINE__ */
return P + D * t;
# endif /* __INTERSECTION_REFINE__ */
# endif
}
#endif /* __BVH_LOCAL__ */
/* Ray intersection. We simply compute the vertex positions at the given ray
* time and do a ray intersection with the resulting triangle.
*/

View File

@@ -68,7 +68,15 @@ ccl_device_noinline void motion_triangle_shader_setup(KernelGlobals kg,
verts[1] = (1.0f - t) * verts[1] + t * next_verts[1];
verts[2] = (1.0f - t) * verts[2] + t * next_verts[2];
/* Compute refined position. */
sd->P = motion_triangle_point_from_uv(kg, sd, isect_object, isect_prim, sd->u, sd->v, verts);
#ifdef __BVH_LOCAL__
if (is_local) {
sd->P = motion_triangle_refine_local(kg, sd, P, D, ray_t, isect_object, isect_prim, verts);
}
else
#endif /* __BVH_LOCAL__*/
{
sd->P = motion_triangle_refine(kg, sd, P, D, ray_t, isect_object, isect_prim, verts);
}
/* Compute face normal. */
float3 Ng;
if (sd->object_flag & SD_OBJECT_NEGATIVE_SCALE_APPLIED) {

View File

@@ -89,7 +89,7 @@ ccl_device_inline void shader_setup_from_ray(KernelGlobals kg,
sd->shader = kernel_tex_fetch(__tri_shader, sd->prim);
/* vectors */
sd->P = triangle_point_from_uv(kg, sd, isect->object, isect->prim, isect->u, isect->v);
sd->P = triangle_refine(kg, sd, ray->P, ray->D, isect->t, isect->object, isect->prim);
sd->Ng = Ng;
sd->N = Ng;

View File

@@ -142,23 +142,58 @@ ccl_device_inline bool triangle_intersect_local(KernelGlobals kg,
}
#endif /* __BVH_LOCAL__ */
/**
* Use the barycentric coordinates to get the intersection location
/* Refine triangle intersection to more precise hit point. For rays that travel
* far the precision is often not so good, this reintersects the primitive from
* a closer distance. */
/* Reintersections uses the paper:
*
* Tomas Moeller
* Fast, minimum storage ray/triangle intersection
* http://www.cs.virginia.edu/~gfx/Courses/2003/ImageSynthesis/papers/Acceleration/Fast%20MinimumStorage%20RayTriangle%20Intersection.pdf
*/
ccl_device_inline float3 triangle_point_from_uv(KernelGlobals kg,
ccl_private ShaderData *sd,
const int isect_object,
const int isect_prim,
const float u,
const float v)
ccl_device_inline float3 triangle_refine(KernelGlobals kg,
ccl_private ShaderData *sd,
float3 P,
float3 D,
float t,
const int isect_object,
const int isect_prim)
{
#ifdef __INTERSECTION_REFINE__
if (!(sd->object_flag & SD_OBJECT_TRANSFORM_APPLIED)) {
if (UNLIKELY(t == 0.0f)) {
return P;
}
const Transform tfm = object_get_inverse_transform(kg, sd);
P = transform_point(&tfm, P);
D = transform_direction(&tfm, D * t);
D = normalize_len(D, &t);
}
P = P + D * t;
const uint tri_vindex = kernel_tex_fetch(__tri_vindex, isect_prim).w;
const packed_float3 tri_a = kernel_tex_fetch(__tri_verts, tri_vindex + 0),
tri_b = kernel_tex_fetch(__tri_verts, tri_vindex + 1),
tri_c = kernel_tex_fetch(__tri_verts, tri_vindex + 2);
float w = 1.0f - u - v;
float3 P = u * tri_a + v * tri_b + w * tri_c;
float3 edge1 = make_float3(tri_a.x - tri_c.x, tri_a.y - tri_c.y, tri_a.z - tri_c.z);
float3 edge2 = make_float3(tri_b.x - tri_c.x, tri_b.y - tri_c.y, tri_b.z - tri_c.z);
float3 tvec = make_float3(P.x - tri_c.x, P.y - tri_c.y, P.z - tri_c.z);
float3 qvec = cross(tvec, edge1);
float3 pvec = cross(D, edge2);
float det = dot(edge1, pvec);
if (det != 0.0f) {
/* If determinant is zero it means ray lies in the plane of
* the triangle. It is possible in theory due to watertight
* nature of triangle intersection. For such cases we simply
* don't refine intersection hoping it'll go all fine.
*/
float rt = dot(edge2, qvec) / det;
P = P + D * rt;
}
if (!(sd->object_flag & SD_OBJECT_TRANSFORM_APPLIED)) {
const Transform tfm = object_get_transform(kg, sd);
@@ -166,6 +201,65 @@ ccl_device_inline float3 triangle_point_from_uv(KernelGlobals kg,
}
return P;
#else
return P + D * t;
#endif
}
/* Same as above, except that t is assumed to be in object space for
* instancing.
*/
ccl_device_inline float3 triangle_refine_local(KernelGlobals kg,
ccl_private ShaderData *sd,
float3 P,
float3 D,
float t,
const int isect_object,
const int isect_prim)
{
#if defined(__KERNEL_GPU_RAYTRACING__)
/* t is always in world space with OptiX and MetalRT. */
return triangle_refine(kg, sd, P, D, t, isect_object, isect_prim);
#else
if (!(sd->object_flag & SD_OBJECT_TRANSFORM_APPLIED)) {
const Transform tfm = object_get_inverse_transform(kg, sd);
P = transform_point(&tfm, P);
D = transform_direction(&tfm, D);
D = normalize(D);
}
P = P + D * t;
# ifdef __INTERSECTION_REFINE__
const uint tri_vindex = kernel_tex_fetch(__tri_vindex, isect_prim).w;
const packed_float3 tri_a = kernel_tex_fetch(__tri_verts, tri_vindex + 0),
tri_b = kernel_tex_fetch(__tri_verts, tri_vindex + 1),
tri_c = kernel_tex_fetch(__tri_verts, tri_vindex + 2);
float3 edge1 = make_float3(tri_a.x - tri_c.x, tri_a.y - tri_c.y, tri_a.z - tri_c.z);
float3 edge2 = make_float3(tri_b.x - tri_c.x, tri_b.y - tri_c.y, tri_b.z - tri_c.z);
float3 tvec = make_float3(P.x - tri_c.x, P.y - tri_c.y, P.z - tri_c.z);
float3 qvec = cross(tvec, edge1);
float3 pvec = cross(D, edge2);
float det = dot(edge1, pvec);
if (det != 0.0f) {
/* If determinant is zero it means ray lies in the plane of
* the triangle. It is possible in theory due to watertight
* nature of triangle intersection. For such cases we simply
* don't refine intersection hoping it'll go all fine.
*/
float rt = dot(edge2, qvec) / det;
P = P + D * rt;
}
# endif /* __INTERSECTION_REFINE__ */
if (!(sd->object_flag & SD_OBJECT_TRANSFORM_APPLIED)) {
const Transform tfm = object_get_transform(kg, sd);
P = transform_point(&tfm, P);
}
return P;
#endif
}
CCL_NAMESPACE_END

View File

@@ -328,12 +328,6 @@ ccl_device void integrator_intersect_closest(KernelGlobals kg,
/* Scene Intersection. */
Intersection isect ccl_optional_struct_init;
isect.object = OBJECT_NONE;
isect.prim = PRIM_NONE;
ray.self.object = last_isect_object;
ray.self.prim = last_isect_prim;
ray.self.light_object = OBJECT_NONE;
ray.self.light_prim = PRIM_NONE;
bool hit = scene_intersect(kg, &ray, visibility, &isect);
/* TODO: remove this and do it in the various intersection functions instead. */

View File

@@ -156,10 +156,7 @@ ccl_device void integrator_intersect_shadow(KernelGlobals kg, IntegratorShadowSt
/* Read ray from integrator state into local memory. */
Ray ray ccl_optional_struct_init;
integrator_state_read_shadow_ray(kg, state, &ray);
ray.self.object = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 0, object);
ray.self.prim = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 0, prim);
ray.self.light_object = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 1, object);
ray.self.light_prim = INTEGRATOR_STATE_ARRAY(state, shadow_isect, 1, prim);
/* Compute visibility. */
const uint visibility = integrate_intersect_shadow_visibility(kg, state);

View File

@@ -38,10 +38,7 @@ ccl_device void integrator_volume_stack_update_for_subsurface(KernelGlobals kg,
Ray volume_ray ccl_optional_struct_init;
volume_ray.P = from_P;
volume_ray.D = normalize_len(to_P - from_P, &volume_ray.t);
volume_ray.self.object = INTEGRATOR_STATE(state, isect, object);
volume_ray.self.prim = INTEGRATOR_STATE(state, isect, prim);
volume_ray.self.light_object = OBJECT_NONE;
volume_ray.self.light_prim = PRIM_NONE;
/* Store to avoid global fetches on every intersection step. */
const uint volume_stack_size = kernel_data.volume_stack_size;
@@ -71,7 +68,7 @@ ccl_device void integrator_volume_stack_update_for_subsurface(KernelGlobals kg,
volume_stack_enter_exit(kg, state, stack_sd);
/* Move ray forward. */
volume_ray.P = stack_sd->P;
volume_ray.P = ray_offset(stack_sd->P, -stack_sd->Ng);
if (volume_ray.t != FLT_MAX) {
volume_ray.D = normalize_len(to_P - volume_ray.P, &volume_ray.t);
}
@@ -94,10 +91,6 @@ ccl_device void integrator_volume_stack_init(KernelGlobals kg, IntegratorState s
* fewest hits. */
volume_ray.D = make_float3(0.0f, 0.0f, 1.0f);
volume_ray.t = FLT_MAX;
volume_ray.self.object = OBJECT_NONE;
volume_ray.self.prim = PRIM_NONE;
volume_ray.self.light_object = OBJECT_NONE;
volume_ray.self.light_prim = PRIM_NONE;
int stack_index = 0, enclosed_index = 0;
@@ -210,7 +203,7 @@ ccl_device void integrator_volume_stack_init(KernelGlobals kg, IntegratorState s
}
/* Move ray forward. */
volume_ray.P = stack_sd->P;
volume_ray.P = ray_offset(stack_sd->P, -stack_sd->Ng);
++step;
}
#endif

View File

@@ -37,9 +37,8 @@ ccl_device_inline void integrate_light(KernelGlobals kg,
/* Advance ray beyond light. */
/* TODO: can we make this more numerically robust to avoid reintersecting the
* same light in some cases? Ray should not intersect surface anymore as the
* object and prim ids will prevent self intersection. */
const float3 new_ray_P = ray_P + ray_D * isect.t;
* same light in some cases? */
const float3 new_ray_P = ray_offset(ray_P + ray_D * isect.t, ray_D);
INTEGRATOR_STATE_WRITE(state, ray, P) = new_ray_P;
INTEGRATOR_STATE_WRITE(state, ray, t) -= isect.t;
@@ -47,7 +46,7 @@ ccl_device_inline void integrate_light(KernelGlobals kg,
const float mis_ray_t = INTEGRATOR_STATE(state, path, mis_ray_t);
ray_P -= ray_D * mis_ray_t;
isect.t += mis_ray_t;
INTEGRATOR_STATE_WRITE(state, path, mis_ray_t) = isect.t;
INTEGRATOR_STATE_WRITE(state, path, mis_ray_t) = mis_ray_t + isect.t;
LightSample ls ccl_optional_struct_init;
const bool use_light_sample = light_sample_from_intersection(kg, &isect, ray_P, ray_D, &ls);

View File

@@ -83,10 +83,7 @@ ccl_device_inline void integrate_transparent_volume_shadow(KernelGlobals kg,
/* Setup shader data. */
Ray ray ccl_optional_struct_init;
integrator_state_read_shadow_ray(kg, state, &ray);
ray.self.object = OBJECT_NONE;
ray.self.prim = PRIM_NONE;
ray.self.light_object = OBJECT_NONE;
ray.self.light_prim = PRIM_NONE;
/* Modify ray position and length to match current segment. */
const float start_t = (hit == 0) ? 0.0f :
INTEGRATOR_STATE_ARRAY(state, shadow_isect, hit - 1, t);
@@ -152,7 +149,7 @@ ccl_device_inline bool integrate_transparent_shadow(KernelGlobals kg,
const float last_hit_t = INTEGRATOR_STATE_ARRAY(state, shadow_isect, num_recorded_hits - 1, t);
const float3 ray_P = INTEGRATOR_STATE(state, shadow_ray, P);
const float3 ray_D = INTEGRATOR_STATE(state, shadow_ray, D);
INTEGRATOR_STATE_WRITE(state, shadow_ray, P) = ray_P + last_hit_t * ray_D;
INTEGRATOR_STATE_WRITE(state, shadow_ray, P) = ray_offset(ray_P + last_hit_t * ray_D, ray_D);
INTEGRATOR_STATE_WRITE(state, shadow_ray, t) -= last_hit_t;
}

View File

@@ -182,11 +182,6 @@ ccl_device_forceinline void integrate_surface_direct_light(KernelGlobals kg,
/* Write shadow ray and associated state to global memory. */
integrator_state_write_shadow_ray(kg, shadow_state, &ray);
// Save memory by storing the light and object indices in the shadow_isect
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, object) = ray.self.object;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, prim) = ray.self.prim;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, object) = ray.self.light_object;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, prim) = ray.self.light_prim;
/* Copy state from main path to shadow path. */
const uint16_t bounce = INTEGRATOR_STATE(state, path, bounce);
@@ -271,11 +266,13 @@ ccl_device_forceinline int integrate_surface_bsdf_bssrdf_bounce(
}
/* Setup ray. Note that clipping works through transparent bounces. */
INTEGRATOR_STATE_WRITE(state, ray, P) = sd->P;
INTEGRATOR_STATE_WRITE(state, ray, P) = ray_offset(sd->P,
(label & LABEL_TRANSMIT) ? -sd->Ng : sd->Ng);
INTEGRATOR_STATE_WRITE(state, ray, D) = normalize(bsdf_omega_in);
INTEGRATOR_STATE_WRITE(state, ray, t) = (label & LABEL_TRANSPARENT) ?
INTEGRATOR_STATE(state, ray, t) - sd->ray_length :
FLT_MAX;
#ifdef __RAY_DIFFERENTIALS__
INTEGRATOR_STATE_WRITE(state, ray, dP) = differential_make_compact(sd->dP);
INTEGRATOR_STATE_WRITE(state, ray, dD) = differential_make_compact(bsdf_domega_in);
@@ -319,7 +316,7 @@ ccl_device_forceinline bool integrate_surface_volume_only_bounce(IntegratorState
}
/* Setup ray position, direction stays unchanged. */
INTEGRATOR_STATE_WRITE(state, ray, P) = sd->P;
INTEGRATOR_STATE_WRITE(state, ray, P) = ray_offset(sd->P, -sd->Ng);
/* Clipping works through transparent. */
INTEGRATOR_STATE_WRITE(state, ray, t) -= sd->ray_length;
@@ -363,14 +360,10 @@ ccl_device_forceinline void integrate_surface_ao(KernelGlobals kg,
}
Ray ray ccl_optional_struct_init;
ray.P = sd->P;
ray.P = ray_offset(sd->P, sd->Ng);
ray.D = ao_D;
ray.t = kernel_data.integrator.ao_bounces_distance;
ray.time = sd->time;
ray.self.object = sd->object;
ray.self.prim = sd->prim;
ray.self.light_object = OBJECT_NONE;
ray.self.light_prim = PRIM_NONE;
ray.dP = differential_zero_compact();
ray.dD = differential_zero_compact();
@@ -382,10 +375,6 @@ ccl_device_forceinline void integrate_surface_ao(KernelGlobals kg,
/* Write shadow ray and associated state to global memory. */
integrator_state_write_shadow_ray(kg, shadow_state, &ray);
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, object) = ray.self.object;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, prim) = ray.self.prim;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, object) = ray.self.light_object;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, prim) = ray.self.light_prim;
/* Copy state from main path to shadow path. */
const uint16_t bounce = INTEGRATOR_STATE(state, path, bounce);

View File

@@ -791,10 +791,6 @@ ccl_device_forceinline void integrate_volume_direct_light(
/* Write shadow ray and associated state to global memory. */
integrator_state_write_shadow_ray(kg, shadow_state, &ray);
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, object) = ray.self.object;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 0, prim) = ray.self.prim;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, object) = ray.self.light_object;
INTEGRATOR_STATE_ARRAY_WRITE(shadow_state, shadow_isect, 1, prim) = ray.self.light_prim;
/* Copy state from main path to shadow path. */
const uint16_t bounce = INTEGRATOR_STATE(state, path, bounce);
@@ -877,13 +873,11 @@ ccl_device_forceinline bool integrate_volume_phase_scatter(
INTEGRATOR_STATE_WRITE(state, ray, P) = sd->P;
INTEGRATOR_STATE_WRITE(state, ray, D) = normalize(phase_omega_in);
INTEGRATOR_STATE_WRITE(state, ray, t) = FLT_MAX;
# ifdef __RAY_DIFFERENTIALS__
INTEGRATOR_STATE_WRITE(state, ray, dP) = differential_make_compact(sd->dP);
INTEGRATOR_STATE_WRITE(state, ray, dD) = differential_make_compact(phase_domega_in);
# endif
// Save memory by storing last hit prim and object in isect
INTEGRATOR_STATE_WRITE(state, isect, prim) = sd->prim;
INTEGRATOR_STATE_WRITE(state, isect, object) = sd->object;
/* Update throughput. */
const float3 throughput = INTEGRATOR_STATE(state, path, throughput);

View File

@@ -61,7 +61,6 @@ KERNEL_STRUCT_MEMBER(shadow_ray, packed_float3, D, KERNEL_FEATURE_PATH_TRACING)
KERNEL_STRUCT_MEMBER(shadow_ray, float, t, KERNEL_FEATURE_PATH_TRACING)
KERNEL_STRUCT_MEMBER(shadow_ray, float, time, KERNEL_FEATURE_PATH_TRACING)
KERNEL_STRUCT_MEMBER(shadow_ray, float, dP, KERNEL_FEATURE_PATH_TRACING)
KERNEL_STRUCT_MEMBER(shadow_ray, int, object, KERNEL_FEATURE_PATH_TRACING)
KERNEL_STRUCT_END(shadow_ray)
/*********************** Shadow Intersection result **************************/

View File

@@ -57,6 +57,7 @@ ccl_device int subsurface_bounce(KernelGlobals kg,
/* Pass along object info, reusing isect to save memory. */
INTEGRATOR_STATE_WRITE(state, subsurface, Ng) = sd->Ng;
INTEGRATOR_STATE_WRITE(state, isect, object) = sd->object;
uint32_t path_flag = (INTEGRATOR_STATE(state, path, flag) & ~PATH_RAY_CAMERA) |
((sc->type == CLOSURE_BSSRDF_BURLEY_ID) ? PATH_RAY_SUBSURFACE_DISK :
@@ -164,8 +165,10 @@ ccl_device_inline bool subsurface_scatter(KernelGlobals kg, IntegratorState stat
if (object_flag & SD_OBJECT_INTERSECTS_VOLUME) {
float3 P = INTEGRATOR_STATE(state, ray, P);
const float3 Ng = INTEGRATOR_STATE(state, subsurface, Ng);
const float3 offset_P = ray_offset(P, -Ng);
integrator_volume_stack_update_for_subsurface(kg, state, P, ray.P);
integrator_volume_stack_update_for_subsurface(kg, state, offset_P, ray.P);
}
}
# endif /* __VOLUME__ */

View File

@@ -99,10 +99,6 @@ ccl_device_inline bool subsurface_disk(KernelGlobals kg,
ray.dP = ray_dP;
ray.dD = differential_zero_compact();
ray.time = time;
ray.self.object = OBJECT_NONE;
ray.self.prim = PRIM_NONE;
ray.self.light_object = OBJECT_NONE;
ray.self.light_prim = OBJECT_NONE;
/* Intersect with the same object. if multiple intersections are found it
* will use at most BSSRDF_MAX_HITS hits, a random subset of all hits. */

View File

@@ -195,7 +195,6 @@ ccl_device_inline bool subsurface_random_walk(KernelGlobals kg,
const float time = INTEGRATOR_STATE(state, ray, time);
const float3 Ng = INTEGRATOR_STATE(state, subsurface, Ng);
const int object = INTEGRATOR_STATE(state, isect, object);
const int prim = INTEGRATOR_STATE(state, isect, prim);
/* Sample diffuse surface scatter into the object. */
float3 D;
@@ -206,16 +205,12 @@ ccl_device_inline bool subsurface_random_walk(KernelGlobals kg,
}
/* Setup ray. */
ray.P = P;
ray.P = ray_offset(P, -Ng);
ray.D = D;
ray.t = FLT_MAX;
ray.time = time;
ray.dP = ray_dP;
ray.dD = differential_zero_compact();
ray.self.object = object;
ray.self.prim = prim;
ray.self.light_object = OBJECT_NONE;
ray.self.light_prim = PRIM_NONE;
#ifndef __KERNEL_GPU_RAYTRACING__
/* Compute or fetch object transforms. */
@@ -382,15 +377,7 @@ ccl_device_inline bool subsurface_random_walk(KernelGlobals kg,
* If yes, we will later use backwards guided sampling in order to have a decent
* chance of connecting to it.
* TODO: Maybe use less than 10 times the mean free path? */
if (bounce == 0) {
ray.t = max(t, 10.0f / (min3(sigma_t)));
}
else {
ray.t = t;
/* After the first bounce the object can intersect the same surface again */
ray.self.object = OBJECT_NONE;
ray.self.prim = PRIM_NONE;
}
ray.t = (bounce == 0) ? max(t, 10.0f / (min3(sigma_t))) : t;
scene_intersect_local(kg, &ray, &ss_isect, object, NULL, 1);
hit = (ss_isect.num_hits > 0);
@@ -421,6 +408,13 @@ ccl_device_inline bool subsurface_random_walk(KernelGlobals kg,
if (hit) {
t = ray.t;
}
else if (bounce == 0) {
/* Restore original position if nothing was hit after the first bounce,
* without the ray_offset() that was added to avoid self-intersection.
* Otherwise if that offset is relatively large compared to the scattering
* radius, we never go back up high enough to exit the surface. */
ray.P = P;
}
/* Advance to new scatter location. */
ray.P += t * ray.D;

View File

@@ -418,8 +418,8 @@ ccl_device bool light_sample_from_intersection(KernelGlobals kg,
LightType type = (LightType)klight->type;
ls->type = type;
ls->shader = klight->shader_id;
ls->object = isect->object;
ls->prim = isect->prim;
ls->object = PRIM_NONE;
ls->prim = PRIM_NONE;
ls->lamp = lamp;
/* todo: missing texture coordinates */
ls->t = isect->t;

View File

@@ -198,7 +198,7 @@ ccl_device_inline float3 shadow_ray_offset(KernelGlobals kg,
float NL = dot(sd->N, L);
bool transmit = (NL < 0.0f);
float3 Ng = (transmit ? -sd->Ng : sd->Ng);
float3 P = sd->P;
float3 P = ray_offset(sd->P, Ng);
if ((sd->type & PRIMITIVE_TRIANGLE) && (sd->shader & SHADER_SMOOTH_NORMAL)) {
const float offset_cutoff =
@@ -243,7 +243,7 @@ ccl_device_inline void shadow_ray_setup(ccl_private const ShaderData *ccl_restri
}
else {
/* other lights, avoid self-intersection */
ray->D = ls->P - P;
ray->D = ray_offset(ls->P, ls->Ng) - P;
ray->D = normalize_len(ray->D, &ray->t);
}
}
@@ -257,12 +257,6 @@ ccl_device_inline void shadow_ray_setup(ccl_private const ShaderData *ccl_restri
ray->dP = differential_make_compact(sd->dP);
ray->dD = differential_zero_compact();
ray->time = sd->time;
/* Fill in intersection surface and light details. */
ray->self.prim = sd->prim;
ray->self.object = sd->object;
ray->self.light_prim = ls->prim;
ray->self.light_object = ls->object;
}
/* Create shadow ray towards light sample. */

View File

@@ -70,14 +70,10 @@ ccl_device float svm_ao(
/* Create ray. */
Ray ray;
ray.P = sd->P;
ray.P = ray_offset(sd->P, N);
ray.D = D.x * T + D.y * B + D.z * N;
ray.t = max_dist;
ray.time = sd->time;
ray.self.object = sd->object;
ray.self.prim = sd->prim;
ray.self.light_object = OBJECT_NONE;
ray.self.light_prim = PRIM_NONE;
ray.dP = differential_zero_compact();
ray.dD = differential_zero_compact();

View File

@@ -196,10 +196,6 @@ ccl_device float3 svm_bevel(
ray.dP = differential_zero_compact();
ray.dD = differential_zero_compact();
ray.time = sd->time;
ray.self.object = OBJECT_NONE;
ray.self.prim = PRIM_NONE;
ray.self.light_object = OBJECT_NONE;
ray.self.light_prim = PRIM_NONE;
/* Intersect with the same object. if multiple intersections are found it
* will use at most LOCAL_MAX_HITS hits, a random subset of all hits. */
@@ -211,24 +207,15 @@ ccl_device float3 svm_bevel(
/* Quickly retrieve P and Ng without setting up ShaderData. */
float3 hit_P;
if (sd->type == PRIMITIVE_TRIANGLE) {
hit_P = triangle_point_from_uv(kg,
sd,
isect.hits[hit].object,
isect.hits[hit].prim,
isect.hits[hit].u,
isect.hits[hit].v);
hit_P = triangle_refine_local(
kg, sd, ray.P, ray.D, ray.t, isect.hits[hit].object, isect.hits[hit].prim);
}
# ifdef __OBJECT_MOTION__
else if (sd->type == PRIMITIVE_MOTION_TRIANGLE) {
float3 verts[3];
motion_triangle_vertices(kg, sd->object, isect.hits[hit].prim, sd->time, verts);
hit_P = motion_triangle_point_from_uv(kg,
sd,
isect.hits[hit].object,
isect.hits[hit].prim,
isect.hits[hit].u,
isect.hits[hit].v,
verts);
hit_P = motion_triangle_refine_local(
kg, sd, ray.P, ray.D, ray.t, isect.hits[hit].object, isect.hits[hit].prim, verts);
}
# endif /* __OBJECT_MOTION__ */

View File

@@ -512,21 +512,12 @@ typedef struct differential {
/* Ray */
typedef struct RaySelfPrimitives {
int prim; /* Primitive the ray is starting from */
int object; /* Instance prim is a part of */
int light_prim; /* Light primitive */
int light_object; /* Light object */
} RaySelfPrimitives;
typedef struct Ray {
float3 P; /* origin */
float3 D; /* direction */
float t; /* length of the ray */
float time; /* time (for motion blur) */
RaySelfPrimitives self;
#ifdef __RAY_DIFFERENTIALS__
float dP;
float dD;

View File

@@ -761,15 +761,6 @@ class ASSETBROWSER_PT_metadata_preview(asset_utils.AssetMetaDataPanel, Panel):
col.operator("ed.lib_id_load_custom_preview", icon='FILEBROWSER', text="")
col.separator()
col.operator("ed.lib_id_generate_preview", icon='FILE_REFRESH', text="")
col.menu("ASSETBROWSER_MT_metadata_preview_menu", icon='DOWNARROW_HLT', text="")
class ASSETBROWSER_MT_metadata_preview_menu(bpy.types.Menu):
bl_label = "Preview"
def draw(self, context):
layout = self.layout
layout.operator("ed.lib_id_generate_preview_from_object", text="Render Active Object")
class ASSETBROWSER_PT_metadata_tags(asset_utils.AssetMetaDataPanel, Panel):
@@ -849,7 +840,6 @@ classes = (
ASSETBROWSER_MT_view,
ASSETBROWSER_MT_select,
ASSETBROWSER_MT_edit,
ASSETBROWSER_MT_metadata_preview_menu,
ASSETBROWSER_PT_metadata,
ASSETBROWSER_PT_metadata_preview,
ASSETBROWSER_PT_metadata_tags,

View File

@@ -46,7 +46,7 @@ void *BKE_camera_add(struct Main *bmain, const char *name);
/**
* Get the camera's DOF value, takes the DOF object into account.
*/
float BKE_camera_object_dof_distance(const struct Object *ob);
float BKE_camera_object_dof_distance(struct Object *ob);
int BKE_camera_sensor_fit(int sensor_fit, float sizex, float sizey);
float BKE_camera_sensor_size(int sensor_fit, float sensor_x, float sensor_y);

View File

@@ -436,12 +436,6 @@ int CustomData_get_render_layer(const struct CustomData *data, int type);
int CustomData_get_clone_layer(const struct CustomData *data, int type);
int CustomData_get_stencil_layer(const struct CustomData *data, int type);
/**
* Returns name of the active layer of the given type or NULL
* if no such active layer is defined.
*/
const char *CustomData_get_active_layer_name(const struct CustomData *data, int type);
/**
* Copies the data from source to the data element at index in the first layer of type
* no effect if there is no layer of type.

View File

@@ -24,8 +24,6 @@
#include "BLI_utildefines.h"
#include "BLI_rect.h"
#ifdef __cplusplus
extern "C" {
#endif
@@ -563,7 +561,6 @@ struct GPUTexture *BKE_image_get_gpu_tilemap(struct Image *image,
* Is the alpha of the `GPUTexture` for a given image/ibuf premultiplied.
*/
bool BKE_image_has_gpu_texture_premultiplied_alpha(struct Image *image, struct ImBuf *ibuf);
/**
* Partial update of texture for texture painting.
* This is often much quicker than fully updating the texture for high resolution images.
@@ -575,14 +572,8 @@ void BKE_image_update_gputexture(
* The next time the #GPUTexture is used these tiles will be refreshes. This saves time
* when writing to the same place multiple times This happens for during foreground rendering.
*/
void BKE_image_update_gputexture_delayed(struct Image *ima,
struct ImageTile *image_tile,
struct ImBuf *ibuf,
int x,
int y,
int w,
int h);
void BKE_image_update_gputexture_delayed(
struct Image *ima, struct ImBuf *ibuf, int x, int y, int w, int h);
/**
* Called on entering and exiting texture paint mode,
* temporary disabling/enabling mipmapping on all images for quick texture
@@ -600,32 +591,6 @@ bool BKE_image_remove_renderslot(struct Image *ima, struct ImageUser *iuser, int
struct RenderSlot *BKE_image_get_renderslot(struct Image *ima, int index);
bool BKE_image_clear_renderslot(struct Image *ima, struct ImageUser *iuser, int slot);
/* --- image_partial_update.cc --- */
/** Image partial updates. */
struct PartialUpdateUser;
/**
* \brief Create a new PartialUpdateUser. An Object that contains data to use partial updates.
*/
struct PartialUpdateUser *BKE_image_partial_update_create(const struct Image *image);
/**
* \brief free a partial update user.
*/
void BKE_image_partial_update_free(struct PartialUpdateUser *user);
/* --- partial updater (image side) --- */
struct PartialUpdateRegister;
void BKE_image_partial_update_register_free(struct Image *image);
/** \brief Mark a region of the image to update. */
void BKE_image_partial_update_mark_region(struct Image *image,
const struct ImageTile *image_tile,
const struct ImBuf *image_buffer,
const rcti *updated_region);
/** \brief Mark the whole image to be updated. */
void BKE_image_partial_update_mark_full_update(struct Image *image);
#ifdef __cplusplus
}
#endif

View File

@@ -1,298 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2021, Blender Foundation.
*/
/** \file
* \ingroup bke
*
* To reduce the overhead of image processing this file contains a mechanism to detect areas of the
* image that are changed. These areas are organized in chunks. Changes that happen over time are
* organized in changesets.
*
* A common usecase is to update GPUTexture for drawing where only that part is uploaded that only
* changed.
*/
#pragma once
#include "BLI_utildefines.h"
#include "BLI_rect.h"
#include "DNA_image_types.h"
extern "C" {
struct PartialUpdateUser;
struct PartialUpdateRegister;
}
namespace blender::bke::image {
using TileNumber = int;
namespace partial_update {
/* --- image_partial_update.cc --- */
/** Image partial updates. */
/**
* \brief Result codes of #BKE_image_partial_update_collect_changes.
*/
enum class ePartialUpdateCollectResult {
/** \brief Unable to construct partial updates. Caller should perform a full update. */
FullUpdateNeeded,
/** \brief No changes detected since the last time requested. */
NoChangesDetected,
/** \brief Changes detected since the last time requested. */
PartialChangesDetected,
};
/**
* \brief A region to update.
*
* Data is organized in tiles. These tiles are in texel space (1 unit is a single texel). When
* tiles are requested they are merged with neighboring tiles.
*/
struct PartialUpdateRegion {
/** \brief region of the image that has been updated. Region can be bigger than actual changes.
*/
struct rcti region;
/**
* \brief Tile number (UDIM) that this region belongs to.
*/
TileNumber tile_number;
};
/**
* \brief Return codes of #BKE_image_partial_update_get_next_change.
*/
enum class ePartialUpdateIterResult {
/** \brief no tiles left when iterating over tiles. */
Finished = 0,
/** \brief a chunk was available and has been loaded. */
ChangeAvailable = 1,
};
/**
* \brief collect the partial update since the last request.
*
* Invoke #BKE_image_partial_update_get_next_change to iterate over the collected tiles.
*
* \returns ePartialUpdateCollectResult::FullUpdateNeeded: called should not use partial updates
* but recalculate the full image. This result can be expected when called for the first time for a
* user and when it isn't possible to reconstruct the changes as the internal state doesn't have
* enough data stored. ePartialUpdateCollectResult::NoChangesDetected: The have been no changes
* detected since last invoke for the same user.
* ePartialUpdateCollectResult::PartialChangesDetected: Parts of the image has been updated since
* last invoke for the same user. The changes can be read by using
* #BKE_image_partial_update_get_next_change.
*/
ePartialUpdateCollectResult BKE_image_partial_update_collect_changes(
struct Image *image, struct PartialUpdateUser *user);
ePartialUpdateIterResult BKE_image_partial_update_get_next_change(
struct PartialUpdateUser *user, struct PartialUpdateRegion *r_region);
/** \brief Abstract class to load tile data when using the PartialUpdateChecker. */
class AbstractTileData {
protected:
virtual ~AbstractTileData() = default;
public:
/**
* \brief Load the data for the given tile_number.
*
* Invoked when changes are on a different tile compared to the previous tile..
*/
virtual void init_data(TileNumber tile_number) = 0;
/**
* \brief Unload the data that has been loaded.
*
* Invoked when changes are on a different tile compared to the previous tile or when finished
* iterating over the changes.
*/
virtual void free_data() = 0;
};
/**
* \brief Class to not load any tile specific data when iterating over changes.
*/
class NoTileData : AbstractTileData {
public:
NoTileData(Image *UNUSED(image), ImageUser *UNUSED(image_user))
{
}
void init_data(TileNumber UNUSED(new_tile_number)) override
{
}
void free_data() override
{
}
};
/**
* \brief Load the ImageTile and ImBuf associated with the partial change.
*/
class ImageTileData : AbstractTileData {
public:
/**
* \brief Not owned Image that is being iterated over.
*/
Image *image;
/**
* \brief Local copy of the image user.
*
* The local copy is required so we don't change the image user of the caller.
* We need to change it in order to request data for a specific tile.
*/
ImageUser image_user = {0};
/**
* \brief ImageTile associated with the loaded tile.
* Data is not owned by this instance but by the `image`.
*/
ImageTile *tile = nullptr;
/**
* \brief ImBuf of the loaded tile.
*
* Can be nullptr when the file doesn't exist or when the tile hasn't been initialized.
*/
ImBuf *tile_buffer = nullptr;
ImageTileData(Image *image, ImageUser *image_user) : image(image)
{
if (image_user != nullptr) {
this->image_user = *image_user;
}
}
void init_data(TileNumber new_tile_number) override
{
image_user.tile = new_tile_number;
tile = BKE_image_get_tile(image, new_tile_number);
tile_buffer = BKE_image_acquire_ibuf(image, &image_user, NULL);
}
void free_data() override
{
BKE_image_release_ibuf(image, tile_buffer, nullptr);
tile = nullptr;
tile_buffer = nullptr;
}
};
template<typename TileData = NoTileData> struct PartialUpdateChecker {
/**
* \brief Not owned Image that is being iterated over.
*/
Image *image;
ImageUser *image_user;
/**
* \brief the collected changes are stored inside the PartialUpdateUser.
*/
PartialUpdateUser *user;
struct CollectResult {
PartialUpdateChecker<TileData> *checker;
/**
* \brief Tile specific data.
*/
TileData tile_data;
PartialUpdateRegion changed_region;
ePartialUpdateCollectResult result_code;
private:
TileNumber last_tile_number;
public:
CollectResult(PartialUpdateChecker<TileData> *checker, ePartialUpdateCollectResult result_code)
: checker(checker),
tile_data(checker->image, checker->image_user),
result_code(result_code)
{
}
const ePartialUpdateCollectResult get_result_code() const
{
return result_code;
}
/**
* \brief Load the next changed region.
*
* This member function can only be called when partial changes are detected.
* (`get_result_code()` returns `ePartialUpdateCollectResult::PartialChangesDetected`).
*
* When changes for another tile than the previous tile is loaded the #tile_data will be
* updated.
*/
ePartialUpdateIterResult get_next_change()
{
BLI_assert(result_code == ePartialUpdateCollectResult::PartialChangesDetected);
ePartialUpdateIterResult result = BKE_image_partial_update_get_next_change(checker->user,
&changed_region);
switch (result) {
case ePartialUpdateIterResult::Finished:
tile_data.free_data();
return result;
case ePartialUpdateIterResult::ChangeAvailable:
if (last_tile_number == changed_region.tile_number) {
return result;
}
tile_data.free_data();
tile_data.init_data(changed_region.tile_number);
last_tile_number = changed_region.tile_number;
return result;
default:
BLI_assert_unreachable();
return result;
}
}
};
public:
PartialUpdateChecker(Image *image, ImageUser *image_user, PartialUpdateUser *user)
: image(image), image_user(image_user), user(user)
{
}
/**
* \brief Check for new changes since the last time this method was invoked for this #user.
*/
CollectResult collect_changes()
{
ePartialUpdateCollectResult collect_result = BKE_image_partial_update_collect_changes(image,
user);
return CollectResult(this, collect_result);
}
};
} // namespace partial_update
} // namespace blender::bke::image

View File

@@ -165,7 +165,6 @@ set(SRC
intern/idprop_utils.c
intern/idtype.c
intern/image.c
intern/image_partial_update.cc
intern/image_gen.c
intern/image_gpu.cc
intern/image_save.c
@@ -823,7 +822,6 @@ if(WITH_GTESTS)
intern/cryptomatte_test.cc
intern/fcurve_test.cc
intern/idprop_serialize_test.cc
intern/image_partial_update_test.cc
intern/lattice_deform_test.cc
intern/layer_test.cc
intern/lib_id_remapper_test.cc

View File

@@ -218,7 +218,7 @@ void *BKE_camera_add(Main *bmain, const char *name)
return cam;
}
float BKE_camera_object_dof_distance(const Object *ob)
float BKE_camera_object_dof_distance(Object *ob)
{
Camera *cam = (Camera *)ob->data;
if (ob->type != OB_CAMERA) {

View File

@@ -66,8 +66,8 @@ static void vert_extrude_to_mesh_data(const Spline &spline,
if (spline.is_cyclic() && spline.evaluated_edges_size() > 1) {
MEdge &edge = r_edges[edge_offset + spline.evaluated_edges_size() - 1];
edge.v1 = vert_offset + eval_size - 1;
edge.v2 = vert_offset;
edge.v1 = vert_offset;
edge.v2 = vert_offset + eval_size - 1;
edge.flag = ME_LOOSEEDGE;
}

View File

@@ -2427,13 +2427,6 @@ int CustomData_get_stencil_layer(const CustomData *data, int type)
return (layer_index != -1) ? data->layers[layer_index].active_mask : -1;
}
const char *CustomData_get_active_layer_name(const struct CustomData *data, const int type)
{
/* Get the layer index of the active layer of this type. */
const int layer_index = CustomData_get_active_layer_index(data, type);
return layer_index < 0 ? NULL : data->layers[layer_index].name;
}
void CustomData_set_layer_active(CustomData *data, int type, int n)
{
for (int i = 0; i < data->totlayer; i++) {

View File

@@ -134,22 +134,6 @@ static void image_runtime_reset_on_copy(struct Image *image)
{
image->runtime.cache_mutex = MEM_mallocN(sizeof(ThreadMutex), "image runtime cache_mutex");
BLI_mutex_init(image->runtime.cache_mutex);
image->runtime.partial_update_register = NULL;
image->runtime.partial_update_user = NULL;
}
static void image_runtime_free_data(struct Image *image)
{
BLI_mutex_end(image->runtime.cache_mutex);
MEM_freeN(image->runtime.cache_mutex);
image->runtime.cache_mutex = NULL;
if (image->runtime.partial_update_user != NULL) {
BKE_image_partial_update_free(image->runtime.partial_update_user);
image->runtime.partial_update_user = NULL;
}
BKE_image_partial_update_register_free(image);
}
static void image_init_data(ID *id)
@@ -229,8 +213,10 @@ static void image_free_data(ID *id)
BKE_previewimg_free(&image->preview);
BLI_freelistN(&image->tiles);
BLI_freelistN(&image->gpu_refresh_areas);
image_runtime_free_data(image);
BLI_mutex_end(image->runtime.cache_mutex);
MEM_freeN(image->runtime.cache_mutex);
}
static void image_foreach_cache(ID *id,
@@ -335,8 +321,7 @@ static void image_blend_write(BlendWriter *writer, ID *id, const void *id_addres
ima->cache = NULL;
ima->gpuflag = 0;
BLI_listbase_clear(&ima->anims);
ima->runtime.partial_update_register = NULL;
ima->runtime.partial_update_user = NULL;
BLI_listbase_clear(&ima->gpu_refresh_areas);
for (int i = 0; i < 3; i++) {
for (int j = 0; j < 2; j++) {
for (int resolution = 0; resolution < IMA_TEXTURE_RESOLUTION_LEN; resolution++) {
@@ -416,6 +401,7 @@ static void image_blend_read_data(BlendDataReader *reader, ID *id)
ima->lastused = 0;
ima->gpuflag = 0;
BLI_listbase_clear(&ima->gpu_refresh_areas);
image_runtime_reset(ima);
}

View File

@@ -38,7 +38,6 @@
#include "BKE_global.h"
#include "BKE_image.h"
#include "BKE_image_partial_update.hh"
#include "BKE_main.h"
#include "GPU_capabilities.h"
@@ -47,10 +46,6 @@
#include "PIL_time.h"
using namespace blender::bke::image::partial_update;
extern "C" {
/* Prototypes. */
static void gpu_free_unused_buffers();
static void image_free_gpu(Image *ima, const bool immediate);
@@ -58,6 +53,14 @@ static void image_free_gpu_limited_scale(Image *ima);
static void image_update_gputexture_ex(
Image *ima, ImageTile *tile, ImBuf *ibuf, int x, int y, int w, int h);
/* Internal structs. */
#define IMA_PARTIAL_REFRESH_TILE_SIZE 256
struct ImagePartialRefresh {
struct ImagePartialRefresh *next, *prev;
int tile_x;
int tile_y;
};
bool BKE_image_has_gpu_texture_premultiplied_alpha(Image *image, ImBuf *ibuf)
{
if (image) {
@@ -334,48 +337,6 @@ static void image_update_reusable_textures(Image *ima,
}
}
static void image_gpu_texture_partial_update_changes_available(
Image *image, PartialUpdateChecker<ImageTileData>::CollectResult &changes)
{
while (changes.get_next_change() == ePartialUpdateIterResult::ChangeAvailable) {
const int tile_offset_x = changes.changed_region.region.xmin;
const int tile_offset_y = changes.changed_region.region.ymin;
const int tile_width = min_ii(changes.tile_data.tile_buffer->x,
BLI_rcti_size_x(&changes.changed_region.region));
const int tile_height = min_ii(changes.tile_data.tile_buffer->y,
BLI_rcti_size_y(&changes.changed_region.region));
image_update_gputexture_ex(image,
changes.tile_data.tile,
changes.tile_data.tile_buffer,
tile_offset_x,
tile_offset_y,
tile_width,
tile_height);
}
}
static void image_gpu_texture_try_partial_update(Image *image, ImageUser *iuser)
{
PartialUpdateChecker<ImageTileData> checker(image, iuser, image->runtime.partial_update_user);
PartialUpdateChecker<ImageTileData>::CollectResult changes = checker.collect_changes();
switch (changes.get_result_code()) {
case ePartialUpdateCollectResult::FullUpdateNeeded: {
image_free_gpu(image, true);
break;
}
case ePartialUpdateCollectResult::PartialChangesDetected: {
image_gpu_texture_partial_update_changes_available(image, changes);
break;
}
case ePartialUpdateCollectResult::NoChangesDetected: {
/* GPUTextures are up to date. */
break;
}
}
}
static GPUTexture *image_get_gpu_texture(Image *ima,
ImageUser *iuser,
ImBuf *ibuf,
@@ -409,20 +370,31 @@ static GPUTexture *image_get_gpu_texture(Image *ima,
}
#undef GPU_FLAGS_TO_CHECK
/* TODO(jbakker): We should replace the IMA_GPU_REFRESH flag with a call to
* BKE_image-partial_update_mark_full_update. Although the flag is quicker it leads to double
* administration. */
if ((ima->gpuflag & IMA_GPU_REFRESH) != 0) {
BKE_image_partial_update_mark_full_update(ima);
ima->gpuflag &= ~IMA_GPU_REFRESH;
/* Check if image has been updated and tagged to be updated (full or partial). */
ImageTile *tile = BKE_image_get_tile(ima, 0);
if (((ima->gpuflag & IMA_GPU_REFRESH) != 0) ||
((ibuf == nullptr || tile == nullptr) && ((ima->gpuflag & IMA_GPU_PARTIAL_REFRESH) != 0))) {
image_free_gpu(ima, true);
BLI_freelistN(&ima->gpu_refresh_areas);
ima->gpuflag &= ~(IMA_GPU_REFRESH | IMA_GPU_PARTIAL_REFRESH);
}
if (ima->runtime.partial_update_user == nullptr) {
ima->runtime.partial_update_user = BKE_image_partial_update_create(ima);
else if (ima->gpuflag & IMA_GPU_PARTIAL_REFRESH) {
BLI_assert(ibuf);
BLI_assert(tile);
ImagePartialRefresh *refresh_area;
while ((
refresh_area = static_cast<ImagePartialRefresh *>(BLI_pophead(&ima->gpu_refresh_areas)))) {
const int tile_offset_x = refresh_area->tile_x * IMA_PARTIAL_REFRESH_TILE_SIZE;
const int tile_offset_y = refresh_area->tile_y * IMA_PARTIAL_REFRESH_TILE_SIZE;
const int tile_width = MIN2(IMA_PARTIAL_REFRESH_TILE_SIZE, ibuf->x - tile_offset_x);
const int tile_height = MIN2(IMA_PARTIAL_REFRESH_TILE_SIZE, ibuf->y - tile_offset_y);
image_update_gputexture_ex(
ima, tile, ibuf, tile_offset_x, tile_offset_y, tile_width, tile_height);
MEM_freeN(refresh_area);
}
ima->gpuflag &= ~IMA_GPU_PARTIAL_REFRESH;
}
image_gpu_texture_try_partial_update(ima, iuser);
/* Tag as in active use for garbage collector. */
BKE_image_tag_time(ima);
@@ -445,7 +417,6 @@ static GPUTexture *image_get_gpu_texture(Image *ima,
/* Check if we have a valid image. If not, we return a dummy
* texture with zero bind-code so we don't keep trying. */
ImageTile *tile = BKE_image_get_tile(ima, 0);
if (tile == nullptr) {
*tex = image_gpu_texture_error_create(textarget);
return *tex;
@@ -456,7 +427,8 @@ static GPUTexture *image_get_gpu_texture(Image *ima,
if (ibuf_intern == nullptr) {
ibuf_intern = BKE_image_acquire_ibuf(ima, iuser, nullptr);
if (ibuf_intern == nullptr) {
return image_gpu_texture_error_create(textarget);
*tex = image_gpu_texture_error_create(textarget);
return *tex;
}
}
@@ -505,12 +477,13 @@ static GPUTexture *image_get_gpu_texture(Image *ima,
break;
}
if (*tex) {
GPU_texture_orig_size_set(*tex, ibuf_intern->x, ibuf_intern->y);
/* if `ibuf` was given, we don't own the `ibuf_intern` */
if (ibuf == nullptr) {
BKE_image_release_ibuf(ima, ibuf_intern, nullptr);
}
if (ibuf != ibuf_intern) {
BKE_image_release_ibuf(ima, ibuf_intern, nullptr);
if (*tex) {
GPU_texture_orig_size_set(*tex, ibuf_intern->x, ibuf_intern->y);
}
return *tex;
@@ -930,33 +903,87 @@ static void image_update_gputexture_ex(
void BKE_image_update_gputexture(Image *ima, ImageUser *iuser, int x, int y, int w, int h)
{
ImageTile *image_tile = BKE_image_get_tile_from_iuser(ima, iuser);
ImBuf *ibuf = BKE_image_acquire_ibuf(ima, iuser, nullptr);
BKE_image_update_gputexture_delayed(ima, image_tile, ibuf, x, y, w, h);
ImageTile *tile = BKE_image_get_tile_from_iuser(ima, iuser);
if ((ibuf == nullptr) || (w == 0) || (h == 0)) {
/* Full reload of texture. */
BKE_image_free_gputextures(ima);
}
image_update_gputexture_ex(ima, tile, ibuf, x, y, w, h);
BKE_image_release_ibuf(ima, ibuf, nullptr);
}
/* Mark areas on the GPUTexture that needs to be updated. The areas are marked in chunks.
* The next time the GPUTexture is used these tiles will be refreshes. This saves time
* when writing to the same place multiple times This happens for during foreground
* rendering. */
void BKE_image_update_gputexture_delayed(struct Image *ima,
struct ImageTile *image_tile,
struct ImBuf *ibuf,
int x,
int y,
int w,
int h)
void BKE_image_update_gputexture_delayed(
struct Image *ima, struct ImBuf *ibuf, int x, int y, int w, int h)
{
/* Check for full refresh. */
if (ibuf != nullptr && ima->source != IMA_SRC_TILED && x == 0 && y == 0 && w == ibuf->x &&
h == ibuf->y) {
BKE_image_partial_update_mark_full_update(ima);
if (ibuf && x == 0 && y == 0 && w == ibuf->x && h == ibuf->y) {
ima->gpuflag |= IMA_GPU_REFRESH;
}
/* Check if we can promote partial refresh to a full refresh. */
if ((ima->gpuflag & (IMA_GPU_REFRESH | IMA_GPU_PARTIAL_REFRESH)) ==
(IMA_GPU_REFRESH | IMA_GPU_PARTIAL_REFRESH)) {
ima->gpuflag &= ~IMA_GPU_PARTIAL_REFRESH;
BLI_freelistN(&ima->gpu_refresh_areas);
}
/* Image is already marked for complete refresh. */
if (ima->gpuflag & IMA_GPU_REFRESH) {
return;
}
/* Schedule the tiles that covers the requested area. */
const int start_tile_x = x / IMA_PARTIAL_REFRESH_TILE_SIZE;
const int start_tile_y = y / IMA_PARTIAL_REFRESH_TILE_SIZE;
const int end_tile_x = (x + w) / IMA_PARTIAL_REFRESH_TILE_SIZE;
const int end_tile_y = (y + h) / IMA_PARTIAL_REFRESH_TILE_SIZE;
const int num_tiles_x = (end_tile_x + 1) - (start_tile_x);
const int num_tiles_y = (end_tile_y + 1) - (start_tile_y);
const int num_tiles = num_tiles_x * num_tiles_y;
const bool allocate_on_heap = BLI_BITMAP_SIZE(num_tiles) > 16;
BLI_bitmap *requested_tiles = nullptr;
if (allocate_on_heap) {
requested_tiles = BLI_BITMAP_NEW(num_tiles, __func__);
}
else {
rcti dirty_region;
BLI_rcti_init(&dirty_region, x, x + w, y, y + h);
BKE_image_partial_update_mark_region(ima, image_tile, ibuf, &dirty_region);
requested_tiles = BLI_BITMAP_NEW_ALLOCA(num_tiles);
}
/* Mark the tiles that have already been requested. They don't need to be requested again. */
int num_tiles_not_scheduled = num_tiles;
LISTBASE_FOREACH (ImagePartialRefresh *, area, &ima->gpu_refresh_areas) {
if (area->tile_x < start_tile_x || area->tile_x > end_tile_x || area->tile_y < start_tile_y ||
area->tile_y > end_tile_y) {
continue;
}
int requested_tile_index = (area->tile_x - start_tile_x) +
(area->tile_y - start_tile_y) * num_tiles_x;
BLI_BITMAP_ENABLE(requested_tiles, requested_tile_index);
num_tiles_not_scheduled--;
if (num_tiles_not_scheduled == 0) {
break;
}
}
/* Schedule the tiles that aren't requested yet. */
if (num_tiles_not_scheduled) {
int tile_index = 0;
for (int tile_y = start_tile_y; tile_y <= end_tile_y; tile_y++) {
for (int tile_x = start_tile_x; tile_x <= end_tile_x; tile_x++) {
if (!BLI_BITMAP_TEST_BOOL(requested_tiles, tile_index)) {
ImagePartialRefresh *area = static_cast<ImagePartialRefresh *>(
MEM_mallocN(sizeof(ImagePartialRefresh), __func__));
area->tile_x = tile_x;
area->tile_y = tile_y;
BLI_addtail(&ima->gpu_refresh_areas, area);
}
tile_index++;
}
}
ima->gpuflag |= IMA_GPU_PARTIAL_REFRESH;
}
if (allocate_on_heap) {
MEM_freeN(requested_tiles);
}
}
@@ -989,4 +1016,3 @@ void BKE_image_paint_set_mipmap(Main *bmain, bool mipmap)
}
/** \} */
}

View File

@@ -1,598 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2021, Blender Foundation.
*/
/**
* \file image_gpu_partial_update.cc
* \ingroup bke
*
* To reduce the overhead of image processing this file contains a mechanism to detect areas of the
* image that are changed. These areas are organized in chunks. Changes that happen over time are
* organized in changesets.
*
* A common usecase is to update GPUTexture for drawing where only that part is uploaded that only
* changed.
*
* Usage:
*
* ```
* Image *image = ...;
* ImBuf *image_buffer = ...;
*
* // Partial_update_user should be kept for the whole session where the changes needs to be
* // tracked. Keep this instance alive as long as you need to track image changes.
*
* PartialUpdateUser *partial_update_user = BKE_image_partial_update_create(image);
*
* ...
*
* switch (BKE_image_partial_update_collect_changes(image, image_buffer))
* {
* case ePartialUpdateCollectResult::FullUpdateNeeded:
* // Unable to do partial updates. Perform a full update.
* break;
* case ePartialUpdateCollectResult::PartialChangesDetected:
* PartialUpdateRegion change;
* while (BKE_image_partial_update_get_next_change(partial_update_user, &change) ==
* ePartialUpdateIterResult::ChangeAvailable){
* // Do something with the change.
* }
* case ePartialUpdateCollectResult::NoChangesDetected:
* break;
* }
*
* ...
*
* // Free partial_update_user.
* BKE_image_partial_update_free(partial_update_user);
*
* ```
*/
#include <optional>
#include "BKE_image.h"
#include "BKE_image_partial_update.hh"
#include "DNA_image_types.h"
#include "IMB_imbuf.h"
#include "IMB_imbuf_types.h"
#include "BLI_vector.hh"
namespace blender::bke::image::partial_update {
/** \brief Size of chunks to track changes. */
constexpr int CHUNK_SIZE = 256;
/**
* \brief Max number of changesets to keep in history.
*
* A higher number would need more memory and processing
* to calculate a changeset, but would lead to do partial updates for requests that don't happen
* every frame.
*
* A to small number would lead to more full updates when changes couldn't be reconstructed from
* the available history.
*/
constexpr int MAX_HISTORY_LEN = 4;
/**
* \brief get the chunk number for the give pixel coordinate.
*
* As chunks are squares the this member can be used for both x and y axis.
*/
static int chunk_number_for_pixel(int pixel_offset)
{
int chunk_offset = pixel_offset / CHUNK_SIZE;
if (pixel_offset < 0) {
chunk_offset -= 1;
}
return chunk_offset;
}
struct PartialUpdateUserImpl;
struct PartialUpdateRegisterImpl;
/**
* Wrap PartialUpdateUserImpl to its C-struct (PartialUpdateUser).
*/
static struct PartialUpdateUser *wrap(PartialUpdateUserImpl *user)
{
return static_cast<struct PartialUpdateUser *>(static_cast<void *>(user));
}
/**
* Unwrap the PartialUpdateUser C-struct to its CPP counterpart (PartialUpdateUserImpl).
*/
static PartialUpdateUserImpl *unwrap(struct PartialUpdateUser *user)
{
return static_cast<PartialUpdateUserImpl *>(static_cast<void *>(user));
}
/**
* Wrap PartialUpdateRegisterImpl to its C-struct (PartialUpdateRegister).
*/
static struct PartialUpdateRegister *wrap(PartialUpdateRegisterImpl *partial_update_register)
{
return static_cast<struct PartialUpdateRegister *>(static_cast<void *>(partial_update_register));
}
/**
* Unwrap the PartialUpdateRegister C-struct to its CPP counterpart (PartialUpdateRegisterImpl).
*/
static PartialUpdateRegisterImpl *unwrap(struct PartialUpdateRegister *partial_update_register)
{
return static_cast<PartialUpdateRegisterImpl *>(static_cast<void *>(partial_update_register));
}
using TileNumber = int32_t;
using ChangesetID = int64_t;
constexpr ChangesetID UnknownChangesetID = -1;
struct PartialUpdateUserImpl {
/** \brief last changeset id that was seen by this user. */
ChangesetID last_changeset_id = UnknownChangesetID;
/** \brief regions that have been updated. */
Vector<PartialUpdateRegion> updated_regions;
#ifdef NDEBUG
/** \brief reference to image to validate correct API usage. */
const void *debug_image_;
#endif
/**
* \brief Clear the list of updated regions.
*
* Updated regions should be cleared at the start of #BKE_image_partial_update_collect_changes so
* the
*/
void clear_updated_regions()
{
updated_regions.clear();
}
};
/**
* \brief Dirty chunks of an ImageTile.
*
* Internally dirty tiles are grouped together in change sets to make sure that the correct
* answer can be built for different users reducing the amount of merges.
*/
struct TileChangeset {
private:
/** \brief Dirty flag for each chunk. */
std::vector<bool> chunk_dirty_flags_;
/** \brief are there dirty/ */
bool has_dirty_chunks_ = false;
public:
/** \brief Width of the tile in pixels. */
int tile_width;
/** \brief Height of the tile in pixels. */
int tile_height;
/** \brief Number of chunks along the x-axis. */
int chunk_x_len;
/** \brief Number of chunks along the y-axis. */
int chunk_y_len;
TileNumber tile_number;
void clear()
{
init_chunks(chunk_x_len, chunk_y_len);
}
/**
* \brief Update the resolution of the tile.
*
* \returns true: resolution has been updated.
* false: resolution was unchanged.
*/
bool update_resolution(const ImBuf *image_buffer)
{
if (tile_width == image_buffer->x && tile_height == image_buffer->y) {
return false;
}
tile_width = image_buffer->x;
tile_height = image_buffer->y;
int chunk_x_len = tile_width / CHUNK_SIZE;
int chunk_y_len = tile_height / CHUNK_SIZE;
init_chunks(chunk_x_len, chunk_y_len);
return true;
}
void mark_region(const rcti *updated_region)
{
int start_x_chunk = chunk_number_for_pixel(updated_region->xmin);
int end_x_chunk = chunk_number_for_pixel(updated_region->xmax - 1);
int start_y_chunk = chunk_number_for_pixel(updated_region->ymin);
int end_y_chunk = chunk_number_for_pixel(updated_region->ymax - 1);
/* Clamp tiles to tiles in image. */
start_x_chunk = max_ii(0, start_x_chunk);
start_y_chunk = max_ii(0, start_y_chunk);
end_x_chunk = min_ii(chunk_x_len - 1, end_x_chunk);
end_y_chunk = min_ii(chunk_y_len - 1, end_y_chunk);
/* Early exit when no tiles need to be updated. */
if (start_x_chunk >= chunk_x_len) {
return;
}
if (start_y_chunk >= chunk_y_len) {
return;
}
if (end_x_chunk < 0) {
return;
}
if (end_y_chunk < 0) {
return;
}
mark_chunks_dirty(start_x_chunk, start_y_chunk, end_x_chunk, end_y_chunk);
}
void mark_chunks_dirty(int start_x_chunk, int start_y_chunk, int end_x_chunk, int end_y_chunk)
{
for (int chunk_y = start_y_chunk; chunk_y <= end_y_chunk; chunk_y++) {
for (int chunk_x = start_x_chunk; chunk_x <= end_x_chunk; chunk_x++) {
int chunk_index = chunk_y * chunk_x_len + chunk_x;
chunk_dirty_flags_[chunk_index] = true;
}
}
has_dirty_chunks_ = true;
}
bool has_dirty_chunks() const
{
return has_dirty_chunks_;
}
void init_chunks(int chunk_x_len_, int chunk_y_len_)
{
chunk_x_len = chunk_x_len_;
chunk_y_len = chunk_y_len_;
const int chunk_len = chunk_x_len * chunk_y_len;
const int previous_chunk_len = chunk_dirty_flags_.size();
chunk_dirty_flags_.resize(chunk_len);
/* Fast exit. When the changeset was already empty no need to re-init the chunk_validity. */
if (!has_dirty_chunks()) {
return;
}
for (int index = 0; index < min_ii(chunk_len, previous_chunk_len); index++) {
chunk_dirty_flags_[index] = false;
}
has_dirty_chunks_ = false;
}
/** \brief Merge the given changeset into the receiver. */
void merge(const TileChangeset &other)
{
BLI_assert(chunk_x_len == other.chunk_x_len);
BLI_assert(chunk_y_len == other.chunk_y_len);
const int chunk_len = chunk_x_len * chunk_y_len;
for (int chunk_index = 0; chunk_index < chunk_len; chunk_index++) {
chunk_dirty_flags_[chunk_index] = chunk_dirty_flags_[chunk_index] |
other.chunk_dirty_flags_[chunk_index];
}
has_dirty_chunks_ |= other.has_dirty_chunks_;
}
/** \brief has a chunk changed inside this changeset. */
bool is_chunk_dirty(int chunk_x, int chunk_y) const
{
const int chunk_index = chunk_y * chunk_x_len + chunk_x;
return chunk_dirty_flags_[chunk_index];
}
};
/** \brief Changeset keeping track of changes for an image */
struct Changeset {
private:
Vector<TileChangeset> tiles;
public:
/** \brief Keep track if any of the tiles have dirty chunks. */
bool has_dirty_chunks;
/**
* \brief Retrieve the TileChangeset for the given ImageTile.
*
* When the TileChangeset isn't found, it will be added.
*/
TileChangeset &operator[](const ImageTile *image_tile)
{
for (TileChangeset &tile_changeset : tiles) {
if (tile_changeset.tile_number == image_tile->tile_number) {
return tile_changeset;
}
}
TileChangeset tile_changeset;
tile_changeset.tile_number = image_tile->tile_number;
tiles.append_as(tile_changeset);
return tiles.last();
}
/** \brief Does this changeset contain data for the given tile. */
bool has_tile(const ImageTile *image_tile)
{
for (TileChangeset &tile_changeset : tiles) {
if (tile_changeset.tile_number == image_tile->tile_number) {
return true;
}
}
return false;
}
/** \brief Clear this changeset. */
void clear()
{
tiles.clear();
has_dirty_chunks = false;
}
};
/**
 * \brief Partial update changes stored inside the image runtime.
 *
 * The PartialUpdateRegisterImpl will keep track of changes over time. Changes are groups inside
 * TileChangesets.
 */
struct PartialUpdateRegisterImpl {
  /** \brief changeset id of the first changeset kept in #history. */
  ChangesetID first_changeset_id;
  /** \brief changeset id of the top changeset kept in #history. */
  ChangesetID last_changeset_id;
  /** \brief history of changesets. */
  Vector<Changeset> history;
  /** \brief The current changeset. New changes will be added to this changeset. */
  Changeset current_changeset;

  /**
   * \brief Sync the tile changeset's chunk grid with the resolution of the given image buffer.
   *
   * When the resolution changed while uncommitted dirty chunks existed, any recorded history is
   * invalidated by forcing a full update (skipped when there is no history to invalidate).
   */
  void update_resolution(const ImageTile *image_tile, const ImBuf *image_buffer)
  {
    TileChangeset &tile_changeset = current_changeset[image_tile];
    const bool has_dirty_chunks = tile_changeset.has_dirty_chunks();
    const bool resolution_changed = tile_changeset.update_resolution(image_buffer);

    if (has_dirty_chunks && resolution_changed && !history.is_empty()) {
      mark_full_update();
    }
  }

  /**
   * \brief Discard all recorded changes.
   *
   * Advances #last_changeset_id and makes it the new #first_changeset_id, so users holding an
   * older changeset id fail #can_construct and receive a full update.
   */
  void mark_full_update()
  {
    history.clear();
    last_changeset_id++;
    current_changeset.clear();
    first_changeset_id = last_changeset_id;
  }

  /** \brief Record a dirty region on the given tile in the current (uncommitted) changeset. */
  void mark_region(const ImageTile *image_tile, const rcti *updated_region)
  {
    TileChangeset &tile_changeset = current_changeset[image_tile];
    tile_changeset.mark_region(updated_region);
    current_changeset.has_dirty_chunks |= tile_changeset.has_dirty_chunks();
  }

  /** \brief Commit pending changes (if any) so #current_changeset is empty afterwards. */
  void ensure_empty_changeset()
  {
    if (!current_changeset.has_dirty_chunks) {
      /* No need to create a new changeset when previous changeset does not contain any dirty
       * tiles. */
      return;
    }
    commit_current_changeset();
    limit_history();
  }

  /** \brief Move the current changeset to the history and resets the current changeset. */
  void commit_current_changeset()
  {
    history.append_as(std::move(current_changeset));
    current_changeset.clear();
    last_changeset_id++;
  }

  /** \brief Limit the number of items in the changeset. */
  void limit_history()
  {
    const int num_items_to_remove = max_ii(history.size() - MAX_HISTORY_LEN, 0);
    if (num_items_to_remove == 0) {
      return;
    }
    history.remove(0, num_items_to_remove);
    /* The oldest changesets were dropped, so the first known changeset id moves forward. */
    first_changeset_id += num_items_to_remove;
  }

  /**
   * \brief Check if data is available to construct the update tiles for the given
   * changeset_id.
   *
   * The update tiles can be constructed when the requested changeset id is not older than
   * #first_changeset_id; older changesets have been trimmed from #history by #limit_history or
   * discarded by #mark_full_update.
   */
  bool can_construct(ChangesetID changeset_id)
  {
    return changeset_id >= first_changeset_id;
  }

  /**
   * \brief collect all historic changes since a given changeset.
   *
   * Returns std::nullopt when no history entry since `from_changeset` contains data for the
   * given tile; otherwise the merged changed chunks of all such entries.
   */
  std::optional<TileChangeset> changed_tile_chunks_since(const ImageTile *image_tile,
                                                         const ChangesetID from_changeset)
  {
    std::optional<TileChangeset> changed_chunks = std::nullopt;
    /* `history[0]` corresponds to #first_changeset_id, so index relative to it. */
    for (int index = from_changeset - first_changeset_id; index < history.size(); index++) {
      if (!history[index].has_tile(image_tile)) {
        continue;
      }

      TileChangeset &tile_changeset = history[index][image_tile];
      if (!changed_chunks.has_value()) {
        /* First matching entry: allocate the result with the same chunk grid. */
        changed_chunks = std::make_optional<TileChangeset>();
        changed_chunks->init_chunks(tile_changeset.chunk_x_len, tile_changeset.chunk_y_len);
        changed_chunks->tile_number = image_tile->tile_number;
      }

      changed_chunks->merge(tile_changeset);
    }
    return changed_chunks;
  }
};
/** \brief Return the image's partial update register, creating it on first use. */
static PartialUpdateRegister *image_partial_update_register_ensure(Image *image)
{
  if (image->runtime.partial_update_register == nullptr) {
    image->runtime.partial_update_register = wrap(MEM_new<PartialUpdateRegisterImpl>(__func__));
  }
  return image->runtime.partial_update_register;
}
/**
 * \brief Collect the changes that happened since the user's last invocation.
 *
 * Returns FullUpdateNeeded when the user's changeset id is older than the register's history,
 * NoChangesDetected when nothing changed since the last invocation, or PartialChangesDetected
 * after filling the user's `updated_regions` with the dirty chunk rectangles.
 */
ePartialUpdateCollectResult BKE_image_partial_update_collect_changes(Image *image,
                                                                     PartialUpdateUser *user)
{
  PartialUpdateUserImpl *user_impl = unwrap(user);

#ifndef NDEBUG
  /* Fix: this was `#ifdef NDEBUG`, which compiled the assert only in release builds — exactly
   * where BLI_assert expands to a no-op — so the check could never fire in any configuration.
   * Debug-only validation belongs under `#ifndef NDEBUG`. NOTE(review): the declaration and
   * assignment of `debug_image_` must use the same guard polarity — verify the struct. */
  BLI_assert(image == user_impl->debug_image_);
#endif

  user_impl->clear_updated_regions();

  PartialUpdateRegisterImpl *partial_updater = unwrap(image_partial_update_register_ensure(image));
  partial_updater->ensure_empty_changeset();

  /* The requested history was trimmed/discarded: the caller must refresh everything. */
  if (!partial_updater->can_construct(user_impl->last_changeset_id)) {
    user_impl->last_changeset_id = partial_updater->last_changeset_id;
    return ePartialUpdateCollectResult::FullUpdateNeeded;
  }

  /* Check if there are changes since last invocation for the user. */
  if (user_impl->last_changeset_id == partial_updater->last_changeset_id) {
    return ePartialUpdateCollectResult::NoChangesDetected;
  }

  /* Collect changed tiles. */
  LISTBASE_FOREACH (ImageTile *, tile, &image->tiles) {
    std::optional<TileChangeset> changed_chunks = partial_updater->changed_tile_chunks_since(
        tile, user_impl->last_changeset_id);
    /* Check if chunks of this tile are dirty. */
    if (!changed_chunks.has_value()) {
      continue;
    }
    if (!changed_chunks->has_dirty_chunks()) {
      continue;
    }

    /* Convert tiles in the changeset to rectangles that are dirty. */
    for (int chunk_y = 0; chunk_y < changed_chunks->chunk_y_len; chunk_y++) {
      for (int chunk_x = 0; chunk_x < changed_chunks->chunk_x_len; chunk_x++) {
        if (!changed_chunks->is_chunk_dirty(chunk_x, chunk_y)) {
          continue;
        }

        PartialUpdateRegion region;
        region.tile_number = tile->tile_number;
        BLI_rcti_init(&region.region,
                      chunk_x * CHUNK_SIZE,
                      (chunk_x + 1) * CHUNK_SIZE,
                      chunk_y * CHUNK_SIZE,
                      (chunk_y + 1) * CHUNK_SIZE);
        user_impl->updated_regions.append_as(region);
      }
    }
  }

  user_impl->last_changeset_id = partial_updater->last_changeset_id;
  return ePartialUpdateCollectResult::PartialChangesDetected;
}
/** \brief Pop the next collected dirty region into `r_region`; Finished when none remain. */
ePartialUpdateIterResult BKE_image_partial_update_get_next_change(PartialUpdateUser *user,
                                                                  PartialUpdateRegion *r_region)
{
  PartialUpdateUserImpl *user_impl = unwrap(user);
  if (user_impl->updated_regions.is_empty()) {
    return ePartialUpdateIterResult::Finished;
  }

  /* Regions are handed out in LIFO order. */
  *r_region = user_impl->updated_regions.pop_last();
  return ePartialUpdateIterResult::ChangeAvailable;
}
} // namespace blender::bke::image::partial_update
extern "C" {
using namespace blender::bke::image::partial_update;
// TODO(jbakker): cleanup parameter.
/**
 * \brief Create a new partial update user for the given image.
 *
 * The caller owns the returned user and must free it with #BKE_image_partial_update_free.
 */
struct PartialUpdateUser *BKE_image_partial_update_create(const struct Image *image)
{
  PartialUpdateUserImpl *user_impl = MEM_new<PartialUpdateUserImpl>(__func__);

#ifndef NDEBUG
  /* Fix: this was `#ifdef NDEBUG` (inverted): `debug_image_` was only recorded in release
   * builds, where the BLI_assert that reads it is compiled out. Record it in debug builds so
   * API-usage validation can actually work. NOTE(review): keep the guard polarity in sync with
   * the declaration of `debug_image_` and its use in the collect-changes assert. */
  user_impl->debug_image_ = image;
#else
  UNUSED_VARS(image);
#endif

  return wrap(user_impl);
}
/** \brief Free a partial update user created by #BKE_image_partial_update_create. */
void BKE_image_partial_update_free(PartialUpdateUser *user)
{
  MEM_delete(unwrap(user));
}
/* --- Image side --- */
/** \brief Free the image's partial update register (if any) and reset the runtime pointer. */
void BKE_image_partial_update_register_free(Image *image)
{
  PartialUpdateRegisterImpl *reg = unwrap(image->runtime.partial_update_register);
  if (reg != nullptr) {
    MEM_delete(reg);
  }
  image->runtime.partial_update_register = nullptr;
}
/** \brief Record `updated_region` as dirty on the given tile of the image. */
void BKE_image_partial_update_mark_region(Image *image,
                                          const ImageTile *image_tile,
                                          const ImBuf *image_buffer,
                                          const rcti *updated_region)
{
  PartialUpdateRegisterImpl *reg = unwrap(image_partial_update_register_ensure(image));
  /* Keep the chunk grid in sync with the buffer before recording the dirty region. */
  reg->update_resolution(image_tile, image_buffer);
  reg->mark_region(image_tile, updated_region);
}
/** \brief Invalidate all recorded changes; every user will be told to do a full update. */
void BKE_image_partial_update_mark_full_update(Image *image)
{
  unwrap(image_partial_update_register_ensure(image))->mark_full_update();
}
}

View File

@@ -1,393 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* The Original Code is Copyright (C) 2020 by Blender Foundation.
*/
#include "testing/testing.h"
#include "CLG_log.h"
#include "BKE_appdir.h"
#include "BKE_idtype.h"
#include "BKE_image.h"
#include "BKE_image_partial_update.hh"
#include "BKE_main.h"
#include "IMB_imbuf.h"
#include "IMB_moviecache.h"
#include "DNA_image_types.h"
#include "MEM_guardedalloc.h"
namespace blender::bke::image::partial_update {
/* Fill color for the generated test image (opaque black). */
constexpr float black_color[4] = {0.0f, 0.0f, 0.0f, 1.0f};

/**
 * Test fixture: creates a 1024x1024 generated image, acquires its buffer and a partial update
 * user, and tears everything (including the Blender subsystems it initialized) down again.
 */
class ImagePartialUpdateTest : public testing::Test {
 protected:
  Main *bmain;
  Image *image;
  ImageTile *image_tile;
  ImageUser image_user = {nullptr};
  ImBuf *image_buffer;
  PartialUpdateUser *partial_update_user;

 private:
  /* Create a blank generated image of the given size owned by #bmain. */
  Image *create_test_image(int width, int height)
  {
    return BKE_image_add_generated(bmain,
                                   width,
                                   height,
                                   "Test Image",
                                   32,
                                   true,
                                   IMA_GENTYPE_BLANK,
                                   black_color,
                                   false,
                                   false,
                                   false);
  }

 protected:
  void SetUp() override
  {
    /* Initialize the subsystems image code depends on; mirrored in reverse in TearDown. */
    CLG_init();
    BKE_idtype_init();
    BKE_appdir_init();
    IMB_init();

    bmain = BKE_main_new();
    /* Creating an image generates a mem-leak during tests. */
    image = create_test_image(1024, 1024);
    image_tile = BKE_image_get_tile(image, 0);
    image_buffer = BKE_image_acquire_ibuf(image, nullptr, nullptr);

    partial_update_user = BKE_image_partial_update_create(image);
  }

  void TearDown() override
  {
    BKE_image_release_ibuf(image, image_buffer, nullptr);
    BKE_image_partial_update_free(partial_update_user);
    BKE_main_free(bmain);

    IMB_moviecache_destruct();
    IMB_exit();
    BKE_appdir_exit();
    CLG_exit();
  }
};
/* A full-update mark must yield exactly one FullUpdateNeeded, then NoChangesDetected. */
TEST_F(ImagePartialUpdateTest, mark_full_update)
{
  ePartialUpdateCollectResult result;
  /* First tile should always return a full update. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::FullUpdateNeeded);
  /* Second invoke should now detect no changes. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  /* Mark full update */
  BKE_image_partial_update_mark_full_update(image);

  /* Validate need full update followed by no changes. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::FullUpdateNeeded);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);
}
/* Marking one small region must be reported as a single partial change, exactly once. */
TEST_F(ImagePartialUpdateTest, mark_single_tile)
{
  ePartialUpdateCollectResult result;
  /* First tile should always return a full update. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::FullUpdateNeeded);
  /* Second invoke should now detect no changes. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  /* Mark region. */
  rcti region;
  BLI_rcti_init(&region, 10, 20, 40, 50);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);

  /* Partial Update should be available. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::PartialChangesDetected);

  /* Check tiles: exactly one change, whose rectangle contains the marked region. */
  PartialUpdateRegion changed_region;
  ePartialUpdateIterResult iter_result;
  iter_result = BKE_image_partial_update_get_next_change(partial_update_user, &changed_region);
  EXPECT_EQ(iter_result, ePartialUpdateIterResult::ChangeAvailable);
  EXPECT_EQ(BLI_rcti_inside_rcti(&changed_region.region, &region), true);
  iter_result = BKE_image_partial_update_get_next_change(partial_update_user, &changed_region);
  EXPECT_EQ(iter_result, ePartialUpdateIterResult::Finished);

  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);
}
/* Two disjoint regions must each be reported as their own partial change. */
TEST_F(ImagePartialUpdateTest, mark_unconnected_tiles)
{
  ePartialUpdateCollectResult result;
  /* First tile should always return a full update. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::FullUpdateNeeded);
  /* Second invoke should now detect no changes. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  /* Mark region. */
  rcti region_a;
  BLI_rcti_init(&region_a, 10, 20, 40, 50);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region_a);
  rcti region_b;
  BLI_rcti_init(&region_b, 710, 720, 740, 750);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region_b);

  /* Partial Update should be available. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::PartialChangesDetected);

  /* Check tiles. Changes are popped LIFO, hence region_b is returned first. */
  PartialUpdateRegion changed_region;
  ePartialUpdateIterResult iter_result;
  iter_result = BKE_image_partial_update_get_next_change(partial_update_user, &changed_region);
  EXPECT_EQ(iter_result, ePartialUpdateIterResult::ChangeAvailable);
  EXPECT_EQ(BLI_rcti_inside_rcti(&changed_region.region, &region_b), true);
  iter_result = BKE_image_partial_update_get_next_change(partial_update_user, &changed_region);
  EXPECT_EQ(iter_result, ePartialUpdateIterResult::ChangeAvailable);
  EXPECT_EQ(BLI_rcti_inside_rcti(&changed_region.region, &region_a), true);
  iter_result = BKE_image_partial_update_get_next_change(partial_update_user, &changed_region);
  EXPECT_EQ(iter_result, ePartialUpdateIterResult::Finished);

  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);
}
/* Regions fully outside the 1024x1024 image (on each axis and each diagonal) must be ignored. */
TEST_F(ImagePartialUpdateTest, donot_mark_outside_image)
{
  ePartialUpdateCollectResult result;
  /* First tile should always return a full update. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::FullUpdateNeeded);
  /* Second invoke should now detect no changes. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  /* Mark region. */
  rcti region;

  /* Axis. */
  BLI_rcti_init(&region, -100, 0, 50, 100);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  BLI_rcti_init(&region, 1024, 1100, 50, 100);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  BLI_rcti_init(&region, 50, 100, -100, 0);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  BLI_rcti_init(&region, 50, 100, 1024, 1100);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  /* Diagonals. */
  BLI_rcti_init(&region, -100, 0, -100, 0);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  BLI_rcti_init(&region, -100, 0, 1024, 1100);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  BLI_rcti_init(&region, 1024, 1100, -100, 0);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  BLI_rcti_init(&region, 1024, 1100, 1024, 1100);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);
}
/* Single-pixel regions at the image corners/edges must still be detected as partial changes. */
TEST_F(ImagePartialUpdateTest, mark_inside_image)
{
  ePartialUpdateCollectResult result;
  /* First tile should always return a full update. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::FullUpdateNeeded);
  /* Second invoke should now detect no changes. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  /* Mark region. */
  rcti region;
  BLI_rcti_init(&region, 0, 1, 0, 1);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::PartialChangesDetected);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  BLI_rcti_init(&region, 1023, 1024, 0, 1);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::PartialChangesDetected);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  BLI_rcti_init(&region, 1023, 1024, 1023, 1024);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::PartialChangesDetected);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  BLI_rcti_init(&region, 1023, 1024, 0, 1);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::PartialChangesDetected);
}
/* Two consecutive mark/collect cycles must each see only their own region. */
TEST_F(ImagePartialUpdateTest, sequential_mark_region)
{
  ePartialUpdateCollectResult result;
  /* First tile should always return a full update. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::FullUpdateNeeded);
  /* Second invoke should now detect no changes. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  {
    /* Mark region. */
    rcti region;
    BLI_rcti_init(&region, 10, 20, 40, 50);
    BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);

    /* Partial Update should be available. */
    result = BKE_image_partial_update_collect_changes(image, partial_update_user);
    EXPECT_EQ(result, ePartialUpdateCollectResult::PartialChangesDetected);

    /* Check tiles. */
    PartialUpdateRegion changed_region;
    ePartialUpdateIterResult iter_result;
    iter_result = BKE_image_partial_update_get_next_change(partial_update_user, &changed_region);
    EXPECT_EQ(iter_result, ePartialUpdateIterResult::ChangeAvailable);
    EXPECT_EQ(BLI_rcti_inside_rcti(&changed_region.region, &region), true);
    iter_result = BKE_image_partial_update_get_next_change(partial_update_user, &changed_region);
    EXPECT_EQ(iter_result, ePartialUpdateIterResult::Finished);

    result = BKE_image_partial_update_collect_changes(image, partial_update_user);
    EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);
  }

  {
    /* Mark different region. */
    rcti region;
    BLI_rcti_init(&region, 710, 720, 740, 750);
    BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);

    /* Partial Update should be available. */
    result = BKE_image_partial_update_collect_changes(image, partial_update_user);
    EXPECT_EQ(result, ePartialUpdateCollectResult::PartialChangesDetected);

    /* Check tiles. */
    PartialUpdateRegion changed_region;
    ePartialUpdateIterResult iter_result;
    iter_result = BKE_image_partial_update_get_next_change(partial_update_user, &changed_region);
    EXPECT_EQ(iter_result, ePartialUpdateIterResult::ChangeAvailable);
    EXPECT_EQ(BLI_rcti_inside_rcti(&changed_region.region, &region), true);
    iter_result = BKE_image_partial_update_get_next_change(partial_update_user, &changed_region);
    EXPECT_EQ(iter_result, ePartialUpdateIterResult::Finished);

    result = BKE_image_partial_update_collect_changes(image, partial_update_user);
    EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);
  }
}
/* A region crossing chunk boundaries must be split into one change per dirty chunk (4 here). */
TEST_F(ImagePartialUpdateTest, mark_multiple_chunks)
{
  ePartialUpdateCollectResult result;
  /* First tile should always return a full update. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::FullUpdateNeeded);
  /* Second invoke should now detect no changes. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::NoChangesDetected);

  /* Mark region overlapping multiple chunks on both axes. */
  rcti region;
  BLI_rcti_init(&region, 300, 700, 300, 700);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);

  /* Partial Update should be available. */
  result = BKE_image_partial_update_collect_changes(image, partial_update_user);
  EXPECT_EQ(result, ePartialUpdateCollectResult::PartialChangesDetected);

  /* Check tiles: each returned rectangle intersects the marked region; 2x2 chunks expected. */
  PartialUpdateRegion changed_region;
  int num_chunks_found = 0;
  while (BKE_image_partial_update_get_next_change(partial_update_user, &changed_region) ==
         ePartialUpdateIterResult::ChangeAvailable) {
    BLI_rcti_isect(&changed_region.region, &region, nullptr);
    num_chunks_found++;
  }
  EXPECT_EQ(num_chunks_found, 4);
}
/* Same scenario as mark_multiple_chunks, driven through the PartialUpdateChecker iterator API. */
TEST_F(ImagePartialUpdateTest, iterator)
{
  PartialUpdateChecker<NoTileData> checker(image, &image_user, partial_update_user);

  /* First tile should always return a full update. */
  PartialUpdateChecker<NoTileData>::CollectResult changes = checker.collect_changes();
  EXPECT_EQ(changes.get_result_code(), ePartialUpdateCollectResult::FullUpdateNeeded);
  /* Second invoke should now detect no changes. */
  changes = checker.collect_changes();
  EXPECT_EQ(changes.get_result_code(), ePartialUpdateCollectResult::NoChangesDetected);

  /* Mark region. */
  rcti region;
  BLI_rcti_init(&region, 300, 700, 300, 700);
  BKE_image_partial_update_mark_region(image, image_tile, image_buffer, &region);

  /* Partial Update should be available. */
  changes = checker.collect_changes();
  EXPECT_EQ(changes.get_result_code(), ePartialUpdateCollectResult::PartialChangesDetected);

  /* Check tiles: iterate all changes; 2x2 dirty chunks expected. */
  int num_tiles_found = 0;
  while (changes.get_next_change() == ePartialUpdateIterResult::ChangeAvailable) {
    BLI_rcti_isect(&changes.changed_region.region, &region, nullptr);
    num_tiles_found++;
  }
  EXPECT_EQ(num_tiles_found, 4);
}
} // namespace blender::bke::image::partial_update

View File

@@ -1553,13 +1553,11 @@ static void lib_override_resync_tagging_finalize_recurse(Main *bmain,
CLOG_ERROR(
&LOG,
"While processing indirect level %d, ID %s from lib %s of indirect level %d detected "
"as needing resync, skipping.",
"as needing resync.",
library_indirect_level,
id->name,
id->lib->filepath,
id->lib->temp_index);
id->tag &= ~LIB_TAG_LIB_OVERRIDE_NEED_RESYNC;
return;
}
MainIDRelationsEntry *entry = BLI_ghash_lookup(bmain->relations->relations_from_pointers, id);

View File

@@ -46,7 +46,7 @@ void BKE_mesh_foreach_mapped_vert(
BMIter iter;
BMVert *eve;
int i;
if (mesh->runtime.edit_data != NULL && mesh->runtime.edit_data->vertexCos != NULL) {
if (mesh->runtime.edit_data->vertexCos != NULL) {
const float(*vertexCos)[3] = mesh->runtime.edit_data->vertexCos;
const float(*vertexNos)[3];
if (flag & MESH_FOREACH_USE_NORMAL) {
@@ -106,7 +106,7 @@ void BKE_mesh_foreach_mapped_edge(
BMIter iter;
BMEdge *eed;
int i;
if (mesh->runtime.edit_data != NULL && mesh->runtime.edit_data->vertexCos != NULL) {
if (mesh->runtime.edit_data->vertexCos != NULL) {
const float(*vertexCos)[3] = mesh->runtime.edit_data->vertexCos;
BM_mesh_elem_index_ensure(bm, BM_VERT);
@@ -164,8 +164,7 @@ void BKE_mesh_foreach_mapped_loop(Mesh *mesh,
BMIter iter;
BMFace *efa;
const float(*vertexCos)[3] = mesh->runtime.edit_data ? mesh->runtime.edit_data->vertexCos :
NULL;
const float(*vertexCos)[3] = mesh->runtime.edit_data->vertexCos;
/* XXX: investigate using EditMesh data. */
const float(*lnors)[3] = (flag & MESH_FOREACH_USE_NORMAL) ?
@@ -232,7 +231,7 @@ void BKE_mesh_foreach_mapped_face_center(
void *userData,
MeshForeachFlag flag)
{
if (mesh->edit_mesh != NULL && mesh->runtime.edit_data != NULL) {
if (mesh->edit_mesh != NULL) {
BMEditMesh *em = mesh->edit_mesh;
BMesh *bm = em->bm;
const float(*polyCos)[3];

View File

@@ -420,7 +420,7 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
/* calculate custom normals into loop_normals, then mirror first half into second half */
BKE_mesh_normals_loop_split(result->mvert,
BKE_mesh_vertex_normals_ensure(result),
BKE_mesh_vertex_normals_ensure(mesh),
result->totvert,
result->medge,
result->totedge,
@@ -428,7 +428,7 @@ Mesh *BKE_mesh_mirror_apply_mirror_on_axis_for_modifier(MirrorModifierData *mmd,
loop_normals,
totloop,
result->mpoly,
BKE_mesh_poly_normals_ensure(result),
BKE_mesh_poly_normals_ensure(mesh),
totpoly,
true,
mesh->smoothresh,

View File

@@ -107,20 +107,6 @@ struct float4x4 {
return &values[0][0];
}
/* Mutable access to the 4 floats of `values[index]`; `index` is asserted to be in [0, 4). */
float *operator[](const int64_t index)
{
  BLI_assert(index >= 0);
  BLI_assert(index < 4);
  return &values[index][0];
}
/* Read-only access to the 4 floats of `values[index]`; `index` is asserted to be in [0, 4). */
const float *operator[](const int64_t index) const
{
  BLI_assert(index >= 0);
  BLI_assert(index < 4);
  return &values[index][0];
}
using c_style_float4x4 = float[4][4];
c_style_float4x4 &ptr()
{

View File

@@ -404,7 +404,6 @@ void invert_m4_m4_safe_ortho(float Ainv[4][4], const float A[4][4]);
void scale_m3_fl(float R[3][3], float scale);
void scale_m4_fl(float R[4][4], float scale);
void scale_m4_v2(float R[4][4], const float scale[2]);
/**
* This computes the overall volume scale factor of a transformation matrix.

View File

@@ -380,11 +380,6 @@ void BLI_path_normalize_unc_16(wchar_t *path_16);
void BLI_path_normalize_unc(char *path_16, int maxlen);
#endif
/**
* Returns true if the given paths are equal.
*/
bool BLI_paths_equal(const char *p1, const char *p2);
/**
* Appends a suffix to the string, fitting it before the extension
*

View File

@@ -337,18 +337,6 @@ constexpr int64_t StringRefBase::find(StringRef str, int64_t pos) const
return index_or_npos_to_int64(std::string_view(*this).find(str, static_cast<size_t>(pos)));
}
/* Index of the last occurrence of `c` at or before `pos`; a `std::string_view::npos` result is
 * mapped through `index_or_npos_to_int64`. `pos` must be non-negative. */
constexpr int64_t StringRefBase::rfind(char c, int64_t pos) const
{
  BLI_assert(pos >= 0);
  return index_or_npos_to_int64(std::string_view(*this).rfind(c, static_cast<size_t>(pos)));
}
/* Index of the last occurrence of `str` starting at or before `pos`; a
 * `std::string_view::npos` result is mapped through `index_or_npos_to_int64`. */
constexpr int64_t StringRefBase::rfind(StringRef str, int64_t pos) const
{
  BLI_assert(pos >= 0);
  return index_or_npos_to_int64(std::string_view(*this).rfind(str, static_cast<size_t>(pos)));
}
constexpr int64_t StringRefBase::find_first_of(StringRef chars, int64_t pos) const
{
BLI_assert(pos >= 0);

View File

@@ -2296,17 +2296,6 @@ void scale_m4_fl(float R[4][4], float scale)
R[3][0] = R[3][1] = R[3][2] = 0.0;
}
/**
 * Build a 4x4 matrix scaling by `scale[0]` on the first axis and `scale[1]` on the second,
 * identity on the remaining two axes.
 */
void scale_m4_v2(float R[4][4], const float scale[2])
{
  /* Zero everything first, then write the diagonal. */
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      R[i][j] = 0.0f;
    }
  }
  R[0][0] = scale[0];
  R[1][1] = scale[1];
  R[2][2] = 1.0f;
  R[3][3] = 1.0f;
}
void translate_m4(float mat[4][4], float Tx, float Ty, float Tz)
{
mat[3][0] += (Tx * mat[0][0] + Ty * mat[1][0] + Tz * mat[2][0]);

View File

@@ -1829,21 +1829,3 @@ void BLI_path_slash_native(char *path)
BLI_str_replace_char(path + BLI_path_unc_prefix_len(path), ALTSEP, SEP);
#endif
}
/**
 * Returns true when `p1` and `p2` refer to the same path after converting both to native
 * separators and normalizing them. The inputs themselves are never modified.
 */
bool BLI_paths_equal(const char *p1, const char *p2)
{
  char path_a[FILE_MAX];
  char path_b[FILE_MAX];

  /* Work on local copies so the inputs stay untouched. */
  BLI_strncpy(path_a, p1, sizeof(path_a));
  BLI_strncpy(path_b, p2, sizeof(path_b));

  /* Bring both paths into a canonical form before comparing. */
  BLI_path_slash_native(path_a);
  BLI_path_slash_native(path_b);
  BLI_path_normalize(NULL, path_a);
  BLI_path_normalize(NULL, path_b);

  return BLI_path_cmp(path_a, path_b) == 0;
}

View File

@@ -154,7 +154,7 @@ set(SRC
engines/workbench/workbench_materials.c
engines/workbench/workbench_opaque.c
engines/workbench/workbench_render.c
engines/workbench/workbench_shader.cc
engines/workbench/workbench_shader.c
engines/workbench/workbench_shadow.c
engines/workbench/workbench_transparent.c
engines/workbench/workbench_volume.c
@@ -227,17 +227,11 @@ set(SRC
engines/eevee/eevee_lut.h
engines/eevee/eevee_private.h
engines/external/external_engine.h
engines/image/image_batches.hh
engines/image/image_drawing_mode.hh
engines/image/image_drawing_mode_image_space.hh
engines/image/image_engine.h
engines/image/image_instance_data.hh
engines/image/image_partial_updater.hh
engines/image/image_private.hh
engines/image/image_shader_params.hh
engines/image/image_space_image.hh
engines/image/image_space_node.hh
engines/image/image_space.hh
engines/image/image_wrappers.hh
engines/workbench/workbench_engine.h
engines/workbench/workbench_private.h
engines/workbench/workbench_shader_shared.h
@@ -348,6 +342,7 @@ set(GLSL_SRC
engines/workbench/shaders/workbench_common_lib.glsl
engines/workbench/shaders/workbench_composite_frag.glsl
engines/workbench/shaders/workbench_curvature_lib.glsl
engines/workbench/shaders/workbench_data_lib.glsl
engines/workbench/shaders/workbench_effect_cavity_frag.glsl
engines/workbench/shaders/workbench_effect_dof_frag.glsl
engines/workbench/shaders/workbench_effect_outline_frag.glsl
@@ -362,6 +357,7 @@ set(GLSL_SRC
engines/workbench/shaders/workbench_prepass_hair_vert.glsl
engines/workbench/shaders/workbench_prepass_pointcloud_vert.glsl
engines/workbench/shaders/workbench_prepass_vert.glsl
engines/workbench/shaders/workbench_shader_interface_lib.glsl
engines/workbench/shaders/workbench_shadow_caps_geom.glsl
engines/workbench/shaders/workbench_shadow_debug_frag.glsl
engines/workbench/shaders/workbench_shadow_geom.glsl
@@ -382,7 +378,6 @@ set(GLSL_SRC
intern/shaders/common_hair_refine_comp.glsl
intern/shaders/common_math_lib.glsl
intern/shaders/common_math_geom_lib.glsl
intern/shaders/common_view_clipping_lib.glsl
intern/shaders/common_view_lib.glsl
intern/shaders/common_fxaa_lib.glsl
intern/shaders/common_smaa_lib.glsl
@@ -537,7 +532,7 @@ set(GLSL_SOURCE_CONTENT "")
foreach(GLSL_FILE ${GLSL_SRC})
get_filename_component(GLSL_FILE_NAME ${GLSL_FILE} NAME)
string(REPLACE "." "_" GLSL_FILE_NAME_UNDERSCORES ${GLSL_FILE_NAME})
string(APPEND GLSL_SOURCE_CONTENT "SHADER_SOURCE\(datatoc_${GLSL_FILE_NAME_UNDERSCORES}, \"${GLSL_FILE_NAME}\", \"${GLSL_FILE}\"\)\n")
string(APPEND GLSL_SOURCE_CONTENT "SHADER_SOURCE\(datatoc_${GLSL_FILE_NAME_UNDERSCORES}, \"${GLSL_FILE_NAME}\"\)\n")
endforeach()
set(glsl_source_list_file "${CMAKE_CURRENT_BINARY_DIR}/glsl_draw_source_list.h")

View File

@@ -1,106 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2021, Blender Foundation.
*/
/** \file
* \ingroup draw_engine
*/
#pragma once
#include "image_texture_info.hh"
/** \brief Create GPUBatch for a IMAGE_ScreenSpaceTextureInfo. */
class BatchUpdater {
  TextureInfo &info;

  /* Vertex format shared by the pos/uv attributes; lazily set up in #ensure_format. */
  GPUVertFormat format = {0};
  int pos_id;
  int uv_id;

 public:
  BatchUpdater(TextureInfo &info) : info(info)
  {
  }

  /* Rebuild #info.batch from the current clipping/uv bounds of #info. */
  void update_batch()
  {
    ensure_clear_batch();
    ensure_format();
    init_batch();
  }

  void discard_batch()
  {
    GPU_BATCH_DISCARD_SAFE(info.batch);
  }

 private:
  /* Clear any previous batch contents and allocate the batch on first use. */
  void ensure_clear_batch()
  {
    GPU_BATCH_CLEAR_SAFE(info.batch);
    if (info.batch == nullptr) {
      info.batch = GPU_batch_calloc();
    }
  }

  /* Initialize the batch as a triangle fan; the batch takes ownership of the vbo. */
  void init_batch()
  {
    GPUVertBuf *vbo = create_vbo();
    GPU_batch_init_ex(info.batch, GPU_PRIM_TRI_FAN, vbo, nullptr, GPU_BATCH_OWNS_VBO);
  }

  /* Build a 4-vertex buffer: positions from the clipping bounds, uvs from the uv bounds. */
  GPUVertBuf *create_vbo()
  {
    GPUVertBuf *vbo = GPU_vertbuf_create_with_format(&format);
    GPU_vertbuf_data_alloc(vbo, 4);
    float pos[4][2];
    fill_tri_fan_from_rctf(pos, info.clipping_bounds);
    float uv[4][2];
    fill_tri_fan_from_rctf(uv, info.uv_bounds);

    for (int i = 0; i < 4; i++) {
      GPU_vertbuf_attr_set(vbo, pos_id, i, pos[i]);
      GPU_vertbuf_attr_set(vbo, uv_id, i, uv[i]);
    }

    return vbo;
  }

  /* Emit the rect corners in fan order: (xmin,ymin), (xmax,ymin), (xmax,ymax), (xmin,ymax). */
  static void fill_tri_fan_from_rctf(float result[4][2], rctf &rect)
  {
    result[0][0] = rect.xmin;
    result[0][1] = rect.ymin;
    result[1][0] = rect.xmax;
    result[1][1] = rect.ymin;
    result[2][0] = rect.xmax;
    result[2][1] = rect.ymax;
    result[3][0] = rect.xmin;
    result[3][1] = rect.ymax;
  }

  /* Set up the pos/uv vertex format once; subsequent calls are no-ops. */
  void ensure_format()
  {
    if (format.attr_len == 0) {
      GPU_vertformat_attr_add(&format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
      GPU_vertformat_attr_add(&format, "uv", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);

      pos_id = GPU_vertformat_attr_id_get(&format, "pos");
      uv_id = GPU_vertformat_attr_id_get(&format, "uv");
    }
  }
};

View File

@@ -1,422 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2021, Blender Foundation.
*/
/** \file
* \ingroup draw_engine
*/
#pragma once
#include "BKE_image_partial_update.hh"
#include "IMB_imbuf_types.h"
#include "image_batches.hh"
#include "image_private.hh"
#include "image_wrappers.hh"
namespace blender::draw::image_engine {
constexpr float EPSILON_UV_BOUNDS = 0.00001f;
/**
* \brief Screen space method using a single texture spawning the whole screen.
*/
struct OneTextureMethod {
IMAGE_InstanceData *instance_data;
OneTextureMethod(IMAGE_InstanceData *instance_data) : instance_data(instance_data)
{
}
/** \brief Update the texture slot uv and screen space bounds. */
void update_screen_space_bounds(const ARegion *region)
{
/* Create a single texture that covers the visible screen space. */
BLI_rctf_init(
&instance_data->texture_infos[0].clipping_bounds, 0, region->winx, 0, region->winy);
instance_data->texture_infos[0].visible = true;
/* Mark the other textures as invalid. */
for (int i = 1; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
BLI_rctf_init_minmax(&instance_data->texture_infos[i].clipping_bounds);
instance_data->texture_infos[i].visible = false;
}
}
void update_uv_bounds(const ARegion *region)
{
TextureInfo &info = instance_data->texture_infos[0];
if (!BLI_rctf_compare(&info.uv_bounds, &region->v2d.cur, EPSILON_UV_BOUNDS)) {
info.uv_bounds = region->v2d.cur;
info.dirty = true;
}
/* Mark the other textures as invalid. */
for (int i = 1; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
BLI_rctf_init_minmax(&instance_data->texture_infos[i].clipping_bounds);
}
}
};
using namespace blender::bke::image::partial_update;
template<typename TextureMethod> class ScreenSpaceDrawingMode : public AbstractDrawingMode {
private:
DRWPass *create_image_pass() const
{
/* Write depth is needed for background overlay rendering. Near depth is used for
* transparency checker and Far depth is used for indicating the image size. */
DRWState state = static_cast<DRWState>(DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH |
DRW_STATE_DEPTH_ALWAYS | DRW_STATE_BLEND_ALPHA_PREMUL);
return DRW_pass_create("Image", state);
}
void add_shgroups(const IMAGE_InstanceData *instance_data) const
{
const ShaderParameters &sh_params = instance_data->sh_params;
GPUShader *shader = IMAGE_shader_image_get(false);
DRWShadingGroup *shgrp = DRW_shgroup_create(shader, instance_data->passes.image_pass);
DRW_shgroup_uniform_vec2_copy(shgrp, "farNearDistances", sh_params.far_near);
DRW_shgroup_uniform_vec4_copy(shgrp, "color", ShaderParameters::color);
DRW_shgroup_uniform_vec4_copy(shgrp, "shuffle", sh_params.shuffle);
DRW_shgroup_uniform_int_copy(shgrp, "drawFlags", sh_params.flags);
DRW_shgroup_uniform_bool_copy(shgrp, "imgPremultiplied", sh_params.use_premul_alpha);
DRW_shgroup_uniform_vec2_copy(shgrp, "maxUv", instance_data->max_uv);
float image_mat[4][4];
unit_m4(image_mat);
for (int i = 0; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
const TextureInfo &info = instance_data->texture_infos[i];
if (!info.visible) {
continue;
}
/*
Should be space relative translation.
image_mat[0][0] = info.clipping_bounds.xmax;
image_mat[1][1] = info.clipping_bounds.ymax;
*/
DRWShadingGroup *shgrp_sub = DRW_shgroup_create_sub(shgrp);
DRW_shgroup_uniform_texture_ex(shgrp_sub, "imageTexture", info.texture, GPU_SAMPLER_DEFAULT);
DRW_shgroup_call_obmat(shgrp_sub, info.batch, image_mat);
}
}
/**
* \brief Update GPUTextures for drawing the image.
*
* GPUTextures that are marked dirty are rebuild. GPUTextures that aren't marked dirty are
* updated with changed region of the image.
*/
void update_textures(IMAGE_InstanceData &instance_data,
Image *image,
ImageUser *image_user) const
{
PartialUpdateChecker<ImageTileData> checker(
image, image_user, instance_data.partial_update.user);
PartialUpdateChecker<ImageTileData>::CollectResult changes = checker.collect_changes();
switch (changes.get_result_code()) {
case ePartialUpdateCollectResult::FullUpdateNeeded:
instance_data.mark_all_texture_slots_dirty();
break;
case ePartialUpdateCollectResult::NoChangesDetected:
break;
case ePartialUpdateCollectResult::PartialChangesDetected:
/* Partial update when wrap repeat is enabled is not supported. */
if (instance_data.flags.do_tile_drawing) {
instance_data.mark_all_texture_slots_dirty();
}
else {
do_partial_update(changes, instance_data);
}
break;
}
do_full_update_for_dirty_textures(instance_data, image_user);
}
void do_partial_update(PartialUpdateChecker<ImageTileData>::CollectResult &iterator,
IMAGE_InstanceData &instance_data) const
{
while (iterator.get_next_change() == ePartialUpdateIterResult::ChangeAvailable) {
/* Quick exit when tile_buffer isn't availble. */
if (iterator.tile_data.tile_buffer == nullptr) {
continue;
}
ensure_float_buffer(*iterator.tile_data.tile_buffer);
const float tile_width = static_cast<float>(iterator.tile_data.tile_buffer->x);
const float tile_height = static_cast<float>(iterator.tile_data.tile_buffer->y);
for (int i = 0; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
const TextureInfo &info = instance_data.texture_infos[i];
/* Dirty images will receive a full update. No need to do a partial one now. */
if (info.dirty) {
continue;
}
if (!info.visible) {
continue;
}
GPUTexture *texture = info.texture;
const float texture_width = GPU_texture_width(texture);
const float texture_height = GPU_texture_height(texture);
// TODO
// early bound check.
ImageTileWrapper tile_accessor(iterator.tile_data.tile);
float tile_offset_x = static_cast<float>(tile_accessor.get_tile_x_offset());
float tile_offset_y = static_cast<float>(tile_accessor.get_tile_y_offset());
rcti *changed_region_in_texel_space = &iterator.changed_region.region;
rctf changed_region_in_uv_space;
BLI_rctf_init(&changed_region_in_uv_space,
static_cast<float>(changed_region_in_texel_space->xmin) /
static_cast<float>(iterator.tile_data.tile_buffer->x) +
tile_offset_x,
static_cast<float>(changed_region_in_texel_space->xmax) /
static_cast<float>(iterator.tile_data.tile_buffer->x) +
tile_offset_x,
static_cast<float>(changed_region_in_texel_space->ymin) /
static_cast<float>(iterator.tile_data.tile_buffer->y) +
tile_offset_y,
static_cast<float>(changed_region_in_texel_space->ymax) /
static_cast<float>(iterator.tile_data.tile_buffer->y) +
tile_offset_y);
rctf changed_overlapping_region_in_uv_space;
const bool region_overlap = BLI_rctf_isect(
&info.uv_bounds, &changed_region_in_uv_space, &changed_overlapping_region_in_uv_space);
if (!region_overlap) {
continue;
}
// convert the overlapping region to texel space and to ss_pixel space...
// TODO: first convert to ss_pixel space as integer based. and from there go back to texel
// space. But perhaps this isn't needed and we could use an extraction offset somehow.
rcti gpu_texture_region_to_update;
BLI_rcti_init(&gpu_texture_region_to_update,
floor((changed_overlapping_region_in_uv_space.xmin - info.uv_bounds.xmin) *
texture_width / BLI_rctf_size_x(&info.uv_bounds)),
floor((changed_overlapping_region_in_uv_space.xmax - info.uv_bounds.xmin) *
texture_width / BLI_rctf_size_x(&info.uv_bounds)),
ceil((changed_overlapping_region_in_uv_space.ymin - info.uv_bounds.ymin) *
texture_height / BLI_rctf_size_y(&info.uv_bounds)),
ceil((changed_overlapping_region_in_uv_space.ymax - info.uv_bounds.ymin) *
texture_height / BLI_rctf_size_y(&info.uv_bounds)));
rcti tile_region_to_extract;
BLI_rcti_init(
&tile_region_to_extract,
floor((changed_overlapping_region_in_uv_space.xmin - tile_offset_x) * tile_width),
floor((changed_overlapping_region_in_uv_space.xmax - tile_offset_x) * tile_width),
ceil((changed_overlapping_region_in_uv_space.ymin - tile_offset_y) * tile_height),
ceil((changed_overlapping_region_in_uv_space.ymax - tile_offset_y) * tile_height));
// Create an image buffer with a size
// extract and scale into an imbuf
const int texture_region_width = BLI_rcti_size_x(&gpu_texture_region_to_update);
const int texture_region_height = BLI_rcti_size_y(&gpu_texture_region_to_update);
ImBuf extracted_buffer;
IMB_initImBuf(
&extracted_buffer, texture_region_width, texture_region_height, 32, IB_rectfloat);
int offset = 0;
ImBuf *tile_buffer = iterator.tile_data.tile_buffer;
for (int y = gpu_texture_region_to_update.ymin; y < gpu_texture_region_to_update.ymax;
y++) {
float yf = y / (float)texture_height;
float v = info.uv_bounds.ymax * yf + info.uv_bounds.ymin * (1.0 - yf) - tile_offset_y;
for (int x = gpu_texture_region_to_update.xmin; x < gpu_texture_region_to_update.xmax;
x++) {
float xf = x / (float)texture_width;
float u = info.uv_bounds.xmax * xf + info.uv_bounds.xmin * (1.0 - xf) - tile_offset_x;
nearest_interpolation_color(tile_buffer,
nullptr,
&extracted_buffer.rect_float[offset * 4],
u * tile_buffer->x,
v * tile_buffer->y);
offset++;
}
}
GPU_texture_update_sub(texture,
GPU_DATA_FLOAT,
extracted_buffer.rect_float,
gpu_texture_region_to_update.xmin,
gpu_texture_region_to_update.ymin,
0,
extracted_buffer.x,
extracted_buffer.y,
0);
imb_freerectImbuf_all(&extracted_buffer);
}
}
}
void do_full_update_for_dirty_textures(IMAGE_InstanceData &instance_data,
const ImageUser *image_user) const
{
for (int i = 0; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
TextureInfo &info = instance_data.texture_infos[i];
if (!info.dirty) {
continue;
}
if (!info.visible) {
continue;
}
do_full_update_gpu_texture(info, instance_data, image_user);
}
}
void do_full_update_gpu_texture(TextureInfo &info,
IMAGE_InstanceData &instance_data,
const ImageUser *image_user) const
{
ImBuf texture_buffer;
const int texture_width = GPU_texture_width(info.texture);
const int texture_height = GPU_texture_height(info.texture);
IMB_initImBuf(&texture_buffer, texture_width, texture_height, 0, IB_rectfloat);
ImageUser tile_user = {0};
if (image_user) {
tile_user = *image_user;
}
void *lock;
Image *image = instance_data.image;
LISTBASE_FOREACH (ImageTile *, image_tile_ptr, &image->tiles) {
const ImageTileWrapper image_tile(image_tile_ptr);
tile_user.tile = image_tile.get_tile_number();
ImBuf *tile_buffer = BKE_image_acquire_ibuf(image, &tile_user, &lock);
if (tile_buffer == nullptr) {
/* Couldn't load the image buffer of the tile. */
continue;
}
do_full_update_texture_slot(instance_data, info, texture_buffer, *tile_buffer, image_tile);
BKE_image_release_ibuf(image, tile_buffer, lock);
}
GPU_texture_update(info.texture, GPU_DATA_FLOAT, texture_buffer.rect_float);
imb_freerectImbuf_all(&texture_buffer);
}
/**
* \brief Ensure that the float buffer of the given image buffer is available.
*/
void ensure_float_buffer(ImBuf &image_buffer) const
{
if (image_buffer.rect_float == nullptr) {
IMB_float_from_rect(&image_buffer);
}
}
void do_full_update_texture_slot(const IMAGE_InstanceData &instance_data,
const TextureInfo &texture_info,
ImBuf &texture_buffer,
ImBuf &tile_buffer,
const ImageTileWrapper &image_tile) const
{
const int texture_width = texture_buffer.x;
const int texture_height = texture_buffer.y;
ensure_float_buffer(tile_buffer);
/* IMB_transform works in a non-consistent space. This should be documented or fixed!.
* Construct a variant of the info_uv_to_texture that adds the texel space
* transformation.*/
float uv_to_texel[4][4];
copy_m4_m4(uv_to_texel, instance_data.ss_to_texture);
float scale[3] = {static_cast<float>(texture_width) / static_cast<float>(tile_buffer.x),
static_cast<float>(texture_height) / static_cast<float>(tile_buffer.y),
1.0f};
rescale_m4(uv_to_texel, scale);
uv_to_texel[3][0] += image_tile.get_tile_x_offset() / BLI_rctf_size_x(&texture_info.uv_bounds);
uv_to_texel[3][1] += image_tile.get_tile_y_offset() / BLI_rctf_size_y(&texture_info.uv_bounds);
uv_to_texel[3][0] *= texture_width;
uv_to_texel[3][1] *= texture_height;
invert_m4(uv_to_texel);
rctf crop_rect;
rctf *crop_rect_ptr = nullptr;
eIMBTransformMode transform_mode;
if (instance_data.flags.do_tile_drawing) {
transform_mode = IMB_TRANSFORM_MODE_WRAP_REPEAT;
}
else {
BLI_rctf_init(&crop_rect, 0.0, tile_buffer.x, 0.0, tile_buffer.y);
crop_rect_ptr = &crop_rect;
transform_mode = IMB_TRANSFORM_MODE_CROP_SRC;
}
IMB_transform(&tile_buffer,
&texture_buffer,
transform_mode,
IMB_FILTER_NEAREST,
uv_to_texel,
crop_rect_ptr);
}
public:
void cache_init(IMAGE_Data *vedata) const override
{
IMAGE_InstanceData *instance_data = vedata->instance_data;
instance_data->passes.image_pass = create_image_pass();
}
void cache_image(IMAGE_Data *vedata, Image *image, ImageUser *iuser) const override
{
const DRWContextState *draw_ctx = DRW_context_state_get();
IMAGE_InstanceData *instance_data = vedata->instance_data;
TextureMethod method(instance_data);
instance_data->partial_update.ensure_image(image);
instance_data->max_uv_update();
instance_data->clear_dirty_flag();
// Step: Find out which screen space textures are needed to draw on the screen. Remove the
// screen space textures that aren't needed.
const ARegion *region = draw_ctx->region;
method.update_screen_space_bounds(region);
method.update_uv_bounds(region);
// Step: Update the GPU textures based on the changes in the image.
instance_data->update_gpu_texture_allocations();
update_textures(*instance_data, image, iuser);
// Step: Add the GPU textures to the shgroup.
instance_data->update_batches();
add_shgroups(instance_data);
}
void draw_finish(IMAGE_Data *UNUSED(vedata)) const override
{
}
void draw_scene(IMAGE_Data *vedata) const override
{
IMAGE_InstanceData *instance_data = vedata->instance_data;
DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
GPU_framebuffer_bind(dfbl->default_fb);
static float clear_col[4] = {0.0f, 0.0f, 0.0f, 0.0f};
GPU_framebuffer_clear_color_depth(dfbl->default_fb, clear_col, 1.0);
DRW_view_set_active(instance_data->view);
DRW_draw_pass(instance_data->passes.image_pass);
DRW_view_set_active(nullptr);
}
}; // namespace clipping
} // namespace blender::draw::image_engine

View File

@@ -0,0 +1,147 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2021, Blender Foundation.
*/
/** \file
* \ingroup draw_engine
*/
#pragma once
#include "image_private.hh"
namespace blender::draw::image_engine {
class ImageSpaceDrawingMode : public AbstractDrawingMode {
private:
DRWPass *create_image_pass() const
{
/* Write depth is needed for background overlay rendering. Near depth is used for
* transparency checker and Far depth is used for indicating the image size. */
DRWState state = static_cast<DRWState>(DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH |
DRW_STATE_DEPTH_ALWAYS | DRW_STATE_BLEND_ALPHA_PREMUL);
return DRW_pass_create("Image", state);
}
void add_to_shgroup(AbstractSpaceAccessor *space,
DRWShadingGroup *grp,
const Image *image,
const ImBuf *image_buffer) const
{
float image_mat[4][4];
const DRWContextState *draw_ctx = DRW_context_state_get();
const ARegion *region = draw_ctx->region;
space->get_image_mat(image_buffer, region, image_mat);
GPUBatch *geom = DRW_cache_quad_get();
const float translate_x = image_mat[3][0];
const float translate_y = image_mat[3][1];
LISTBASE_FOREACH (ImageTile *, tile, &image->tiles) {
const int tile_x = ((tile->tile_number - 1001) % 10);
const int tile_y = ((tile->tile_number - 1001) / 10);
image_mat[3][0] = (float)tile_x + translate_x;
image_mat[3][1] = (float)tile_y + translate_y;
DRW_shgroup_call_obmat(grp, geom, image_mat);
}
}
public:
void cache_init(IMAGE_Data *vedata) const override
{
IMAGE_PassList *psl = vedata->psl;
psl->image_pass = create_image_pass();
}
void cache_image(AbstractSpaceAccessor *space,
IMAGE_Data *vedata,
Image *image,
ImageUser *iuser,
ImBuf *image_buffer) const override
{
IMAGE_PassList *psl = vedata->psl;
IMAGE_StorageList *stl = vedata->stl;
IMAGE_PrivateData *pd = stl->pd;
GPUTexture *tex_tile_data = nullptr;
space->get_gpu_textures(
image, iuser, image_buffer, &pd->texture, &pd->owns_texture, &tex_tile_data);
if (pd->texture == nullptr) {
return;
}
const bool is_tiled_texture = tex_tile_data != nullptr;
ShaderParameters sh_params;
sh_params.use_premul_alpha = BKE_image_has_gpu_texture_premultiplied_alpha(image,
image_buffer);
const DRWContextState *draw_ctx = DRW_context_state_get();
const Scene *scene = draw_ctx->scene;
if (scene->camera && scene->camera->type == OB_CAMERA) {
Camera *camera = static_cast<Camera *>(scene->camera->data);
copy_v2_fl2(sh_params.far_near, camera->clip_end, camera->clip_start);
}
space->get_shader_parameters(sh_params, image_buffer, is_tiled_texture);
GPUShader *shader = IMAGE_shader_image_get(is_tiled_texture);
DRWShadingGroup *shgrp = DRW_shgroup_create(shader, psl->image_pass);
if (is_tiled_texture) {
DRW_shgroup_uniform_texture_ex(shgrp, "imageTileArray", pd->texture, GPU_SAMPLER_DEFAULT);
DRW_shgroup_uniform_texture(shgrp, "imageTileData", tex_tile_data);
}
else {
DRW_shgroup_uniform_texture_ex(shgrp, "imageTexture", pd->texture, GPU_SAMPLER_DEFAULT);
}
DRW_shgroup_uniform_vec2_copy(shgrp, "farNearDistances", sh_params.far_near);
DRW_shgroup_uniform_vec4_copy(shgrp, "color", ShaderParameters::color);
DRW_shgroup_uniform_vec4_copy(shgrp, "shuffle", sh_params.shuffle);
DRW_shgroup_uniform_int_copy(shgrp, "drawFlags", sh_params.flags);
DRW_shgroup_uniform_bool_copy(shgrp, "imgPremultiplied", sh_params.use_premul_alpha);
add_to_shgroup(space, shgrp, image, image_buffer);
}
void draw_finish(IMAGE_Data *vedata) const override
{
IMAGE_StorageList *stl = vedata->stl;
IMAGE_PrivateData *pd = stl->pd;
if (pd->texture && pd->owns_texture) {
GPU_texture_free(pd->texture);
pd->owns_texture = false;
}
pd->texture = nullptr;
}
void draw_scene(IMAGE_Data *vedata) const override
{
IMAGE_PassList *psl = vedata->psl;
IMAGE_PrivateData *pd = vedata->stl->pd;
DefaultFramebufferList *dfbl = DRW_viewport_framebuffer_list_get();
GPU_framebuffer_bind(dfbl->default_fb);
static float clear_col[4] = {0.0f, 0.0f, 0.0f, 0.0f};
GPU_framebuffer_clear_color_depth(dfbl->default_fb, clear_col, 1.0);
DRW_view_set_active(pd->view);
DRW_draw_pass(psl->image_pass);
DRW_view_set_active(nullptr);
}
};
} // namespace blender::draw::image_engine

View File

@@ -41,7 +41,7 @@
#include "GPU_batch.h"
#include "image_drawing_mode.hh"
#include "image_drawing_mode_image_space.hh"
#include "image_engine.h"
#include "image_private.hh"
#include "image_space_image.hh"
@@ -68,7 +68,7 @@ template<
*
* Useful during development to switch between drawing implementations.
*/
typename DrawingMode = ScreenSpaceDrawingMode<OneTextureMethod>>
typename DrawingMode = ImageSpaceDrawingMode>
class ImageEngine {
private:
const DRWContextState *draw_ctx;
@@ -86,58 +86,48 @@ class ImageEngine {
void cache_init()
{
IMAGE_InstanceData *instance_data = vedata->instance_data;
drawing_mode.cache_init(vedata);
IMAGE_StorageList *stl = vedata->stl;
IMAGE_PrivateData *pd = stl->pd;
/* Setup full screen view matrix. */
const ARegion *region = draw_ctx->region;
float winmat[4][4], viewmat[4][4];
orthographic_m4(viewmat, 0.0, region->winx, 0.0, region->winy, 0.0, 1.0);
unit_m4(winmat);
instance_data->view = DRW_view_create(viewmat, winmat, nullptr, nullptr, nullptr);
drawing_mode.cache_init(vedata);
pd->view = nullptr;
if (space->has_view_override()) {
const ARegion *region = draw_ctx->region;
pd->view = space->create_view_override(region);
}
}
void cache_populate()
{
IMAGE_InstanceData *instance_data = vedata->instance_data;
IMAGE_StorageList *stl = vedata->stl;
IMAGE_PrivateData *pd = stl->pd;
Main *bmain = CTX_data_main(draw_ctx->evil_C);
instance_data->image = space->get_image(bmain);
if (instance_data->image == nullptr) {
pd->image = space->get_image(bmain);
if (pd->image == nullptr) {
/* Early exit, nothing to draw. */
return;
}
instance_data->flags.do_tile_drawing = instance_data->image->source != IMA_SRC_TILED &&
space->use_tile_drawing();
void *lock;
ImBuf *image_buffer = space->acquire_image_buffer(instance_data->image, &lock);
/* Setup the matrix to go from screen UV coordinates to UV texture space coordinates. */
float image_resolution[2] = {image_buffer ? image_buffer->x : 1024.0f,
image_buffer ? image_buffer->y : 1024.0f};
space->init_ss_to_texture_matrix(
draw_ctx->region, image_resolution, instance_data->ss_to_texture);
const Scene *scene = DRW_context_state_get()->scene;
instance_data->sh_params.update(space.get(), scene, instance_data->image, image_buffer);
space->release_buffer(instance_data->image, image_buffer, lock);
pd->ibuf = space->acquire_image_buffer(pd->image, &pd->lock);
ImageUser *iuser = space->get_image_user();
drawing_mode.cache_image(vedata, instance_data->image, iuser);
drawing_mode.cache_image(space.get(), vedata, pd->image, iuser, pd->ibuf);
}
void draw_finish()
{
drawing_mode.draw_finish(vedata);
IMAGE_InstanceData *instance_data = vedata->instance_data;
instance_data->image = nullptr;
IMAGE_StorageList *stl = vedata->stl;
IMAGE_PrivateData *pd = stl->pd;
space->release_buffer(pd->image, pd->ibuf, pd->lock);
pd->image = nullptr;
pd->ibuf = nullptr;
}
void draw_scene()
{
drawing_mode.draw_scene(vedata);
}
}; // namespace blender::draw::image_engine
};
/* -------------------------------------------------------------------- */
/** \name Engine Callbacks
@@ -147,9 +137,15 @@ static void IMAGE_engine_init(void *ved)
{
IMAGE_shader_library_ensure();
IMAGE_Data *vedata = (IMAGE_Data *)ved;
if (vedata->instance_data == nullptr) {
vedata->instance_data = MEM_new<IMAGE_InstanceData>(__func__);
IMAGE_StorageList *stl = vedata->stl;
if (!stl->pd) {
stl->pd = static_cast<IMAGE_PrivateData *>(MEM_callocN(sizeof(IMAGE_PrivateData), __func__));
}
IMAGE_PrivateData *pd = stl->pd;
pd->ibuf = nullptr;
pd->lock = nullptr;
pd->texture = nullptr;
}
static void IMAGE_cache_init(void *vedata)
@@ -178,12 +174,6 @@ static void IMAGE_engine_free()
IMAGE_shader_free();
}
static void IMAGE_instance_free(void *_instance_data)
{
IMAGE_InstanceData *instance_data = reinterpret_cast<IMAGE_InstanceData *>(_instance_data);
MEM_delete(instance_data);
}
/** \} */
static const DrawEngineDataSize IMAGE_data_size = DRW_VIEWPORT_DATA_SIZE(IMAGE_Data);
@@ -201,7 +191,7 @@ DrawEngineType draw_engine_image_type = {
&IMAGE_data_size, /* vedata_size */
&IMAGE_engine_init, /* engine_init */
&IMAGE_engine_free, /* engine_free */
&IMAGE_instance_free, /* instance_free */
nullptr, /* instance_free */
&IMAGE_cache_init, /* cache_init */
&IMAGE_cache_populate, /* cache_populate */
nullptr, /* cache_finish */

View File

@@ -1,134 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2021, Blender Foundation.
*/
/** \file
* \ingroup draw_engine
*/
#pragma once
#include "image_batches.hh"
#include "image_partial_updater.hh"
#include "image_private.hh"
#include "image_shader_params.hh"
#include "image_texture_info.hh"
#include "image_wrappers.hh"
#include "DRW_render.h"
/**
* \brief max allowed textures to use by the ScreenSpaceDrawingMode.
*
* 4 textures are used to reduce uploading screen space textures when translating the image.
*/
constexpr int SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN = 4;
struct IMAGE_InstanceData {
struct Image *image;
PartialImageUpdater partial_update;
struct DRWView *view;
ShaderParameters sh_params;
struct {
/**
* \brief should we perform tiled drawing (wrap repeat).
*
* Option is true when image is capable of tile drawing (image is not tile) and the tiled
* option is set in the space.
*/
bool do_tile_drawing : 1;
} flags;
struct {
DRWPass *image_pass;
} passes;
/** \brief Transform matrix to convert a normalized screen space coordinates to texture space. */
float ss_to_texture[4][4];
TextureInfo texture_infos[SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN];
/**
* \brief Maximum uv's that are on the border of the image.
*
* Larger UV coordinates would be drawn as a border. */
float max_uv[2];
public:
void max_uv_update()
{
copy_v2_fl2(max_uv, 1.0f, 1.0);
LISTBASE_FOREACH (ImageTile *, image_tile_ptr, &image->tiles) {
ImageTileWrapper image_tile(image_tile_ptr);
max_uv[0] = max_ii(max_uv[0], image_tile.get_tile_x_offset() + 1);
max_uv[1] = max_ii(max_uv[1], image_tile.get_tile_y_offset() + 1);
}
}
void clear_dirty_flag()
{
reset_dirty_flag(false);
}
void mark_all_texture_slots_dirty()
{
reset_dirty_flag(true);
}
void update_gpu_texture_allocations()
{
for (int i = 0; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
TextureInfo &info = texture_infos[i];
const bool is_allocated = info.texture != nullptr;
const bool is_visible = info.visible;
const bool should_be_freed = !is_visible && is_allocated;
const bool should_be_created = is_visible && !is_allocated;
if (should_be_freed) {
GPU_texture_free(info.texture);
info.texture = nullptr;
}
if (should_be_created) {
DRW_texture_ensure_fullscreen_2d(
&info.texture, GPU_RGBA16F, static_cast<DRWTextureFlag>(0));
}
info.dirty |= should_be_created;
}
}
void update_batches()
{
for (int i = 0; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
TextureInfo &info = texture_infos[i];
if (!info.dirty) {
continue;
}
BatchUpdater batch_updater(info);
batch_updater.update_batch();
}
}
private:
/** \brief Set dirty flag of all texture slots to the given value. */
void reset_dirty_flag(bool new_value)
{
for (int i = 0; i < SCREEN_SPACE_DRAWING_MODE_TEXTURE_LEN; i++) {
texture_infos[i].dirty = new_value;
}
}
};

View File

@@ -1,78 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2021, Blender Foundation.
*/
/** \file
* \ingroup draw_engine
*/
#pragma once
#include "BKE_image.h"
#include "BKE_image_partial_update.hh"
struct PartialImageUpdater {
struct PartialUpdateUser *user;
const struct Image *image;
/**
* \brief Ensure that there is a partial update user for the given image.
*/
void ensure_image(const Image *image)
{
if (!is_valid(image)) {
free();
create(image);
}
}
virtual ~PartialImageUpdater()
{
free();
}
private:
/**
* \brief check if the partial update user can still be used for the given image.
*
* When switching to a different image the partial update user should be recreated.
*/
bool is_valid(const Image *new_image) const
{
if (image != new_image) {
return false;
}
return user != nullptr;
}
void create(const Image *image)
{
BLI_assert(user == nullptr);
user = BKE_image_partial_update_create(image);
image = image;
}
void free()
{
if (user != nullptr) {
BKE_image_partial_update_free(user);
user = nullptr;
image = nullptr;
}
}
};

View File

@@ -24,11 +24,6 @@
#include <optional>
#include "BKE_image.h"
#include "image_instance_data.hh"
#include "image_texture_info.hh"
/* Forward declarations */
extern "C" {
struct GPUTexture;
@@ -40,13 +35,32 @@ struct Image;
namespace blender::draw::image_engine {
/* GPUViewport.storage
* Is freed every time the viewport engine changes. */
struct IMAGE_PassList {
DRWPass *image_pass;
};
struct IMAGE_PrivateData {
void *lock;
struct ImBuf *ibuf;
struct Image *image;
struct DRWView *view;
struct GPUTexture *texture;
bool owns_texture;
};
struct IMAGE_StorageList {
IMAGE_PrivateData *pd;
};
struct IMAGE_Data {
void *engine_type;
DRWViewportEmptyList *fbl;
DRWViewportEmptyList *txl;
DRWViewportEmptyList *psl;
DRWViewportEmptyList *stl;
IMAGE_InstanceData *instance_data;
IMAGE_PassList *psl;
IMAGE_StorageList *stl;
};
/* Shader parameters. */
@@ -55,6 +69,105 @@ struct IMAGE_Data {
#define IMAGE_DRAW_FLAG_SHUFFLING (1 << 2)
#define IMAGE_DRAW_FLAG_DEPTH (1 << 3)
#define IMAGE_DRAW_FLAG_DO_REPEAT (1 << 4)
#define IMAGE_DRAW_FLAG_USE_WORLD_POS (1 << 5)
struct ShaderParameters {
constexpr static float color[4] = {1.0f, 1.0f, 1.0f, 1.0f};
int flags = 0;
float shuffle[4];
float far_near[2];
bool use_premul_alpha = false;
ShaderParameters()
{
copy_v4_fl(shuffle, 1.0f);
copy_v2_fl2(far_near, 100.0f, 0.0f);
}
};
/**
* Space accessor.
*
* Image engine is used to draw the images inside multiple spaces \see SpaceLink.
* The AbstractSpaceAccessor is an interface to communicate with a space.
*/
class AbstractSpaceAccessor {
public:
virtual ~AbstractSpaceAccessor() = default;
/**
* Return the active image of the space.
*
* The returned image will be drawn in the space.
*
* The return value is optional (may be null).
*/
virtual Image *get_image(Main *bmain) = 0;
/**
* Return the #ImageUser of the space.
*
* The return value is optional (may be null).
*/
virtual ImageUser *get_image_user() = 0;
/**
* Acquire the image buffer of the image.
*
* \param image: Image to get the buffer from. Image is the same as returned from the #get_image
* member.
* \param lock: pointer to a lock object.
* \return Image buffer of the given image.
*/
virtual ImBuf *acquire_image_buffer(Image *image, void **lock) = 0;
/**
* Release a previous locked image from #acquire_image_buffer.
*/
virtual void release_buffer(Image *image, ImBuf *image_buffer, void *lock) = 0;
/**
* Update the r_shader_parameters with space specific settings.
*
* Only update the #ShaderParameters.flags and #ShaderParameters.shuffle. Other parameters
* are updated inside the image engine.
*/
virtual void get_shader_parameters(ShaderParameters &r_shader_parameters,
                                   ImBuf *image_buffer,
                                   bool is_tiled) = 0;
/**
* Retrieve the gpu textures to draw.
*/
virtual void get_gpu_textures(Image *image,
                              ImageUser *iuser,
                              ImBuf *image_buffer,
                              GPUTexture **r_gpu_texture,
                              bool *r_owns_texture,
                              GPUTexture **r_tex_tile_data) = 0;
/**
* Does this space override the view.
* When so this member should return true and the create_view_override must return the view to
* use during drawing.
*/
virtual bool has_view_override() const = 0;
/**
* Override the view for drawing.
* Should match #has_view_override; implementations that do not override may return nullptr.
*/
virtual DRWView *create_view_override(const ARegion *UNUSED(region)) = 0;
/**
* Initialize the matrix that will be used to draw the image. The matrix will be send as object
* matrix to the drawing pipeline.
*/
virtual void get_image_mat(const ImBuf *image_buffer,
                           const ARegion *region,
                           float r_mat[4][4]) const = 0;
};  // AbstractSpaceAccessor
/**
* Abstract class for a drawing mode of the image engine.
@@ -66,7 +179,11 @@ class AbstractDrawingMode {
public:
virtual ~AbstractDrawingMode() = default;
virtual void cache_init(IMAGE_Data *vedata) const = 0;
virtual void cache_image(IMAGE_Data *vedata, Image *image, ImageUser *iuser) const = 0;
virtual void cache_image(AbstractSpaceAccessor *space,
IMAGE_Data *vedata,
Image *image,
ImageUser *iuser,
ImBuf *image_buffer) const = 0;
virtual void draw_scene(IMAGE_Data *vedata) const = 0;
virtual void draw_finish(IMAGE_Data *vedata) const = 0;
};

View File

@@ -1,59 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2021, Blender Foundation.
*/
/** \file
* \ingroup draw_engine
*/
#pragma once
#include "DNA_camera_types.h"
#include "DNA_image_types.h"
#include "DNA_scene_types.h"
#include "IMB_imbuf_types.h"
#include "BKE_image.h"
#include "BLI_math.h"
#include "image_space.hh"
/* CPU-side mirror of the uniforms consumed by the image engine shader. */
struct ShaderParameters {
/* Constant white tint; multiplied in when channel shuffling is active. */
constexpr static float color[4] = {1.0f, 1.0f, 1.0f, 1.0f};
/* Bitmask of IMAGE_DRAW_FLAG_* values controlling the fragment shader. */
int flags = 0;
float shuffle[4];
float far_near[2];
bool use_premul_alpha = false;
/**
 * Fill the parameters for the given space/image combination.
 * Sets defaults first, then lets the space override flags/shuffle last.
 */
void update(AbstractSpaceAccessor *space, const Scene *scene, Image *image, ImBuf *image_buffer)
{
/* Defaults: pass all channels through, 100..0 far/near depth range. */
copy_v4_fl(shuffle, 1.0f);
copy_v2_fl2(far_near, 100.0f, 0.0f);
use_premul_alpha = BKE_image_has_gpu_texture_premultiplied_alpha(image, image_buffer);
/* When the scene has an active camera object, use its clip range instead. */
if (scene->camera && scene->camera->type == OB_CAMERA) {
Camera *camera = static_cast<Camera *>(scene->camera->data);
copy_v2_fl2(far_near, camera->clip_end, camera->clip_start);
}
const bool is_tiled_image = (image->source == IMA_SRC_TILED);
/* Space-specific settings are applied last so they can override the defaults above. */
space->get_shader_parameters(*this, image_buffer, is_tiled_image);
}
};

View File

@@ -1,99 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2021, Blender Foundation.
*/
/** \file
* \ingroup draw_engine
*/
#pragma once
/* Forward declaration; defined as a `struct` in image_private.hh, so use the
 * matching class-key to avoid a class/struct mismatch (MSVC warning C4099). */
struct ShaderParameters;
/**
* Space accessor.
*
* Image engine is used to draw the images inside multiple spaces \see SpaceLink.
* The AbstractSpaceAccessor is an interface to communicate with a space.
*/
class AbstractSpaceAccessor {
public:
virtual ~AbstractSpaceAccessor() = default;
/**
* Return the active image of the space.
*
* The returned image will be drawn in the space.
*
* The return value is optional (may be null).
*/
virtual Image *get_image(Main *bmain) = 0;
/**
* Return the #ImageUser of the space.
*
* The return value is optional (may be null).
*/
virtual ImageUser *get_image_user() = 0;
/**
* Acquire the image buffer of the image.
*
* \param image: Image to get the buffer from. Image is the same as returned from the #get_image
* member.
* \param lock: pointer to a lock object.
* \return Image buffer of the given image.
*/
virtual ImBuf *acquire_image_buffer(Image *image, void **lock) = 0;
/**
* Release a previous locked image from #acquire_image_buffer.
*/
virtual void release_buffer(Image *image, ImBuf *image_buffer, void *lock) = 0;
/**
* Update the r_shader_parameters with space specific settings.
*
* Only update the #ShaderParameters.flags and #ShaderParameters.shuffle. Other parameters
* are updated inside the image engine.
*/
virtual void get_shader_parameters(ShaderParameters &r_shader_parameters,
                                   ImBuf *image_buffer,
                                   bool is_tiled) = 0;
/**
* Retrieve the gpu textures to draw.
*/
virtual void get_gpu_textures(Image *image,
                              ImageUser *iuser,
                              ImBuf *image_buffer,
                              GPUTexture **r_gpu_texture,
                              bool *r_owns_texture,
                              GPUTexture **r_tex_tile_data) = 0;
/** \brief Is (wrap) repeat option enabled in the space. */
virtual bool use_tile_drawing() const = 0;
/**
* \brief Initialize r_uv_to_texture matrix to transform from normalized screen space coordinates
* (0..1) to texture space UV coordinates.
*/
virtual void init_ss_to_texture_matrix(const ARegion *region,
                                       const float image_resolution[2],
                                       float r_uv_to_texture[4][4]) const = 0;
};  // AbstractSpaceAccessor

View File

@@ -61,6 +61,7 @@ class SpaceImageAccessor : public AbstractSpaceAccessor {
const int sima_flag = sima->flag & ED_space_image_get_display_channel_mask(image_buffer);
const bool do_repeat = (!is_tiled) && ((sima->flag & SI_DRAW_TILE) != 0);
SET_FLAG_FROM_TEST(r_shader_parameters.flags, do_repeat, IMAGE_DRAW_FLAG_DO_REPEAT);
SET_FLAG_FROM_TEST(r_shader_parameters.flags, is_tiled, IMAGE_DRAW_FLAG_USE_WORLD_POS);
if ((sima_flag & SI_USE_ALPHA) != 0) {
/* Show RGBA */
r_shader_parameters.flags |= IMAGE_DRAW_FLAG_SHOW_ALPHA | IMAGE_DRAW_FLAG_APPLY_ALPHA;
@@ -101,6 +102,15 @@ class SpaceImageAccessor : public AbstractSpaceAccessor {
}
}
/* The image editor draws with the region's default view, so no override is used. */
bool has_view_override() const override
{
return false;
}
/* No override: returning nullptr matches has_view_override() above. */
DRWView *create_view_override(const ARegion *UNUSED(region)) override
{
return nullptr;
}
void get_gpu_textures(Image *image,
ImageUser *iuser,
ImBuf *image_buffer,
@@ -161,26 +171,11 @@ class SpaceImageAccessor : public AbstractSpaceAccessor {
}
}
bool use_tile_drawing() const override
void get_image_mat(const ImBuf *UNUSED(image_buffer),
const ARegion *UNUSED(region),
float r_mat[4][4]) const override
{
return (sima->flag & SI_DRAW_TILE) != 0;
}
void init_ss_to_texture_matrix(const ARegion *region,
const float UNUSED(image_resolution[2]),
float r_uv_to_texture[4][4]) const override
{
// TODO: I remember that there was a function for this somewhere.
unit_m4(r_uv_to_texture);
float scale_x = 1.0 / BLI_rctf_size_x(&region->v2d.cur);
float scale_y = 1.0 / BLI_rctf_size_y(&region->v2d.cur);
float translate_x = scale_x * -region->v2d.cur.xmin;
float translate_y = scale_y * -region->v2d.cur.ymin;
r_uv_to_texture[0][0] = scale_x;
r_uv_to_texture[1][1] = scale_y;
r_uv_to_texture[3][0] = translate_x;
r_uv_to_texture[3][1] = translate_y;
unit_m4(r_mat);
}
};

View File

@@ -54,6 +54,20 @@ class SpaceNodeAccessor : public AbstractSpaceAccessor {
BKE_image_release_ibuf(image, ibuf, lock);
}
/* The node editor backdrop always supplies its own view (see create_view_override). */
bool has_view_override() const override
{
return true;
}
DRWView *create_view_override(const ARegion *region) override
{
/* Setup a screen pixel view. The backdrop of the node editor doesn't follow the region. */
float winmat[4][4], viewmat[4][4];
/* Orthographic projection spanning the region in pixels, identity window matrix. */
orthographic_m4(viewmat, 0.0, region->winx, 0.0, region->winy, 0.0, 1.0);
unit_m4(winmat);
return DRW_view_create(viewmat, winmat, nullptr, nullptr, nullptr);
}
void get_shader_parameters(ShaderParameters &r_shader_parameters,
ImBuf *ibuf,
bool UNUSED(is_tiled)) override
@@ -106,33 +120,18 @@ class SpaceNodeAccessor : public AbstractSpaceAccessor {
*r_tex_tile_data = nullptr;
}
bool use_tile_drawing() const override
void get_image_mat(const ImBuf *image_buffer,
const ARegion *region,
float r_mat[4][4]) const override
{
return false;
}
unit_m4(r_mat);
const float ibuf_width = image_buffer->x;
const float ibuf_height = image_buffer->y;
/**
* The backdrop of the node editor isn't drawn in screen space UV space. But is locked with the
* screen.
*/
void init_ss_to_texture_matrix(const ARegion *region,
const float image_resolution[2],
float r_uv_to_texture[4][4]) const override
{
unit_m4(r_uv_to_texture);
float display_resolution[2];
mul_v2_v2fl(display_resolution, image_resolution, snode->zoom);
const float scale_x = display_resolution[0] / region->winx;
const float scale_y = display_resolution[1] / region->winy;
const float translate_x = ((region->winx - display_resolution[0]) * 0.5f + snode->xof) /
region->winx;
const float translate_y = ((region->winy - display_resolution[1]) * 0.5f + snode->yof) /
region->winy;
r_uv_to_texture[0][0] = scale_x;
r_uv_to_texture[1][1] = scale_y;
r_uv_to_texture[3][0] = translate_x;
r_uv_to_texture[3][1] = translate_y;
r_mat[0][0] = ibuf_width * snode->zoom;
r_mat[1][1] = ibuf_height * snode->zoom;
r_mat[3][0] = (region->winx - snode->zoom * ibuf_width) / 2 + snode->xof;
r_mat[3][1] = (region->winy - snode->zoom * ibuf_height) / 2 + snode->yof;
}
};

View File

@@ -1,76 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2020, Blender Foundation.
*/
/** \file
* \ingroup draw_engine
*/
#pragma once
#include "BLI_rect.h"
#include "GPU_batch.h"
#include "GPU_texture.h"
/* Per-region texture bookkeeping for the image engine; owns its batch and texture. */
struct TextureInfo {
/**
* \brief Is the texture visible.
*
* NOTE(review): original comment said "Is the texture clipped", which contradicts the
* member name — presumably clipped textures are the NOT-visible ones; confirm.
* Resources of clipped textures are freed and ignored when performing partial updates.
*/
bool visible : 1;
/**
* \brief does this texture need a full update.
*
* When set to false the texture can be updated using a partial update.
*/
bool dirty : 1;
/** \brief area of the texture in screen space. */
rctf clipping_bounds;
/** \brief uv area of the texture. */
rctf uv_bounds;
/**
* \brief Batch to draw the associated texture on the screen.
*
* contains a VBO with `pos` and `uv`.
* `pos` (2xF32) is relative to the origin of the space.
* `uv` (2xF32) reflect the uv bounds.
*/
GPUBatch *batch;
/**
* \brief GPU Texture for a partial region of the image editor.
*/
GPUTexture *texture;
/* Free owned GPU resources; pointers are nulled to guard against double-free. */
~TextureInfo()
{
if (batch != nullptr) {
GPU_batch_discard(batch);
batch = nullptr;
}
if (texture != nullptr) {
GPU_texture_free(texture);
texture = nullptr;
}
}
};

View File

@@ -1,49 +0,0 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright 2020, Blender Foundation.
*/
/** \file
* \ingroup draw_engine
*/
#pragma once
#include "DNA_image_types.h"
/**
 * Thin convenience wrapper around an #ImageTile.
 *
 * Derives the tile's column/row placement inside the 10-column UDIM grid
 * from its 1001-based tile number.
 */
struct ImageTileWrapper {
  ImageTile *image_tile;

  ImageTileWrapper(ImageTile *image_tile) : image_tile(image_tile)
  {
  }

  /* The UDIM tile number (1001 based). */
  int get_tile_number() const
  {
    return image_tile->tile_number;
  }

  /* Column of the tile inside the UDIM grid (grid is 10 tiles wide). */
  int get_tile_x_offset() const
  {
    const int linear_index = get_tile_number() - 1001;
    return linear_index % 10;
  }

  /* Row of the tile inside the UDIM grid. */
  int get_tile_y_offset() const
  {
    const int linear_index = get_tile_number() - 1001;
    return linear_index / 10;
  }
};

View File

@@ -7,7 +7,12 @@
#define IMAGE_DRAW_FLAG_DEPTH (1 << 3)
#define IMAGE_DRAW_FLAG_DO_REPEAT (1 << 4)
#ifdef TILED_IMAGE
uniform sampler2DArray imageTileArray;
uniform sampler1DArray imageTileData;
#else
uniform sampler2D imageTexture;
#endif
uniform bool imgPremultiplied;
uniform int drawFlags;
@@ -15,50 +20,75 @@ uniform vec2 farNearDistances;
uniform vec4 color;
uniform vec4 shuffle;
/* Maximum UV range.
* Negative UV coordinates and UV coordinates beyond maxUV would draw a border. */
uniform vec2 maxUv;
#define FAR_DISTANCE farNearDistances.x
#define NEAR_DISTANCE farNearDistances.y
#define Z_DEPTH_BORDER 1.0
#define Z_DEPTH_IMAGE 0.75
in vec2 uv_screen;
in vec2 uv_image;
in vec2 uvs;
out vec4 fragColor;
bool is_border(vec2 uv)
#ifdef TILED_IMAGE
/* TODO(fclem): deduplicate code. */
/**
 * Resolve a UDIM coordinate to its tile inside the tile-array texture.
 *
 * On success, `co` is rewritten to (uv-within-tile remapped by tile_info.xy/zw, array layer)
 * and true is returned. Returns false when the coordinate is outside the grid or the tile
 * has no data.
 *
 * NOTE(review): `ima` is not referenced in this body; presumably kept to mirror the
 * original node signature — confirm before removing.
 */
bool node_tex_tile_lookup(inout vec3 co, sampler2DArray ima, sampler1DArray map)
{
vec2 tile_pos = floor(co.xy);
/* UDIM tiles are laid out on a 10-column grid; reject coordinates outside it. */
if (tile_pos.x < 0 || tile_pos.y < 0 || tile_pos.x >= 10) {
return false;
}
float tile = 10.0 * tile_pos.y + tile_pos.x;
/* Tile index past the mapping texture width means no such tile. */
if (tile >= textureSize(map, 0).x) {
return false;
}
/* Fetch tile information: row 0 holds the array layer, row 1 holds offset/scale. */
float tile_layer = texelFetch(map, ivec2(tile, 0), 0).x;
/* Negative layer marks an unused tile slot. */
if (tile_layer < 0.0) {
return false;
}
vec4 tile_info = texelFetch(map, ivec2(tile, 1), 0);
co = vec3(((co.xy - tile_pos) * tile_info.zw) + tile_info.xy, tile_layer);
return true;
}
#endif
void main()
{
ivec2 uvs_clamped = ivec2(uv_screen);
vec4 tex_color = texelFetch(imageTexture, uvs_clamped, 0);
vec4 tex_color;
/* Read texture */
#ifdef TILED_IMAGE
vec3 co = vec3(uvs, 0.0);
if (node_tex_tile_lookup(co, imageTileArray, imageTileData)) {
tex_color = texture(imageTileArray, co);
}
else {
tex_color = vec4(1.0, 0.0, 1.0, 1.0);
}
#else
vec2 uvs_clamped = ((drawFlags & IMAGE_DRAW_FLAG_DO_REPEAT) != 0) ?
fract(uvs) :
clamp(uvs, vec2(0.0), vec2(1.0));
tex_color = texture(imageTexture, uvs_clamped);
#endif
bool border = is_border(uv_image);
if (!border) {
if ((drawFlags & IMAGE_DRAW_FLAG_APPLY_ALPHA) != 0) {
if (!imgPremultiplied) {
tex_color.rgb *= tex_color.a;
}
}
if ((drawFlags & IMAGE_DRAW_FLAG_DEPTH) != 0) {
tex_color = smoothstep(FAR_DISTANCE, NEAR_DISTANCE, tex_color);
}
if ((drawFlags & IMAGE_DRAW_FLAG_SHUFFLING) != 0) {
tex_color = color * dot(tex_color, shuffle);
}
if ((drawFlags & IMAGE_DRAW_FLAG_SHOW_ALPHA) == 0) {
tex_color.a = 1.0;
if ((drawFlags & IMAGE_DRAW_FLAG_APPLY_ALPHA) != 0) {
if (!imgPremultiplied) {
tex_color.rgb *= tex_color.a;
}
}
if ((drawFlags & IMAGE_DRAW_FLAG_DEPTH) != 0) {
tex_color = smoothstep(FAR_DISTANCE, NEAR_DISTANCE, tex_color);
}
if ((drawFlags & IMAGE_DRAW_FLAG_SHUFFLING) != 0) {
tex_color = color * dot(tex_color, shuffle);
}
if ((drawFlags & IMAGE_DRAW_FLAG_SHOW_ALPHA) == 0) {
tex_color.a = 1.0;
}
fragColor = tex_color;
gl_FragDepth = border ? Z_DEPTH_BORDER : Z_DEPTH_IMAGE;
}

View File

@@ -1,24 +1,34 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#define IMAGE_DRAW_FLAG_DO_REPEAT (1 << 4)
#define IMAGE_DRAW_FLAG_USE_WORLD_POS (1 << 5)
#define IMAGE_Z_DEPTH 0.75
uniform int drawFlags;
in vec2 pos;
in vec2 uv;
/* Normalized screen space uv coordinates. */
out vec2 uv_screen;
out vec2 uv_image;
in vec3 pos;
out vec2 uvs;
void main()
{
vec3 image_pos = vec3(pos, 0.0);
uv_screen = image_pos.xy;
uv_image = uv;
/* `pos` contains the coordinates of a quad (-1..1). but we need the coordinates of an image
* plane (0..1) */
vec3 image_pos = pos * 0.5 + 0.5;
vec3 world_pos = point_object_to_world(image_pos);
vec4 position = point_world_to_ndc(world_pos);
gl_Position = position;
if ((drawFlags & IMAGE_DRAW_FLAG_DO_REPEAT) != 0) {
gl_Position = vec4(pos.xy, IMAGE_Z_DEPTH, 1.0);
uvs = point_view_to_object(image_pos).xy;
}
else {
vec3 world_pos = point_object_to_world(image_pos);
vec4 position = point_world_to_ndc(world_pos);
/* Move drawn pixels to the front. In the overlay engine the depth is used
* to detect if a transparency texture or the background color should be drawn.
* Vertices are between 0.0 and 0.2, Edges between 0.2 and 0.4
* actual pixels are at 0.75, 1.0 is used for the background. */
position.z = IMAGE_Z_DEPTH;
gl_Position = position;
/* UDIM texture uses the world position for tile selection. */
uvs = ((drawFlags & IMAGE_DRAW_FLAG_USE_WORLD_POS) != 0) ? world_pos.xy : image_pos.xy;
}
}

View File

@@ -1,41 +0,0 @@
#include "gpu_shader_create_info.hh"
/* -------------------------------------------------------------------- */
/** \name Base Composite
* \{ */
/* Base composite shader: reads the normal/material buffers and writes the final color. */
GPU_SHADER_CREATE_INFO(workbench_composite)
.sampler(0, ImageType::FLOAT_2D, "normalBuffer", Frequency::PASS)
.sampler(1, ImageType::FLOAT_2D, "materialBuffer", Frequency::PASS)
.uniform_buf(4, "WorldData", "world_data", Frequency::PASS)
.push_constant(Type::BOOL, "forceShadowing")
.fragment_out(0, Type::VEC4, "fragColor")
.typedef_source("workbench_shader_shared.h")
.fragment_source("workbench_composite_frag.glsl")
.additional_info("draw_fullscreen");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Lighting Type
* \{ */
/* Studio lighting variant. */
GPU_SHADER_CREATE_INFO(workbench_composite_studio)
.define("V3D_LIGHTING_STUDIO")
.additional_info("workbench_composite")
.do_static_compilation(true);
/* Matcap lighting variant: additionally samples the diffuse/specular matcap textures. */
GPU_SHADER_CREATE_INFO(workbench_composite_matcap)
.define("V3D_LIGHTING_MATCAP")
.sampler(2, ImageType::FLOAT_2D, "matcap_diffuse_tx", Frequency::PASS)
.sampler(3, ImageType::FLOAT_2D, "matcap_specular_tx", Frequency::PASS)
.additional_info("workbench_composite")
.do_static_compilation(true);
/* Flat lighting variant. */
GPU_SHADER_CREATE_INFO(workbench_composite_flat)
.define("V3D_LIGHTING_FLAT")
.additional_info("workbench_composite")
.do_static_compilation(true);
/** \} */

View File

@@ -1,64 +0,0 @@
#include "gpu_shader_create_info.hh"
/* -------------------------------------------------------------------- */
/** \name TAA
* \{ */
/* Temporal anti-aliasing resolve: weighted accumulation of the color buffer. */
GPU_SHADER_CREATE_INFO(workbench_taa)
.sampler(0, ImageType::FLOAT_2D, "colorBuffer")
.push_constant(Type::FLOAT, "samplesWeights", 9)
.fragment_out(0, Type::VEC4, "fragColor")
.fragment_source("workbench_effect_taa_frag.glsl")
.additional_info("draw_fullscreen")
.do_static_compilation(true);
/** \} */
/* -------------------------------------------------------------------- */
/** \name SMAA
* \{ */
/* Varyings shared by all SMAA stages. */
GPU_SHADER_INTERFACE_INFO(workbench_smaa_iface, "")
.smooth(Type::VEC2, "uvs")
.smooth(Type::VEC2, "pixcoord")
.smooth(Type::VEC4, "offset[3]");
/* Common SMAA configuration; stages below specialize it via SMAA_STAGE. */
GPU_SHADER_CREATE_INFO(workbench_smaa)
.define("SMAA_GLSL_3")
.define("SMAA_RT_METRICS", "viewportMetrics")
.define("SMAA_PRESET_HIGH")
.define("SMAA_LUMA_WEIGHT", "float4(1.0, 1.0, 1.0, 1.0)")
.define("SMAA_NO_DISCARD")
.vertex_out(workbench_smaa_iface)
.push_constant(Type::VEC4, "viewportMetrics")
.vertex_source("workbench_effect_smaa_vert.glsl")
.fragment_source("workbench_effect_smaa_frag.glsl");
/* Stage 0: edge detection from the color buffer. */
GPU_SHADER_CREATE_INFO(workbench_smaa_stage_0)
.define("SMAA_STAGE", "0")
.sampler(0, ImageType::FLOAT_2D, "colorTex")
.fragment_out(0, Type::VEC2, "out_edges")
.additional_info("workbench_smaa")
.do_static_compilation(true);
/* Stage 1: blending-weight calculation from the detected edges. */
GPU_SHADER_CREATE_INFO(workbench_smaa_stage_1)
.define("SMAA_STAGE", "1")
.sampler(0, ImageType::FLOAT_2D, "edgesTex")
.sampler(1, ImageType::FLOAT_2D, "areaTex")
.sampler(2, ImageType::FLOAT_2D, "searchTex")
.fragment_out(0, Type::VEC4, "out_weights")
.additional_info("workbench_smaa")
.do_static_compilation(true);
/* Stage 2: neighborhood blending producing the final color. */
GPU_SHADER_CREATE_INFO(workbench_smaa_stage_2)
.define("SMAA_STAGE", "2")
.sampler(0, ImageType::FLOAT_2D, "colorTex")
.sampler(1, ImageType::FLOAT_2D, "blendTex")
.push_constant(Type::FLOAT, "mixFactor")
.push_constant(Type::FLOAT, "taaAccumulatedWeight")
.fragment_out(0, Type::VEC4, "out_color")
.additional_info("workbench_smaa")
.do_static_compilation(true);
/** \} */

View File

@@ -1,55 +0,0 @@
#include "gpu_shader_create_info.hh"
/* Shared resources for all depth-of-field stages; each stage specializes via a define. */
GPU_SHADER_CREATE_INFO(workbench_effect_dof)
/* TODO(fclem): Split resources per stage. */
.sampler(0, ImageType::FLOAT_2D, "inputCocTex")
.sampler(1, ImageType::FLOAT_2D, "maxCocTilesTex")
.sampler(2, ImageType::FLOAT_2D, "sceneColorTex")
.sampler(3, ImageType::FLOAT_2D, "sceneDepthTex")
.sampler(4, ImageType::FLOAT_2D, "backgroundTex")
.sampler(5, ImageType::FLOAT_2D, "halfResColorTex")
.sampler(6, ImageType::FLOAT_2D, "blurTex")
.sampler(7, ImageType::FLOAT_2D, "noiseTex")
.push_constant(Type::VEC2, "invertedViewportSize")
.push_constant(Type::VEC2, "nearFar")
.push_constant(Type::VEC3, "dofParams")
.push_constant(Type::FLOAT, "noiseOffset")
.fragment_source("workbench_effect_dof_frag.glsl")
.additional_info("draw_fullscreen")
.additional_info("draw_view");
/* Prepare: half-resolution color + normalized circle-of-confusion. */
GPU_SHADER_CREATE_INFO(workbench_effect_dof_prepare)
.define("PREPARE")
.fragment_out(0, Type::VEC4, "halfResColor")
.fragment_out(1, Type::VEC2, "normalizedCoc")
.additional_info("workbench_effect_dof")
.do_static_compilation(true);
/* Downsample pass. */
GPU_SHADER_CREATE_INFO(workbench_effect_dof_downsample)
.define("DOWNSAMPLE")
.fragment_out(0, Type::VEC4, "outColor")
.fragment_out(1, Type::VEC2, "outCocs")
.additional_info("workbench_effect_dof")
.do_static_compilation(true);
/* First blur pass: 49-tap kernel supplied through a uniform buffer. */
GPU_SHADER_CREATE_INFO(workbench_effect_dof_blur1)
.define("BLUR1")
.define("NUM_SAMPLES", "49")
.uniform_buf(1, "vec4", "samples[49]")
.fragment_out(0, Type::VEC4, "blurColor")
.additional_info("workbench_effect_dof")
.do_static_compilation(true);
/* Second blur pass. */
GPU_SHADER_CREATE_INFO(workbench_effect_dof_blur2)
.define("BLUR2")
.fragment_out(0, Type::VEC4, "finalColor")
.additional_info("workbench_effect_dof")
.do_static_compilation(true);
/* Resolve pass: dual-source blended add/multiply outputs. */
GPU_SHADER_CREATE_INFO(workbench_effect_dof_resolve)
.define("RESOLVE")
.fragment_out(0, Type::VEC4, "finalColorAdd", DualBlend::SRC_0)
.fragment_out(0, Type::VEC4, "finalColorMul", DualBlend::SRC_1)
.additional_info("workbench_effect_dof")
.do_static_compilation(true);

View File

@@ -1,11 +0,0 @@
#include "gpu_shader_create_info.hh"
/* Fullscreen outline pass: derives object outlines from the object-id buffer. */
GPU_SHADER_CREATE_INFO(workbench_effect_outline)
.typedef_source("workbench_shader_shared.h")
.fragment_source("workbench_effect_outline_frag.glsl")
.sampler(0, ImageType::UINT_2D, "objectIdBuffer")
.uniform_buf(4, "WorldData", "world_data", Frequency::PASS)
.fragment_out(0, Type::VEC4, "fragColor")
.additional_info("draw_fullscreen")
.do_static_compilation(true);

View File

@@ -1,9 +0,0 @@
#include "gpu_shader_create_info.hh"
/* Fullscreen pass merging the in-front depth buffer into the main drawing. */
GPU_SHADER_CREATE_INFO(workbench_merge_infront)
.fragment_out(0, Type::VEC4, "fragColor")
.sampler(0, ImageType::DEPTH_2D, "depthBuffer")
.fragment_source("workbench_merge_infront_frag.glsl")
.additional_info("draw_fullscreen")
.do_static_compilation(true);

View File

@@ -1,149 +0,0 @@
#include "gpu_shader_create_info.hh"
/* -------------------------------------------------------------------- */
/** \name Object Type
* \{ */
/* Mesh geometry: position, normal, active color and active UV attributes. */
GPU_SHADER_CREATE_INFO(workbench_mesh)
.vertex_in(0, Type::VEC3, "pos")
.vertex_in(1, Type::VEC3, "nor")
.vertex_in(2, Type::VEC4, "ac")
.vertex_in(3, Type::VEC2, "au")
.vertex_source("workbench_prepass_vert.glsl")
.additional_info("draw_mesh")
.additional_info("draw_resource_handle");
/* Hair geometry: color/UV come from buffer textures instead of vertex attributes. */
GPU_SHADER_CREATE_INFO(workbench_hair)
.sampler(0, ImageType::FLOAT_BUFFER, "ac", Frequency::BATCH)
.sampler(1, ImageType::FLOAT_BUFFER, "au", Frequency::BATCH)
.vertex_source("workbench_prepass_hair_vert.glsl")
.additional_info("draw_hair")
.additional_info("draw_resource_handle");
GPU_SHADER_CREATE_INFO(workbench_pointcloud)
.vertex_source("workbench_prepass_pointcloud_vert.glsl")
.additional_info("draw_pointcloud")
.additional_info("draw_resource_handle");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Texture Type
* \{ */
GPU_SHADER_CREATE_INFO(workbench_texture_none).define("TEXTURE_NONE");
/* Single (non-tiled) image texture. */
GPU_SHADER_CREATE_INFO(workbench_texture_single)
.sampler(2, ImageType::FLOAT_2D, "imageTexture", Frequency::BATCH)
.push_constant(Type::BOOL, "imagePremult")
.push_constant(Type::FLOAT, "imageTransparencyCutoff")
.define("V3D_SHADING_TEXTURE_COLOR");
/* UDIM tiled image: tile array plus the per-tile mapping data. */
GPU_SHADER_CREATE_INFO(workbench_texture_tile)
.sampler(2, ImageType::FLOAT_2D_ARRAY, "imageTileArray", Frequency::BATCH)
.sampler(3, ImageType::FLOAT_1D_ARRAY, "imageTileData", Frequency::BATCH)
.push_constant(Type::BOOL, "imagePremult")
.push_constant(Type::FLOAT, "imageTransparencyCutoff")
.define("V3D_SHADING_TEXTURE_COLOR")
.define("TEXTURE_IMAGE_ARRAY");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Lighting Type (only for transparent)
* \{ */
GPU_SHADER_CREATE_INFO(workbench_lighting_flat).define("V3D_LIGHTING_FLAT");
GPU_SHADER_CREATE_INFO(workbench_lighting_studio).define("V3D_LIGHTING_STUDIO");
GPU_SHADER_CREATE_INFO(workbench_lighting_matcap)
.define("V3D_LIGHTING_MATCAP")
.sampler(4, ImageType::FLOAT_2D, "matcap_diffuse_tx", Frequency::PASS)
.sampler(5, ImageType::FLOAT_2D, "matcap_specular_tx", Frequency::PASS);
/** \} */
/* -------------------------------------------------------------------- */
/** \name Material Interface
* \{ */
/* Varyings passed from the prepass vertex shader to the fragment shader. */
GPU_SHADER_INTERFACE_INFO(workbench_material_iface, "")
.smooth(Type::VEC3, "normal_interp")
.smooth(Type::VEC3, "color_interp")
.smooth(Type::FLOAT, "alpha_interp")
.smooth(Type::VEC2, "uv_interp")
.flat(Type::INT, "object_id")
.flat(Type::FLOAT, "roughness")
.flat(Type::FLOAT, "metallic");
GPU_SHADER_CREATE_INFO(workbench_material)
.uniform_buf(4, "WorldData", "world_data", Frequency::PASS)
.uniform_buf(5, "vec4", "materials_data[4096]", Frequency::PASS)
.push_constant(Type::INT, "materialIndex")
.push_constant(Type::BOOL, "useMatcap")
.vertex_out(workbench_material_iface);
/** \} */
/* -------------------------------------------------------------------- */
/** \name Pipeline Type
* \{ */
GPU_SHADER_CREATE_INFO(workbench_transparent_accum)
/* Note: Blending will be skipped on objectId because output is a
non-normalized integer buffer. */
.fragment_out(0, Type::VEC4, "transparentAccum")
.fragment_out(1, Type::VEC4, "revealageAccum")
.fragment_out(2, Type::UINT, "objectId")
.push_constant(Type::BOOL, "forceShadowing")
.typedef_source("workbench_shader_shared.h")
.fragment_source("workbench_transparent_accum_frag.glsl");
GPU_SHADER_CREATE_INFO(workbench_opaque)
.fragment_out(0, Type::VEC4, "materialData")
.fragment_out(1, Type::VEC2, "normalData")
.fragment_out(2, Type::UINT, "objectId")
.typedef_source("workbench_shader_shared.h")
.fragment_source("workbench_prepass_frag.glsl");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Variations Declaration
* \{ */
/* The macros below expand to the cross product of
 * clipping x texture x datatype x pipeline/lighting variations. */
#define WORKBENCH_FINAL_VARIATION(name, ...) \
GPU_SHADER_CREATE_INFO(name).additional_info(__VA_ARGS__).do_static_compilation(true);
#define WORKBENCH_CLIPPING_VARIATIONS(prefix, ...) \
WORKBENCH_FINAL_VARIATION(prefix##_clip, "drw_clipped", __VA_ARGS__) \
WORKBENCH_FINAL_VARIATION(prefix##_no_clip, __VA_ARGS__)
#define WORKBENCH_TEXTURE_VARIATIONS(prefix, ...) \
WORKBENCH_CLIPPING_VARIATIONS(prefix##_tex_none, "workbench_texture_none", __VA_ARGS__) \
WORKBENCH_CLIPPING_VARIATIONS(prefix##_tex_single, "workbench_texture_single", __VA_ARGS__) \
WORKBENCH_CLIPPING_VARIATIONS(prefix##_tex_tile, "workbench_texture_tile", __VA_ARGS__)
#define WORKBENCH_DATATYPE_VARIATIONS(prefix, ...) \
WORKBENCH_TEXTURE_VARIATIONS(prefix##_mesh, "workbench_mesh", __VA_ARGS__) \
WORKBENCH_TEXTURE_VARIATIONS(prefix##_hair, "workbench_hair", __VA_ARGS__) \
WORKBENCH_TEXTURE_VARIATIONS(prefix##_ptcloud, "workbench_pointcloud", __VA_ARGS__)
#define WORKBENCH_PIPELINE_VARIATIONS(prefix, ...) \
WORKBENCH_DATATYPE_VARIATIONS(prefix##_transp_studio, \
                              "workbench_transparent_accum", \
                              "workbench_lighting_studio", \
                              __VA_ARGS__) \
WORKBENCH_DATATYPE_VARIATIONS(prefix##_transp_matcap, \
                              "workbench_transparent_accum", \
                              "workbench_lighting_matcap", \
                              __VA_ARGS__) \
WORKBENCH_DATATYPE_VARIATIONS(prefix##_transp_flat, \
                              "workbench_transparent_accum", \
                              "workbench_lighting_flat", \
                              __VA_ARGS__) \
WORKBENCH_DATATYPE_VARIATIONS(prefix##_opaque, "workbench_opaque", __VA_ARGS__)
WORKBENCH_PIPELINE_VARIATIONS(workbench, "workbench_material");
/** \} */

View File

@@ -1,98 +0,0 @@
#include "gpu_shader_create_info.hh"
/* -------------------------------------------------------------------- */
/** \name Common
* \{ */
/* Varyings produced by the shadow volume vertex shader. */
GPU_SHADER_INTERFACE_INFO(workbench_shadow_iface, "vData")
.smooth(Type::VEC3, "pos")
.smooth(Type::VEC4, "frontPosition")
.smooth(Type::VEC4, "backPosition");
/* Resources shared by every shadow-volume variation below. */
GPU_SHADER_CREATE_INFO(workbench_shadow_common)
.vertex_in(0, Type::VEC3, "pos")
.vertex_out(workbench_shadow_iface)
.push_constant(Type::FLOAT, "lightDistance")
.push_constant(Type::VEC3, "lightDirection")
.vertex_source("workbench_shadow_vert.glsl")
.additional_info("draw_mesh");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Manifold Type
* \{ */
/* Manifold geometry emits one strip per primitive; non-manifold needs two. */
GPU_SHADER_CREATE_INFO(workbench_shadow_manifold)
.geometry_layout(PrimitiveIn::LINES_ADJACENCY, PrimitiveOut::TRIANGLE_STRIP, 4, 1)
.geometry_source("workbench_shadow_geom.glsl");
GPU_SHADER_CREATE_INFO(workbench_shadow_no_manifold)
.geometry_layout(PrimitiveIn::LINES_ADJACENCY, PrimitiveOut::TRIANGLE_STRIP, 4, 2)
.geometry_source("workbench_shadow_geom.glsl");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Caps Type
* \{ */
GPU_SHADER_CREATE_INFO(workbench_shadow_caps)
.geometry_layout(PrimitiveIn::TRIANGLES, PrimitiveOut::TRIANGLE_STRIP, 3, 2)
.geometry_source("workbench_shadow_caps_geom.glsl");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Debug Type
* \{ */
/* Normal mode only writes depth; debug mode also writes visualization buffers. */
GPU_SHADER_CREATE_INFO(workbench_shadow_no_debug)
.fragment_source("gpu_shader_depth_only_frag.glsl");
GPU_SHADER_CREATE_INFO(workbench_shadow_debug)
.fragment_out(0, Type::VEC4, "materialData")
.fragment_out(1, Type::VEC4, "normalData")
.fragment_out(2, Type::UINT, "objectId")
.fragment_source("workbench_shadow_debug_frag.glsl");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Variations Declaration
* \{ */
/* Expands depth-pass/depth-fail x manifold x caps variations, with and without debug. */
#define WORKBENCH_SHADOW_VARIATIONS(suffix, ...) \
GPU_SHADER_CREATE_INFO(workbench_shadow_pass_manifold_no_caps##suffix) \
.define("SHADOW_PASS") \
.additional_info("workbench_shadow_common", "workbench_shadow_manifold", __VA_ARGS__) \
.do_static_compilation(true); \
GPU_SHADER_CREATE_INFO(workbench_shadow_pass_no_manifold_no_caps##suffix) \
.define("SHADOW_PASS") \
.define("DOUBLE_MANIFOLD") \
.additional_info("workbench_shadow_common", "workbench_shadow_no_manifold", __VA_ARGS__) \
.do_static_compilation(true); \
GPU_SHADER_CREATE_INFO(workbench_shadow_fail_manifold_caps##suffix) \
.define("SHADOW_FAIL") \
.additional_info("workbench_shadow_common", "workbench_shadow_caps", __VA_ARGS__) \
.do_static_compilation(true); \
GPU_SHADER_CREATE_INFO(workbench_shadow_fail_manifold_no_caps##suffix) \
.define("SHADOW_FAIL") \
.additional_info("workbench_shadow_common", "workbench_shadow_manifold", __VA_ARGS__) \
.do_static_compilation(true); \
GPU_SHADER_CREATE_INFO(workbench_shadow_fail_no_manifold_caps##suffix) \
.define("SHADOW_FAIL") \
.define("DOUBLE_MANIFOLD") \
.additional_info("workbench_shadow_common", "workbench_shadow_caps", __VA_ARGS__) \
.do_static_compilation(true); \
GPU_SHADER_CREATE_INFO(workbench_shadow_fail_no_manifold_no_caps##suffix) \
.define("SHADOW_FAIL") \
.define("DOUBLE_MANIFOLD") \
.additional_info("workbench_shadow_common", "workbench_shadow_no_manifold", __VA_ARGS__) \
.do_static_compilation(true);
WORKBENCH_SHADOW_VARIATIONS(, "workbench_shadow_no_debug")
WORKBENCH_SHADOW_VARIATIONS(_debug, "workbench_shadow_debug")
/** \} */

View File

@@ -1,10 +0,0 @@
#include "gpu_shader_create_info.hh"
/* Full-screen resolve pass combining the two transparency buffers
 * (`transparentAccum` + `transparentRevealage` -- presumably weighted-blended
 * OIT; confirm in workbench_transparent_resolve_frag.glsl) into a single
 * output color. */
GPU_SHADER_CREATE_INFO(workbench_transparent_resolve)
.fragment_out(0, Type::VEC4, "fragColor")
.sampler(0, ImageType::FLOAT_2D, "transparentAccum")
.sampler(1, ImageType::FLOAT_2D, "transparentRevealage")
.fragment_source("workbench_transparent_resolve_frag.glsl")
.additional_info("draw_fullscreen")
.do_static_compilation(true);

View File

@@ -1,114 +0,0 @@
#include "gpu_shader_create_info.hh"
/* -------------------------------------------------------------------- */
/** \name Volume shader base
* \{ */
/* Base create-info shared by every volume shader variant: samples
 * `densityTexture` against the scene depth in `depthBuffer`. The
 * `samplesLen` / `stepLength` / `noiseOfs` constants suggest a ray-marching
 * loop (see workbench_volume_frag.glsl). Not compiled directly; the variants
 * below add defines and extra samplers. */
GPU_SHADER_CREATE_INFO(workbench_volume)
.vertex_in(0, Type::VEC3, "pos")
.fragment_out(0, Type::VEC4, "fragColor")
.sampler(0, ImageType::DEPTH_2D, "depthBuffer")
.sampler(1, ImageType::FLOAT_3D, "densityTexture")
.push_constant(Type::INT, "samplesLen")
.push_constant(Type::FLOAT, "noiseOfs")
.push_constant(Type::FLOAT, "stepLength")
.push_constant(Type::FLOAT, "densityScale")
.vertex_source("workbench_volume_vert.glsl")
.fragment_source("workbench_volume_frag.glsl")
.additional_info("draw_object_infos");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Smoke variation
* \{ */
/* Smoke-domain variant: adds flame density and its color ramp. */
GPU_SHADER_CREATE_INFO(workbench_volume_smoke)
.define("VOLUME_SMOKE")
.sampler(2, ImageType::FLOAT_3D, "flameTexture")
.sampler(3, ImageType::FLOAT_1D, "flameColorTexture")
.additional_info("draw_mesh", "draw_resource_id_varying");
/* Generic volume-object variant: both texture<->object space matrices are
 * pushed individually. */
GPU_SHADER_CREATE_INFO(workbench_volume_object)
.define("VOLUME_OBJECT")
.push_constant(Type::MAT4, "volumeTextureToObject")
/* FIXME(fclem): This overflows the push_constant limit. */
.push_constant(Type::MAT4, "volumeObjectToTexture")
.additional_info("draw_volume", "draw_resource_id_varying");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Color Band variation
* \{ */
/* Color-band display: maps a simulation grid through `transferTexture`,
 * optionally showing flags / phi / pressure. */
GPU_SHADER_CREATE_INFO(workbench_volume_coba)
.define("USE_COBA")
.sampler(4, ImageType::UINT_3D, "flagTexture")
.sampler(5, ImageType::FLOAT_1D, "transferTexture")
.push_constant(Type::BOOL, "showPhi")
.push_constant(Type::BOOL, "showFlags")
.push_constant(Type::BOOL, "showPressure")
.push_constant(Type::FLOAT, "gridScale")
.push_constant(Type::FLOAT, "gridScale");
/* Plain shading path (no color band).
 * NOTE(review): sampler 5 reuses the name "transferTexture" with a different
 * type than the coba variant (UINT_2D here vs FLOAT_1D above) -- verify this
 * matches the declaration in workbench_volume_frag.glsl when USE_COBA is
 * disabled. */
GPU_SHADER_CREATE_INFO(workbench_volume_no_coba)
.sampler(4, ImageType::FLOAT_3D, "shadowTexture")
.sampler(5, ImageType::UINT_2D, "transferTexture")
.push_constant(Type::VEC3, "activeColor");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Sampling variation
* \{ */
/* Texture interpolation variants; exactly one is mixed into each final
 * permutation (see WORKBENCH_VOLUME_INTERP_VARIATIONS below). */
GPU_SHADER_CREATE_INFO(workbench_volume_linear).define("USE_TRILINEAR");
GPU_SHADER_CREATE_INFO(workbench_volume_cubic).define("USE_TRICUBIC");
GPU_SHADER_CREATE_INFO(workbench_volume_closest).define("USE_CLOSEST");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Slice variation
* \{ */
/* Single axis-aligned slice display instead of full volume rendering.
 * `localPos` is interpolated from vertex to fragment stage. */
GPU_SHADER_INTERFACE_INFO(workbench_volume_iface, "").smooth(Type::VEC3, "localPos");
GPU_SHADER_CREATE_INFO(workbench_volume_slice)
.define("VOLUME_SLICE")
.vertex_in(1, Type::VEC3, "uvs")
.vertex_out(workbench_volume_iface)
.push_constant(Type::INT, "sliceAxis") /* -1 is no slice. */
.push_constant(Type::FLOAT, "slicePosition");
/** \} */
/* -------------------------------------------------------------------- */
/** \name Variations Declaration
* \{ */
/* Nested macros producing the cartesian product of all volume permutations:
 * {smoke, object} x {linear, cubic, closest} x {coba, no_coba} x
 * {slice, no_slice} -- 24 statically compiled shaders in total.
 * (No comments inside the macros because of the line continuations.) */
#define WORKBENCH_VOLUME_SLICE_VARIATIONS(prefix, ...) \
GPU_SHADER_CREATE_INFO(prefix##_slice) \
.additional_info("workbench_volume_slice", __VA_ARGS__) \
.do_static_compilation(true); \
GPU_SHADER_CREATE_INFO(prefix##_no_slice) \
.additional_info(__VA_ARGS__) \
.do_static_compilation(true);
#define WORKBENCH_VOLUME_COBA_VARIATIONS(prefix, ...) \
WORKBENCH_VOLUME_SLICE_VARIATIONS(prefix##_coba, "workbench_volume_coba", __VA_ARGS__) \
WORKBENCH_VOLUME_SLICE_VARIATIONS(prefix##_no_coba, "workbench_volume_no_coba", __VA_ARGS__)
#define WORKBENCH_VOLUME_INTERP_VARIATIONS(prefix, ...) \
WORKBENCH_VOLUME_COBA_VARIATIONS(prefix##_linear, "workbench_volume_linear", __VA_ARGS__) \
WORKBENCH_VOLUME_COBA_VARIATIONS(prefix##_cubic, "workbench_volume_cubic", __VA_ARGS__) \
WORKBENCH_VOLUME_COBA_VARIATIONS(prefix##_closest, "workbench_volume_closest", __VA_ARGS__)
#define WORKBENCH_VOLUME_SMOKE_VARIATIONS(prefix, ...) \
WORKBENCH_VOLUME_INTERP_VARIATIONS(prefix##_smoke, "workbench_volume_smoke", __VA_ARGS__) \
WORKBENCH_VOLUME_INTERP_VARIATIONS(prefix##_object, "workbench_volume_object", __VA_ARGS__)
/* Instantiate everything on top of the base `workbench_volume` info. */
WORKBENCH_VOLUME_SMOKE_VARIATIONS(workbench_volume, "workbench_volume")
/** \} */

View File

@@ -1,7 +1,15 @@
#pragma BLENDER_REQUIRE(common_view_lib.glsl)
#pragma BLENDER_REQUIRE(workbench_data_lib.glsl)
#pragma BLENDER_REQUIRE(workbench_common_lib.glsl)
layout(std140) uniform samples_block
{
vec4 samples_coords[512];
};
uniform sampler2D cavityJitter;
/* From The Alchemy screen-space ambient obscurance algorithm
* http://graphics.cs.williams.edu/papers/AlchemyHPG11/VV11AlchemyAO.pdf */

View File

@@ -4,6 +4,13 @@
#pragma BLENDER_REQUIRE(workbench_matcap_lib.glsl)
#pragma BLENDER_REQUIRE(workbench_world_light_lib.glsl)
uniform sampler2D materialBuffer;
uniform sampler2D normalBuffer;
in vec4 uvcoordsvar;
out vec4 fragColor;
void main()
{
/* Normal and Incident vector are in viewspace. Lighting is evaluated in viewspace. */
@@ -20,7 +27,7 @@ void main()
/* When using matcaps, mat_data.a is the back-face sign. */
N = (mat_data.a > 0.0) ? N : -N;
fragColor.rgb = get_matcap_lighting(matcap_diffuse_tx, matcap_specular_tx, base_color, N, I);
fragColor.rgb = get_matcap_lighting(base_color, N, I);
#endif
#ifdef V3D_LIGHTING_STUDIO
@@ -31,7 +38,7 @@ void main()
fragColor.rgb = base_color;
#endif
fragColor.rgb *= get_shadow(N, forceShadowing);
fragColor.rgb *= get_shadow(N);
fragColor.a = 1.0;
}

View File

@@ -1,4 +1,6 @@
#pragma BLENDER_REQUIRE(workbench_data_lib.glsl)
float curvature_soft_clamp(float curvature, float control)
{
if (curvature < 0.5 / control) {

View File

@@ -0,0 +1,48 @@
/* Type declarations shared between C++ and shader code for the Workbench
 * engine.
 * NOTE(review): no matching `#define WORKBENCH_SHADER_SHARED_H` is visible in
 * this view (the surrounding diff reports more lines than are shown) --
 * confirm the include guard is actually completed elsewhere, otherwise the
 * guard has no effect. */
#ifndef WORKBENCH_SHADER_SHARED_H
/* One studio/scene light. */
struct LightData {
vec4 direction;
vec4 specular_color;
vec4 diffuse_color_wrap; /* rgb: diffuse col a: wrapped lighting factor */
};
/* Per-viewport world settings consumed through the std140 block below;
 * fields are grouped to satisfy 16-byte std140 alignment. */
struct WorldData {
vec4 viewport_size;
vec4 object_outline_color;
vec4 shadow_direction_vs;
float shadow_focus;
float shadow_shift;
float shadow_mul;
float shadow_add;
/* - 16 bytes alignment - */
LightData lights[4];
vec4 ambient_color;
int cavity_sample_start;
int cavity_sample_end;
float cavity_sample_count_inv;
float cavity_jitter_scale;
float cavity_valley_factor;
float cavity_ridge_factor;
float cavity_attenuation;
float cavity_distance;
float curvature_ridge;
float curvature_valley;
float ui_scale;
float _pad0; /* Explicit padding. */
int matcap_orientation;
bool use_specular;
int _pad1;
int _pad2;
};
/* Alias for the zw components of viewport_size -- presumably the inverse
 * viewport size; confirm against the C++ code filling this struct. */
# define viewport_size_inv viewport_size.zw
/* std140 uniform block exposing WorldData to the shaders. */
layout(std140) uniform world_block
{
WorldData world_data;
};
#endif

View File

@@ -4,6 +4,18 @@
#pragma BLENDER_REQUIRE(workbench_cavity_lib.glsl)
#pragma BLENDER_REQUIRE(workbench_curvature_lib.glsl)
#ifndef DRW_SHADER_SHARED_H
uniform sampler2D depthBuffer;
uniform sampler2D normalBuffer;
uniform usampler2D objectIdBuffer;
in vec4 uvcoordsvar;
out vec4 fragColor;
#endif
void main()
{
float cavity = 0.0, edges = 0.0, curvature = 0.0;

View File

@@ -3,13 +3,10 @@
GPU_SHADER_CREATE_INFO(workbench_effect_cavity_common)
.fragment_out(0, Type::VEC4, "fragColor")
.sampler(0, ImageType::DEPTH_2D, "depthBuffer")
.sampler(0, ImageType::FLOAT_2D, "depthBuffer")
.sampler(1, ImageType::FLOAT_2D, "normalBuffer")
.sampler(2, ImageType::UINT_2D, "objectIdBuffer")
.sampler(3, ImageType::FLOAT_2D, "cavityJitter")
.uniform_buf(3, "vec4", "samples_coords[512]")
.uniform_buf(4, "WorldData", "world_data", Frequency::PASS)
.typedef_source("workbench_shader_shared.h")
.fragment_source("workbench_effect_cavity_frag.glsl")
.additional_info("draw_fullscreen")
.additional_info("draw_view");

View File

@@ -7,6 +7,19 @@
* Converted and adapted from HLSL to GLSL by Clément Foucault
*/
uniform vec2 invertedViewportSize;
uniform vec2 nearFar;
uniform vec3 dofParams;
uniform float noiseOffset;
uniform sampler2D inputCocTex;
uniform sampler2D maxCocTilesTex;
uniform sampler2D sceneColorTex;
uniform sampler2D sceneDepthTex;
uniform sampler2D backgroundTex;
uniform sampler2D halfResColorTex;
uniform sampler2D blurTex;
uniform sampler2D noiseTex;
#define dof_aperturesize dofParams.x
#define dof_distance dofParams.y
#define dof_invsensorsize dofParams.z
@@ -40,6 +53,9 @@ float decode_signed_coc(vec2 cocs)
*/
#ifdef PREPARE
layout(location = 0) out vec4 halfResColor;
layout(location = 1) out vec2 normalizedCoc;
void main()
{
ivec4 texel = ivec4(gl_FragCoord.xyxy) * 2 + ivec4(0, 0, 1, 1);
@@ -83,6 +99,9 @@ void main()
*/
#ifdef DOWNSAMPLE
layout(location = 0) out vec4 outColor;
layout(location = 1) out vec2 outCocs;
void main()
{
vec4 texel = vec4(gl_FragCoord.xyxy) * 2.0 + vec4(0.0, 0.0, 1.0, 1.0);
@@ -197,6 +216,14 @@ void main()
* Outputs vertical blur and combined blur in MRT
*/
#ifdef BLUR1
layout(location = 0) out vec4 blurColor;
# define NUM_SAMPLES 49
layout(std140) uniform dofSamplesBlock
{
vec4 samples[NUM_SAMPLES];
};
vec2 get_random_vector(float offset)
{
@@ -281,6 +308,7 @@ void main()
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef BLUR2
out vec4 finalColor;
void main()
{
@@ -357,6 +385,9 @@ void main()
*/
#ifdef RESOLVE
layout(location = 0, index = 0) out vec4 finalColorAdd;
layout(location = 0, index = 1) out vec4 finalColorMul;
void main()
{
/* Fullscreen pass */

Some files were not shown because too many files have changed in this diff Show More