Fix #107123: Refactor sculpt normal calculation to require vert poly map #107458

Merged
Joseph Eagar merged 6 commits from HooglyBoogly/blender:pbvh-faces-normals-fix-alternate into main 2023-05-09 22:36:37 +02:00
42 changed files with 753 additions and 227 deletions
Showing only changes of commit 297c62cf95 - Show all commits

View File

@ -352,7 +352,23 @@ bool OptiXDevice::load_kernels(const uint kernel_features)
return false;
}
# if OPTIX_ABI_VERSION >= 55
# if OPTIX_ABI_VERSION >= 84
OptixTask task = nullptr;
OptixResult result = optixModuleCreateWithTasks(context,
&module_options,
&pipeline_options,
ptx_data.data(),
ptx_data.size(),
nullptr,
nullptr,
&optix_module,
&task);
if (result == OPTIX_SUCCESS) {
TaskPool pool;
execute_optix_task(pool, task, result);
pool.wait_work();
}
# elif OPTIX_ABI_VERSION >= 55
OptixTask task = nullptr;
OptixResult result = optixModuleCreateFromPTXWithTasks(context,
&module_options,
@ -555,7 +571,11 @@ bool OptiXDevice::load_kernels(const uint kernel_features)
memset(sbt_data.host_pointer, 0, sizeof(SbtRecord) * NUM_PROGRAM_GROUPS);
for (int i = 0; i < NUM_PROGRAM_GROUPS; ++i) {
optix_assert(optixSbtRecordPackHeader(groups[i], &sbt_data[i]));
# if OPTIX_ABI_VERSION >= 84
optix_assert(optixProgramGroupGetStackSize(groups[i], &stack_size[i], nullptr));
# else
optix_assert(optixProgramGroupGetStackSize(groups[i], &stack_size[i]));
# endif
}
sbt_data.copy_to_device(); /* Upload SBT to device. */
@ -577,7 +597,9 @@ bool OptiXDevice::load_kernels(const uint kernel_features)
OptixPipelineLinkOptions link_options = {};
link_options.maxTraceDepth = 1;
# if OPTIX_ABI_VERSION < 84
link_options.debugLevel = module_options.debugLevel;
# endif
if (use_osl) {
/* Re-create OSL pipeline in case kernels are reloaded after it has been created before. */
@ -768,6 +790,16 @@ bool OptiXDevice::load_osl_kernels()
return false;
}
# if OPTIX_ABI_VERSION >= 84
const OptixResult result = optixModuleCreate(context,
&module_options,
&pipeline_options,
ptx_data.data(),
ptx_data.size(),
nullptr,
0,
&osl_modules.back());
# else
const OptixResult result = optixModuleCreateFromPTX(context,
&module_options,
&pipeline_options,
@ -776,6 +808,7 @@ bool OptiXDevice::load_osl_kernels()
nullptr,
0,
&osl_modules.back());
# endif
if (result != OPTIX_SUCCESS) {
set_error(string_printf("Failed to load OptiX OSL services kernel from '%s' (%s)",
ptx_filename.c_str(),
@ -800,7 +833,21 @@ bool OptiXDevice::load_osl_kernels()
continue;
}
# if OPTIX_ABI_VERSION >= 55
# if OPTIX_ABI_VERSION >= 84
OptixTask task = nullptr;
results[i] = optixModuleCreateWithTasks(context,
&module_options,
&pipeline_options,
osl_kernels[i].ptx.data(),
osl_kernels[i].ptx.size(),
nullptr,
nullptr,
&osl_modules[i],
&task);
if (results[i] == OPTIX_SUCCESS) {
execute_optix_task(pool, task, results[i]);
}
# elif OPTIX_ABI_VERSION >= 55
OptixTask task = nullptr;
results[i] = optixModuleCreateFromPTXWithTasks(context,
&module_options,
@ -861,12 +908,20 @@ bool OptiXDevice::load_osl_kernels()
sbt_data.alloc(NUM_PROGRAM_GROUPS + osl_groups.size());
for (int i = 0; i < NUM_PROGRAM_GROUPS; ++i) {
optix_assert(optixSbtRecordPackHeader(groups[i], &sbt_data[i]));
# if OPTIX_ABI_VERSION >= 84
optix_assert(optixProgramGroupGetStackSize(groups[i], &stack_size[i], nullptr));
# else
optix_assert(optixProgramGroupGetStackSize(groups[i], &stack_size[i]));
# endif
}
for (size_t i = 0; i < osl_groups.size(); ++i) {
if (osl_groups[i] != NULL) {
optix_assert(optixSbtRecordPackHeader(osl_groups[i], &sbt_data[NUM_PROGRAM_GROUPS + i]));
# if OPTIX_ABI_VERSION >= 84
optix_assert(optixProgramGroupGetStackSize(osl_groups[i], &osl_stack_size[i], nullptr));
# else
optix_assert(optixProgramGroupGetStackSize(osl_groups[i], &osl_stack_size[i]));
# endif
}
else {
/* Default to "__direct_callable__dummy_services", so that OSL evaluation for empty
@ -878,7 +933,9 @@ bool OptiXDevice::load_osl_kernels()
OptixPipelineLinkOptions link_options = {};
link_options.maxTraceDepth = 0;
# if OPTIX_ABI_VERSION < 84
link_options.debugLevel = OPTIX_COMPILE_DEBUG_LEVEL_NONE;
# endif
{
vector<OptixProgramGroup> pipeline_groups;

View File

@ -161,22 +161,6 @@ constexpr size_t events_pending_default_size = 4096 / sizeof(void *);
static bool use_gnome_confine_hack = false;
#endif
/**
* GNOME (mutter 42.5) doesn't follow the WAYLAND spec regarding keyboard handling,
* unlike (other compositors: KDE-plasma, River & Sway which work without problems).
*
* This means GNOME can't know which modifiers are held when activating windows,
* so we guess the left modifiers are held.
*
* This define could be removed without changing any functionality,
* it just means GNOME users will see verbose warning messages that alert them about
* a known problem that needs to be fixed up-stream.
*
* This has been fixed for GNOME 43. Keep the workaround until support for gnome 42 is dropped.
* See: https://gitlab.gnome.org/GNOME/mutter/-/issues/2457
*/
#define USE_GNOME_KEYBOARD_SUPPRESS_WARNING
/**
* KDE (plasma 5.26.1) has a bug where the cursor surface needs to be committed
* (via `wl_surface_commit`) when it was hidden and is being set to visible again, see: #102048.
@ -766,13 +750,6 @@ struct GWL_Seat {
/** Keys held matching `xkb_state`. */
struct GWL_KeyboardDepressedState key_depressed;
#ifdef USE_GNOME_KEYBOARD_SUPPRESS_WARNING
struct {
bool any_mod_held = false;
bool any_keys_held_on_enter = false;
} key_depressed_suppress_warning;
#endif
/**
* Cache result of `xkb_keymap_mod_get_index`
* so every time a modifier is accessed a string lookup isn't required.
@ -844,21 +821,6 @@ static GWL_SeatStatePointer *gwl_seat_state_pointer_from_cursor_surface(
return nullptr;
}
/* Return true when warnings about inconsistent modifier key state should be suppressed for this
 * seat. Without the GNOME workaround (USE_GNOME_KEYBOARD_SUPPRESS_WARNING) this always returns
 * false. */
static bool gwl_seat_key_depressed_suppress_warning(const GWL_Seat *seat)
{
  bool suppress_warning = false;
#ifdef USE_GNOME_KEYBOARD_SUPPRESS_WARNING
  /* A modifier is reported as held even though no keys were held when the keyboard entered the
   * window (see #GWL_Seat::key_depressed_suppress_warning). */
  if ((seat->key_depressed_suppress_warning.any_mod_held == true) &&
      (seat->key_depressed_suppress_warning.any_keys_held_on_enter == false)) {
    /* The compositor gave us invalid information, don't show a warning. */
    suppress_warning = true;
  }
#endif
  return suppress_warning;
}
/**
* \note Caller must lock `timer_mutex`.
*/
@ -1867,17 +1829,13 @@ static void keyboard_depressed_state_key_event(GWL_Seat *seat,
const GHOST_TKey gkey,
const GHOST_TEventType etype)
{
const bool show_warning = !gwl_seat_key_depressed_suppress_warning(seat);
if (GHOST_KEY_MODIFIER_CHECK(gkey)) {
const int index = GHOST_KEY_MODIFIER_TO_INDEX(gkey);
int16_t &value = seat->key_depressed.mods[index];
if (etype == GHOST_kEventKeyUp) {
value -= 1;
if (UNLIKELY(value < 0)) {
if (show_warning) {
CLOG_WARN(LOG, "modifier (%d) has negative keys held (%d)!", index, value);
}
CLOG_WARN(LOG, "modifier (%d) has negative keys held (%d)!", index, value);
value = 0;
}
}
@ -3793,10 +3751,6 @@ static void keyboard_handle_enter(void *data,
}
keyboard_depressed_state_push_events_from_change(seat, key_depressed_prev);
#ifdef USE_GNOME_KEYBOARD_SUPPRESS_WARNING
seat->key_depressed_suppress_warning.any_keys_held_on_enter = keys->size != 0;
#endif
}
/**
@ -3827,11 +3781,6 @@ static void keyboard_handle_leave(void *data,
keyboard_handle_key_repeat_cancel(seat);
}
}
#ifdef USE_GNOME_KEYBOARD_SUPPRESS_WARNING
seat->key_depressed_suppress_warning.any_mod_held = false;
seat->key_depressed_suppress_warning.any_keys_held_on_enter = false;
#endif
}
/**
@ -4088,10 +4037,6 @@ static void keyboard_handle_modifiers(void *data,
}
}
#ifdef USE_GNOME_KEYBOARD_SUPPRESS_WARNING
seat->key_depressed_suppress_warning.any_mod_held = mods_depressed != 0;
#endif
seat->data_source_serial = serial;
}
@ -5770,9 +5715,27 @@ GHOST_TSuccess GHOST_SystemWayland::getModifierKeys(GHOST_ModifierKeys &keys) co
return GHOST_kFailure;
}
const xkb_mod_mask_t state = xkb_state_serialize_mods(seat->xkb_state, XKB_STATE_MODS_DEPRESSED);
const bool show_warning = !gwl_seat_key_depressed_suppress_warning(seat);
/* Only read the underlying `seat->xkb_state` when there is an active window.
* Without this, the following situation occurs:
*
* - A window is activated (before the #wl_keyboard_listener::enter has run).
* - The modifiers from `seat->xkb_state` don't match `seat->key_depressed`.
* - Dummy values are written into `seat->key_depressed` to account for the discrepancy
* (as `seat->xkb_state` is the source of truth), however the number of held modifiers
* is not longer valid (because it's not known from dummy values).
* - #wl_keyboard_listener::enter runs, however the events generated from the state change
* may not match the physically held keys because the dummy values are not accurate.
*
* As this is an edge-case caused by the order of callbacks that run on window activation,
* don't attempt to *fix* the values in `seat->key_depressed` before the keyboard enter
* handler runs. This means the result of `getModifierKeys` may be momentarily incorrect
* however it's corrected once #wl_keyboard_listener::enter runs.
*/
const bool is_keyboard_active = seat->keyboard.wl_surface_window != nullptr;
const xkb_mod_mask_t state = is_keyboard_active ?
xkb_state_serialize_mods(seat->xkb_state,
XKB_STATE_MODS_DEPRESSED) :
0;
/* Use local #GWL_KeyboardDepressedState to check which key is pressed.
* Use XKB as the source of truth, if there is any discrepancy. */
@ -5780,8 +5743,8 @@ GHOST_TSuccess GHOST_SystemWayland::getModifierKeys(GHOST_ModifierKeys &keys) co
if (UNLIKELY(seat->xkb_keymap_mod_index[i] == XKB_MOD_INVALID)) {
continue;
}
const GWL_ModifierInfo &mod_info = g_modifier_info_table[i];
const bool val = (state & (1 << seat->xkb_keymap_mod_index[i])) != 0;
/* NOTE(@ideasman42): it's important to write the XKB state back to #GWL_KeyboardDepressedState
* otherwise changes to modifiers in the future wont generate events.
* This can cause modifiers to be stuck when switching between windows in GNOME because
@ -5791,31 +5754,31 @@ GHOST_TSuccess GHOST_SystemWayland::getModifierKeys(GHOST_ModifierKeys &keys) co
bool val_l = depressed_l > 0;
bool val_r = depressed_r > 0;
/* This shouldn't be needed, but guard against any possibility of modifiers being stuck.
* Warn so if this happens it can be investigated. */
if (val) {
if (UNLIKELY(!(val_l || val_r))) {
if (show_warning) {
if (is_keyboard_active) {
const bool val = (state & (1 << seat->xkb_keymap_mod_index[i])) != 0;
/* This shouldn't be needed, but guard against any possibility of modifiers being stuck.
* Warn so if this happens it can be investigated. */
if (val) {
if (UNLIKELY(!(val_l || val_r))) {
CLOG_WARN(&LOG_WL_KEYBOARD_DEPRESSED_STATE,
"modifier (%s) state is inconsistent (GHOST held keys do not match XKB)",
mod_info.display_name);
/* Picking the left is arbitrary. */
val_l = true;
depressed_l = 1;
}
/* Picking the left is arbitrary. */
val_l = true;
depressed_l = 1;
}
}
else {
if (UNLIKELY(val_l || val_r)) {
if (show_warning) {
else {
if (UNLIKELY(val_l || val_r)) {
CLOG_WARN(&LOG_WL_KEYBOARD_DEPRESSED_STATE,
"modifier (%s) state is inconsistent (GHOST released keys do not match XKB)",
mod_info.display_name);
val_l = false;
val_r = false;
depressed_l = 0;
depressed_r = 0;
}
val_l = false;
val_r = false;
depressed_l = 0;
depressed_r = 0;
}
}

View File

@ -40,6 +40,8 @@
#include "DEG_depsgraph_build.h"
#include "DRW_engine.h"
#include "BLO_read_write.h"
static CLG_LogRef LOG = {"bke.mask"};
@ -57,6 +59,8 @@ static void mask_copy_data(Main *UNUSED(bmain),
/* TODO: add unused flag to those as well. */
BKE_mask_layer_copy_list(&mask_dst->masklayers, &mask_src->masklayers);
BLI_listbase_clear((ListBase *)&mask_dst->drawdata);
/* enable fake user by default */
id_fake_user_set(&mask_dst->id);
}
@ -67,6 +71,8 @@ static void mask_free_data(ID *id)
/* free mask data */
BKE_mask_layer_free_list(&mask->masklayers);
DRW_drawdata_free(id);
}
static void mask_foreach_id(ID *id, LibraryForeachIDData *data)

View File

@ -211,7 +211,7 @@ void BKE_mesh_calc_edges_legacy(Mesh *me)
mesh_calc_edges_mdata(
verts.data(),
(MFace *)CustomData_get_layer(&me->fdata, CD_MFACE),
me->mface,
static_cast<MLoop *>(CustomData_get_layer_for_write(&me->ldata, CD_MLOOP, me->totloop)),
static_cast<const MPoly *>(CustomData_get_layer(&me->pdata, CD_MPOLY)),
verts.size(),
@ -234,6 +234,28 @@ void BKE_mesh_calc_edges_legacy(Mesh *me)
BKE_mesh_strip_loose_faces(me);
}
/* Strip legacy tessellated faces that don't reference a third vertex (entries with `v3 == 0` are
 * treated as loose/invalid), compacting the remaining #MFace array and its custom-data in place
 * and shrinking `totface` accordingly. */
void BKE_mesh_strip_loose_faces(Mesh *me)
{
  /* NOTE: We need to keep this for edge creation (for now?), and some old `readfile.c` code. */
  MFace *f;
  int a, b;
  MFace *mfaces = me->mface;

  for (a = b = 0, f = mfaces; a < me->totface; a++, f++) {
    if (f->v3) {
      if (a != b) {
        /* Shift the kept face down over the stripped ones, along with its custom-data. */
        memcpy(&mfaces[b], f, sizeof(mfaces[b]));
        CustomData_copy_data(&me->fdata, &me->fdata, a, b, 1);
      }
      b++;
    }
  }
  if (a != b) {
    /* Free the custom-data of the now-unused tail and update the face count. */
    CustomData_free_elem(&me->fdata, b, a - b);
    me->totface = b;
  }
}
/** \} */
/* -------------------------------------------------------------------- */
@ -417,7 +439,7 @@ static void convert_mfaces_to_mpolys(ID *id,
int totface_i,
int totloop_i,
int totpoly_i,
MEdge *edges,
blender::int2 *edges,
MFace *mface,
int *r_totloop,
int *r_totpoly)
@ -425,14 +447,10 @@ static void convert_mfaces_to_mpolys(ID *id,
MFace *mf;
MLoop *ml, *mloop;
MPoly *poly, *mpoly;
MEdge *edge;
EdgeHash *eh;
int numTex, numCol;
int i, j, totloop, totpoly, *polyindex;
/* old flag, clear to allow for reuse */
#define ME_FGON (1 << 3)
/* just in case some of these layers are filled in (can happen with python created meshes) */
CustomData_free(ldata, totloop_i);
CustomData_free(pdata, totpoly_i);
@ -474,13 +492,8 @@ static void convert_mfaces_to_mpolys(ID *id,
eh = BLI_edgehash_new_ex(__func__, uint(totedge_i));
/* build edge hash */
edge = edges;
for (i = 0; i < totedge_i; i++, edge++) {
BLI_edgehash_insert(eh, edge->v1, edge->v2, POINTER_FROM_UINT(i));
/* unrelated but avoid having the FGON flag enabled,
* so we can reuse it later for something else */
edge->flag_legacy &= ~ME_FGON;
for (i = 0; i < totedge_i; i++) {
BLI_edgehash_insert(eh, edges[i][0], edges[i][1], POINTER_FROM_UINT(i));
}
polyindex = (int *)CustomData_get_layer(fdata, CD_ORIGINDEX);
@ -534,8 +547,6 @@ static void convert_mfaces_to_mpolys(ID *id,
*r_totpoly = totpoly;
*r_totloop = totloop;
#undef ME_FGON
}
static void update_active_fdata_layers(Mesh &mesh, CustomData *fdata, CustomData *ldata)
@ -691,12 +702,6 @@ static void mesh_ensure_tessellation_customdata(Mesh *me)
void BKE_mesh_convert_mfaces_to_mpolys(Mesh *mesh)
{
const blender::Span<blender::int2> edges = mesh->edges();
blender::Array<MEdge> legacy_edges(mesh->totedge);
for (const int i : legacy_edges.index_range()) {
legacy_edges[i].v1 = edges[i][0];
legacy_edges[i].v2 = edges[i][1];
}
convert_mfaces_to_mpolys(&mesh->id,
&mesh->fdata,
&mesh->ldata,
@ -705,13 +710,14 @@ void BKE_mesh_convert_mfaces_to_mpolys(Mesh *mesh)
mesh->totface,
mesh->totloop,
mesh->totpoly,
legacy_edges.data(),
mesh->edges_for_write().data(),
(MFace *)CustomData_get_layer(&mesh->fdata, CD_MFACE),
&mesh->totloop,
&mesh->totpoly);
BKE_mesh_legacy_convert_loops_to_corners(mesh);
BKE_mesh_legacy_convert_polys_to_offsets(mesh);
mesh_ensure_tessellation_customdata(mesh);
BKE_mesh_legacy_convert_loops_to_corners(mesh);
}
/**
@ -755,19 +761,20 @@ static void CustomData_bmesh_do_versions_update_active_layers(CustomData *fdata,
void BKE_mesh_do_versions_convert_mfaces_to_mpolys(Mesh *mesh)
{
convert_mfaces_to_mpolys(
&mesh->id,
&mesh->fdata,
&mesh->ldata,
&mesh->pdata,
mesh->totedge,
mesh->totface,
mesh->totloop,
mesh->totpoly,
static_cast<MEdge *>(CustomData_get_layer_for_write(&mesh->edata, CD_MEDGE, mesh->totedge)),
(MFace *)CustomData_get_layer(&mesh->fdata, CD_MFACE),
&mesh->totloop,
&mesh->totpoly);
convert_mfaces_to_mpolys(&mesh->id,
&mesh->fdata,
&mesh->ldata,
&mesh->pdata,
mesh->totedge,
mesh->totface,
mesh->totloop,
mesh->totpoly,
mesh->edges_for_write().data(),
(MFace *)CustomData_get_layer(&mesh->fdata, CD_MFACE),
&mesh->totloop,
&mesh->totpoly);
BKE_mesh_legacy_convert_loops_to_corners(mesh);
BKE_mesh_legacy_convert_polys_to_offsets(mesh);
CustomData_bmesh_do_versions_update_active_layers(&mesh->fdata, &mesh->ldata);

View File

@ -1205,28 +1205,6 @@ bool BKE_mesh_validate_material_indices(Mesh *me)
/** \name Mesh Stripping (removing invalid data)
* \{ */
void BKE_mesh_strip_loose_faces(Mesh *me)
{
/* NOTE: We need to keep this for edge creation (for now?), and some old `readfile.c` code. */
MFace *f;
int a, b;
MFace *mfaces = (MFace *)CustomData_get_layer_for_write(&me->fdata, CD_MFACE, me->totface);
for (a = b = 0, f = mfaces; a < me->totface; a++, f++) {
if (f->v3) {
if (a != b) {
memcpy(&mfaces[b], f, sizeof(mfaces[b]));
CustomData_copy_data(&me->fdata, &me->fdata, a, b, 1);
}
b++;
}
}
if (a != b) {
CustomData_free_elem(&me->fdata, b, a - b);
me->totface = b;
}
}
void strip_loose_polysloops(Mesh *me, blender::BitSpan polys_to_remove)
{
MutableSpan<int> poly_offsets = me->poly_offsets_for_write();

View File

@ -27,11 +27,11 @@ void MaskNode::convert_to_operations(NodeConverter &converter,
/* Always connect the output image. */
MaskOperation *operation = new MaskOperation();
if (editor_node->custom1 & CMP_NODEFLAG_MASK_FIXED) {
if (editor_node->custom1 & CMP_NODE_MASK_FLAG_SIZE_FIXED) {
operation->set_mask_width(data->size_x);
operation->set_mask_height(data->size_y);
}
else if (editor_node->custom1 & CMP_NODEFLAG_MASK_FIXED_SCENE) {
else if (editor_node->custom1 & CMP_NODE_MASK_FLAG_SIZE_FIXED_SCENE) {
operation->set_mask_width(data->size_x * render_size_factor);
operation->set_mask_height(data->size_y * render_size_factor);
}
@ -42,9 +42,9 @@ void MaskNode::convert_to_operations(NodeConverter &converter,
operation->set_mask(mask);
operation->set_framenumber(context.get_framenumber());
operation->set_feather(bool(editor_node->custom1 & CMP_NODEFLAG_MASK_NO_FEATHER) == 0);
operation->set_feather(bool(editor_node->custom1 & CMP_NODE_MASK_FLAG_NO_FEATHER) == 0);
if ((editor_node->custom1 & CMP_NODEFLAG_MASK_MOTION_BLUR) && (editor_node->custom2 > 1) &&
if ((editor_node->custom1 & CMP_NODE_MASK_FLAG_MOTION_BLUR) && (editor_node->custom2 > 1) &&
(editor_node->custom3 > FLT_EPSILON)) {
operation->set_motion_blur_samples(editor_node->custom2);
operation->set_motion_blur_shutter(editor_node->custom3);

View File

@ -70,12 +70,14 @@ set(SRC
algorithms/COM_algorithm_smaa.hh
algorithms/COM_algorithm_symmetric_separable_blur.hh
cached_resources/intern/cached_mask.cc
cached_resources/intern/cached_texture.cc
cached_resources/intern/morphological_distance_feather_weights.cc
cached_resources/intern/smaa_precomputed_textures.cc
cached_resources/intern/symmetric_blur_weights.cc
cached_resources/intern/symmetric_separable_blur_weights.cc
cached_resources/COM_cached_mask.hh
cached_resources/COM_cached_resource.hh
cached_resources/COM_cached_texture.hh
cached_resources/COM_morphological_distance_feather_weights.hh

View File

@ -82,6 +82,9 @@ class Context {
/* Get the size of the compositing region. See get_compositing_region(). */
int2 get_compositing_region_size() const;
/* Get the normalized render percentage of the active scene. */
float get_render_percentage() const;
/* Get the current frame number of the active scene. */
int get_frame_number() const;

View File

@ -2,6 +2,7 @@
#pragma once
#include "COM_cached_mask.hh"
#include "COM_cached_texture.hh"
#include "COM_morphological_distance_feather_weights.hh"
#include "COM_smaa_precomputed_textures.hh"
@ -37,8 +38,9 @@ class StaticCacheManager {
SymmetricBlurWeightsContainer symmetric_blur_weights;
SymmetricSeparableBlurWeightsContainer symmetric_separable_blur_weights;
MorphologicalDistanceFeatherWeightsContainer morphological_distance_feather_weights;
SMAAPrecomputedTexturesContainer smaa_precomputed_textures;
CachedTextureContainer cached_textures;
CachedMaskContainer cached_masks;
SMAAPrecomputedTexturesContainer smaa_precomputed_textures;
/* Reset the cache manager by deleting the cached resources that are no longer needed because
* they weren't used in the last evaluation and prepare the remaining cached resources to track

View File

@ -0,0 +1,86 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include <cstdint>
#include <memory>
#include <string>
#include "BLI_map.hh"
#include "BLI_math_vector_types.hh"
#include "GPU_texture.h"
#include "DNA_mask_types.h"
#include "DNA_scene_types.h"
#include "COM_cached_resource.hh"
namespace blender::realtime_compositor {
class Context;
/* ------------------------------------------------------------------------------------------------
* Cached Mask Key.
*/
/* Uniquely identifies a cached mask by the parameters that affect its rasterization. Instances
 * are used as map keys inside CachedMaskContainer, so hash() and the equality operator must stay
 * in sync, comparing/combining the same members. */
class CachedMaskKey {
 public:
  int2 size;
  bool use_feather;
  int motion_blur_samples;
  float motion_blur_shutter;

  CachedMaskKey(int2 size, bool use_feather, int motion_blur_samples, float motion_blur_shutter);

  uint64_t hash() const;
};
bool operator==(const CachedMaskKey &a, const CachedMaskKey &b);
/* -------------------------------------------------------------------------------------------------
* Cached Mask.
*
* A cached resource that computes and caches a GPU texture containing the result of evaluating the
* given mask ID on a space that spans the given size, parametrized by the given parameters. */
class CachedMask : public CachedResource {
 private:
  /* Single-channel texture storing the evaluated mask. Allocated in the constructor and freed in
   * the destructor. */
  GPUTexture *texture_ = nullptr;

 public:
  /* Evaluate the given mask at the given frame into a texture spanning the given size. When
   * motion_blur_samples is larger than one, multiple evaluations distributed over the shutter
   * interval are averaged. */
  CachedMask(Mask *mask,
             int2 size,
             int frame,
             bool use_feather,
             int motion_blur_samples,
             float motion_blur_shutter);

  ~CachedMask();

  /* Return the texture holding the evaluated mask. Owned by this cached resource and valid for
   * its lifetime. */
  GPUTexture *texture();
};
/* ------------------------------------------------------------------------------------------------
* Cached Mask Container.
*/
class CachedMaskContainer : CachedResourceContainer {
 private:
  /* Maps mask ID names to the cached masks computed for that ID, keyed by their rasterization
   * parameters. NOTE(review): keying by the ID name assumes names are unique in the relevant
   * context -- confirm. */
  Map<std::string, Map<CachedMaskKey, std::unique_ptr<CachedMask>>> map_;

 public:
  void reset() override;

  /* Check if the given mask ID has changed since the last time it was retrieved through its
   * recalculate flag, and if so, invalidate its corresponding cached mask and reset the
   * recalculate flag to ready it to track the next change. Then, check if there is an available
   * CachedMask cached resource with the given parameters in the container, if one exists, return
   * it, otherwise, return a newly created one and add it to the container. In both cases, tag the
   * cached resource as needed to keep it cached for the next evaluation. */
  CachedMask &get(Context &context,
                  Mask *mask,
                  int2 size,
                  bool use_feather,
                  int motion_blur_samples,
                  float motion_blur_shutter);
};
} // namespace blender::realtime_compositor

View File

@ -39,9 +39,8 @@ bool operator==(const CachedTextureKey &a, const CachedTextureKey &b);
/* -------------------------------------------------------------------------------------------------
* Cached Texture.
*
* A cached resource that computes and caches a GPU texture containing the the result of evaluating
* the given texture ID on a space that spans the given size, modified by the given offset and
* scale. */
* A cached resource that computes and caches a GPU texture containing the result of evaluating the
* given texture ID on a space that spans the given size, parametrized by the given parameters. */
class CachedTexture : public CachedResource {
private:
GPUTexture *color_texture_ = nullptr;

View File

@ -0,0 +1,199 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <cstdint>
#include <memory>
#include "BLI_array.hh"
#include "BLI_hash.hh"
#include "BLI_index_range.hh"
#include "BLI_listbase.h"
#include "BLI_math_vector_types.hh"
#include "BLI_task.hh"
#include "GPU_texture.h"
#include "BKE_lib_id.h"
#include "BKE_mask.h"
#include "DNA_ID.h"
#include "DNA_mask_types.h"
#include "COM_cached_mask.hh"
#include "COM_context.hh"
namespace blender::realtime_compositor {
/* --------------------------------------------------------------------
* Cached Mask Key.
*/
/* Store every parameter that affects rasterization so the key distinguishes otherwise identical
 * masks evaluated with different settings. */
CachedMaskKey::CachedMaskKey(int2 size,
                             bool use_feather,
                             int motion_blur_samples,
                             float motion_blur_shutter)
    : size(size),
      use_feather(use_feather),
      motion_blur_samples(motion_blur_samples),
      motion_blur_shutter(motion_blur_shutter)
{
}
/* Combine all key members into a single hash for use in a Map. Must stay consistent with the
 * equality operator, which compares the same members. */
uint64_t CachedMaskKey::hash() const
{
  return get_default_hash_4(size, use_feather, motion_blur_samples, motion_blur_shutter);
}
/* Two keys are equal only when every rasterization parameter matches. Mirrors the members that
 * CachedMaskKey::hash() combines. */
bool operator==(const CachedMaskKey &a, const CachedMaskKey &b)
{
  if (!(a.size == b.size)) {
    return false;
  }
  if (a.use_feather != b.use_feather) {
    return false;
  }
  if (a.motion_blur_samples != b.motion_blur_samples) {
    return false;
  }
  return a.motion_blur_shutter == b.motion_blur_shutter;
}
/* --------------------------------------------------------------------
* Cached Mask.
*/
/* Create rasterization handles for the given mask: a single handle of the mask's current state
 * when motion blur is disabled (motion_blur_samples == 1), otherwise motion_blur_samples handles
 * rasterized from a localized copy of the mask evaluated at sub-frames around current_frame.
 * The returned handles are owned by the caller, which must free each one with
 * BKE_maskrasterize_handle_free(). Returns an empty vector for a null mask. */
static Vector<MaskRasterHandle *> get_mask_raster_handles(Mask *mask,
                                                          int2 size,
                                                          int current_frame,
                                                          bool use_feather,
                                                          int motion_blur_samples,
                                                          float motion_blur_shutter)
{
  Vector<MaskRasterHandle *> handles;

  /* NOTE(review): the caller divides by the number of handles, so an empty result from a null
   * mask would produce NaN -- presumably callers never pass null here, confirm. */
  if (!mask) {
    return handles;
  }

  /* If motion blur samples are 1, that means motion blur is disabled, in that case, just return
   * the currently evaluated raster handle. */
  if (motion_blur_samples == 1) {
    MaskRasterHandle *handle = BKE_maskrasterize_handle_new();
    BKE_maskrasterize_handle_init(handle, mask, size.x, size.y, true, true, use_feather);
    handles.append(handle);
    return handles;
  }

  /* Otherwise, we have a number of motion blur samples, so make a copy of the Mask ID and evaluate
   * it at the different motion blur frames to get the needed raster handles. */
  Mask *evaluation_mask = reinterpret_cast<Mask *>(
      BKE_id_copy_ex(nullptr, &mask->id, nullptr, LIB_ID_COPY_LOCALIZE | LIB_ID_COPY_NO_ANIMDATA));

  /* We evaluate at the frames in the range [current_frame - shutter, current_frame + shutter]
   * (sampling steps up from the lower bound, so the last sample falls one frame_step short of
   * the upper bound). */
  const float start_frame = current_frame - motion_blur_shutter;
  const float frame_step = (motion_blur_shutter * 2.0f) / motion_blur_samples;
  for (int i = 0; i < motion_blur_samples; i++) {
    MaskRasterHandle *handle = BKE_maskrasterize_handle_new();
    BKE_mask_evaluate(evaluation_mask, start_frame + frame_step * i, true);
    BKE_maskrasterize_handle_init(
        handle, evaluation_mask, size.x, size.y, true, true, use_feather);
    handles.append(handle);
  }

  BKE_id_free(nullptr, &evaluation_mask->id);

  return handles;
}
/* Rasterize the mask into one handle per motion blur sample, evaluate the handles into a single
 * channel float buffer spanning the given size (rows evaluated in parallel), then upload the
 * result to a GPU_R16F texture. */
CachedMask::CachedMask(Mask *mask,
                       int2 size,
                       int frame,
                       bool use_feather,
                       int motion_blur_samples,
                       float motion_blur_shutter)
{
  Vector<MaskRasterHandle *> handles = get_mask_raster_handles(
      mask, size, frame, use_feather, motion_blur_samples, motion_blur_shutter);

  Array<float> evaluated_mask(size.x * size.y);
  threading::parallel_for(IndexRange(size.y), 1, [&](const IndexRange sub_y_range) {
    for (const int64_t y : sub_y_range) {
      for (const int64_t x : IndexRange(size.x)) {
        /* Compute the coordinates in the [0, 1] range and add 0.5 to evaluate the mask at the
         * center of pixels. */
        const float2 coordinates = (float2(x, y) + 0.5f) / float2(size);
        float mask_value = 0.0f;
        for (MaskRasterHandle *handle : handles) {
          mask_value += BKE_maskrasterize_handle_sample(handle, coordinates);
        }
        /* Average over all motion blur samples. NOTE(review): with zero handles (null mask) this
         * divides by zero and fills the buffer with NaN -- presumably a CachedMask is never
         * constructed from a null mask, confirm. */
        evaluated_mask[y * size.x + x] = mask_value / handles.size();
      }
    }
  });

  /* The handles are owned here, free them once sampling is done. */
  for (MaskRasterHandle *handle : handles) {
    BKE_maskrasterize_handle_free(handle);
  }

  texture_ = GPU_texture_create_2d("Cached Mask",
                                   size.x,
                                   size.y,
                                   1,
                                   GPU_R16F,
                                   GPU_TEXTURE_USAGE_SHADER_READ,
                                   evaluated_mask.data());
}
/* Free the GPU texture created in the constructor. */
CachedMask::~CachedMask()
{
  GPU_texture_free(texture_);
}
/* Return the texture holding the evaluated mask. Owned by this cached resource and valid for its
 * lifetime. */
GPUTexture *CachedMask::texture()
{
  return texture_;
}
/* --------------------------------------------------------------------
* Cached Mask Container.
*/
/* Free every cached mask that wasn't used in the last evaluation, drop mask IDs that no longer
 * hold any cached mask, and clear the needed flag of the survivors so usage can be tracked again
 * in the next evaluation. */
void CachedMaskContainer::reset()
{
  /* First, delete all cached masks that are no longer needed. */
  for (auto &cached_masks_for_id : map_.values()) {
    cached_masks_for_id.remove_if([](auto item) { return !item.value->needed; });
  }
  /* Remove mask IDs whose sub-map became empty. */
  map_.remove_if([](auto item) { return item.value.is_empty(); });

  /* Second, reset the needed status of the remaining cached masks to false to ready them to track
   * their needed status for the next evaluation. */
  for (auto &cached_masks_for_id : map_.values()) {
    for (auto &value : cached_masks_for_id.values()) {
      value->needed = false;
    }
  }
}
/* Return the cached mask matching the given parameters, creating and caching it on demand. Masks
 * are grouped per ID name; if the mask ID was tagged for recalculation, its whole group is
 * invalidated first. The returned resource is tagged as needed so reset() keeps it for the next
 * evaluation. */
CachedMask &CachedMaskContainer::get(Context &context,
                                     Mask *mask,
                                     int2 size,
                                     bool use_feather,
                                     int motion_blur_samples,
                                     float motion_blur_shutter)
{
  const CachedMaskKey key(size, use_feather, motion_blur_samples, motion_blur_shutter);

  auto &cached_masks_for_id = map_.lookup_or_add_default(mask->id.name);

  /* Invalidate the cache for that mask ID if it was changed and reset the recalculate flag. */
  if (context.query_id_recalc_flag(reinterpret_cast<ID *>(mask)) & ID_RECALC_ALL) {
    cached_masks_for_id.clear();
  }

  auto &cached_mask = *cached_masks_for_id.lookup_or_add_cb(key, [&]() {
    /* The mask is evaluated at the scene's current frame, which is part of neither the key nor
     * the recalc flag check above. */
    return std::make_unique<CachedMask>(mask,
                                        size,
                                        context.get_frame_number(),
                                        use_feather,
                                        motion_blur_samples,
                                        motion_blur_shutter);
  });

  cached_mask.needed = true;

  return cached_mask;
}
} // namespace blender::realtime_compositor

View File

@ -59,7 +59,7 @@ CachedTexture::CachedTexture(
threading::parallel_for(IndexRange(size.y), 1, [&](const IndexRange sub_y_range) {
for (const int64_t y : sub_y_range) {
for (const int64_t x : IndexRange(size.x)) {
/* Compute the coordinates in the [0, 1] range and add 0.5 to evaluate the texture at the
/* Compute the coordinates in the [-1, 1] range and add 0.5 to evaluate the texture at the
* center of pixels in case it was interpolated. */
float2 coordinates = ((float2(x, y) + 0.5f) / float2(size)) * 2.0f - 1.0f;
/* Note that it is expected that the offset is scaled by the scale. */

View File

@ -19,6 +19,11 @@ int2 Context::get_compositing_region_size() const
return int2(BLI_rcti_size_x(&compositing_region), BLI_rcti_size_y(&compositing_region));
}
/* Get the normalized render percentage of the active scene: the scene's render size percentage
 * mapped from [0, 100] into a [0, 1] factor. */
float Context::get_render_percentage() const
{
  const auto *active_scene = get_scene();
  return active_scene->r.size / 100.0f;
}
int Context::get_frame_number() const
{
return get_scene()->r.cfra;

View File

@ -10,6 +10,7 @@ void StaticCacheManager::reset()
symmetric_separable_blur_weights.reset();
morphological_distance_feather_weights.reset();
cached_textures.reset();
cached_masks.reset();
smaa_precomputed_textures.reset();
}

View File

@ -833,6 +833,7 @@ static bool id_type_can_have_drawdata(const short id_type)
case ID_WO:
case ID_SCE:
case ID_TE:
case ID_MSK:
return true;
/* no DrawData */

View File

@ -43,25 +43,32 @@ using namespace blender;
using PointerRNAVec = blender::Vector<PointerRNA>;
/**
* Return the IDs to operate on as PointerRNA vector. Either a single one ("id" context member) or
* multiple ones ("selected_ids" context member).
* Return the IDs to operate on as PointerRNA vector. Prioritizes multiple selected ones
* ("selected_ids" context member) over a single active one ("id" context member), since usually
* batch operations are more useful.
*/
static PointerRNAVec asset_operation_get_ids_from_context(const bContext *C)
{
PointerRNAVec ids;
PointerRNA idptr = CTX_data_pointer_get_type(C, "id", &RNA_ID);
if (idptr.data) {
/* Single ID. */
ids.append(idptr);
}
else {
/* "selected_ids" context member. */
{
ListBase list;
CTX_data_selected_ids(C, &list);
LISTBASE_FOREACH (CollectionPointerLink *, link, &list) {
ids.append(link->ptr);
}
BLI_freelistN(&list);
if (!ids.is_empty()) {
return ids;
}
}
/* "id" context member. */
PointerRNA idptr = CTX_data_pointer_get_type(C, "id", &RNA_ID);
if (idptr.data) {
ids.append(idptr);
}
return ids;

View File

@ -2188,6 +2188,14 @@ int filelist_file_find_id(const FileList *filelist, const ID *id)
return -1;
}
/* Get the local-data ID represented by the file at the given filtered-entry index (may be null
 * for files that don't represent an ID). Reads the internal entry directly, so unlike
 * filelist_file_get_id() the file doesn't have to be in the file cache. */
ID *filelist_entry_get_id(const FileList *filelist, const int index)
{
  BLI_assert(index >= 0 && index < filelist->filelist.entries_filtered_num);
  const FileListInternEntry *intern_entry = filelist->filelist_intern.filtered[index];
  return intern_entry->local_data.id;
}
ID *filelist_file_get_id(const FileDirEntry *file)
{
return file->id;

View File

@ -130,6 +130,10 @@ int filelist_file_find_id(const struct FileList *filelist, const struct ID *id);
* Get the ID a file represents (if any). For #FILE_MAIN, #FILE_MAIN_ASSET.
*/
struct ID *filelist_file_get_id(const struct FileDirEntry *file);
/**
* Same as #filelist_file_get_id(), but gets the file by index (doesn't require the file to be
* cached, uses #FileListInternEntry only). */
struct ID *filelist_entry_get_id(const struct FileList *filelist, int index);
bool filelist_uid_is_set(const FileUID uid);
void filelist_uid_unset(FileUID *r_uid);
void filelist_file_cache_slidingwindow_set(struct FileList *filelist, size_t window_size);

View File

@ -823,6 +823,7 @@ const char *file_context_dir[] = {
"asset_library_ref",
"selected_asset_files",
"id",
"selected_ids",
NULL,
};
@ -911,6 +912,24 @@ static int /*eContextResult*/ file_context(const bContext *C,
CTX_data_id_pointer_set(result, id);
return CTX_RESULT_OK;
}
if (CTX_data_equals(member, "selected_ids")) {
const int num_files_filtered = filelist_files_ensure(sfile->files);
for (int file_index = 0; file_index < num_files_filtered; file_index++) {
if (!filelist_entry_is_selected(sfile->files, file_index)) {
continue;
}
ID *id = filelist_entry_get_id(sfile->files, file_index);
if (!id) {
continue;
}
CTX_data_id_list_add(result, id);
}
CTX_data_type_set(result, CTX_DATA_TYPE_COLLECTION);
return CTX_RESULT_OK;
}
return CTX_RESULT_MEMBER_NOT_FOUND;
}

View File

@ -2031,6 +2031,7 @@ static int node_output_file_move_active_socket_exec(bContext *C, wmOperator *op)
nimf->active_input++;
}
BKE_ntree_update_tag_node_property(snode->edittree, node);
ED_node_tree_propagate_change(C, CTX_data_main(C), snode->edittree);
return OPERATOR_FINISHED;

View File

@ -423,6 +423,8 @@ static int outliner_item_rename(bContext *C, wmOperator *op, const wmEvent *even
void OUTLINER_OT_item_rename(wmOperatorType *ot)
{
PropertyRNA *prop;
ot->name = "Rename";
ot->idname = "OUTLINER_OT_item_rename";
ot->description = "Rename the active element";
@ -434,11 +436,12 @@ void OUTLINER_OT_item_rename(wmOperatorType *ot)
/* Flags. */
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;
RNA_def_boolean(ot->srna,
"use_active",
false,
"Use Active",
"Rename the active item, rather than the one the mouse is over");
prop = RNA_def_boolean(ot->srna,
"use_active",
false,
"Use Active",
"Rename the active item, rather than the one the mouse is over");
RNA_def_property_flag(prop, PropertyFlag(PROP_SKIP_SAVE | PROP_HIDDEN));
}
/** \} */

View File

@ -1507,34 +1507,17 @@ static void drawTransformPixel(const struct bContext *C, ARegion *region, void *
void saveTransform(bContext *C, TransInfo *t, wmOperator *op)
{
if (t->state == TRANS_CANCEL) {
/* No need to edit operator properties or tool settings if we are canceling the operation.
* These properties must match the original ones. */
return;
}
ToolSettings *ts = CTX_data_tool_settings(C);
PropertyRNA *prop;
/* Save back mode in case we're in the generic operator */
if ((prop = RNA_struct_find_property(op->ptr, "mode"))) {
RNA_property_enum_set(op->ptr, prop, t->mode);
}
if ((prop = RNA_struct_find_property(op->ptr, "value"))) {
if (RNA_property_array_check(prop)) {
RNA_property_float_set_array(op->ptr, prop, t->values_final);
}
else {
RNA_property_float_set(op->ptr, prop, t->values_final[0]);
}
}
bool use_prop_edit = false;
int prop_edit_flag = 0;
/* Save proportional edit settings.
* Skip saving proportional edit if it was not actually used. */
* Skip saving proportional edit if it was not actually used.
* Note that this value is being saved even if the operation is cancelled. This is to maintain a
* behavior already used by users. */
if (!(t->options & CTX_NO_PET)) {
bool use_prop_edit = false;
int prop_edit_flag = 0;
if (t->flag & T_PROP_EDIT_ALL) {
if (t->flag & T_PROP_EDIT) {
use_prop_edit = true;
@ -1588,7 +1571,15 @@ void saveTransform(bContext *C, TransInfo *t, wmOperator *op)
ts->prop_mode = t->prop_mode;
}
}
}
if (t->state == TRANS_CANCEL) {
/* No need to edit operator properties or tool settings if we are canceling the operation.
* These properties must match the original ones. */
return;
}
if (!(t->options & CTX_NO_PET)) {
if ((prop = RNA_struct_find_property(op->ptr, "use_proportional_edit"))) {
RNA_property_boolean_set(op->ptr, prop, use_prop_edit);
RNA_boolean_set(op->ptr, "use_proportional_connected", prop_edit_flag & PROP_EDIT_CONNECTED);
@ -1598,6 +1589,20 @@ void saveTransform(bContext *C, TransInfo *t, wmOperator *op)
}
}
/* Save back mode in case we're in the generic operator */
if ((prop = RNA_struct_find_property(op->ptr, "mode"))) {
RNA_property_enum_set(op->ptr, prop, t->mode);
}
if ((prop = RNA_struct_find_property(op->ptr, "value"))) {
if (RNA_property_array_check(prop)) {
RNA_property_float_set_array(op->ptr, prop, t->values_final);
}
else {
RNA_property_float_set(op->ptr, prop, t->values_final[0]);
}
}
/* Save snapping settings. */
if ((prop = RNA_struct_find_property(op->ptr, "snap"))) {
RNA_property_boolean_set(op->ptr, prop, (t->modifiers & MOD_SNAP) != 0);

View File

@ -339,14 +339,12 @@ AddCurvesOnMeshOutputs add_curves_on_mesh(CurvesGeometry &curves,
/* Find surface normal at root points. */
Array<float3> new_normals_su(added_curves_num);
bke::mesh_surface_sample::sample_corner_attribute(
inputs.surface_looptris,
looptri_indices,
bary_coords,
VArray<float3>::ForSpan(inputs.corner_normals_su),
IndexMask(added_curves_num),
new_normals_su.as_mutable_span());
/* TODO: Normalization. */
bke::mesh_surface_sample::sample_corner_normals(inputs.surface_looptris,
looptri_indices,
bary_coords,
inputs.corner_normals_su,
IndexMask(added_curves_num),
new_normals_su);
/* Initialize position attribute. */
if (inputs.interpolate_shape) {

View File

@ -761,7 +761,7 @@ float2 PackIsland::get_diagonal_support(const float scale,
float sx = fabsf(diagonal_rotated[0]);
float sy = fabsf(diagonal_rotated[1]);
return float2(sx + sy * 0.5f + margin, sx * 0.5f + sy + margin); /* Upper bound. */
return float2(sx + sy * 0.7071f + margin, sx * 0.7071f + sy + margin); /* Upper bound. */
}
float Occupancy::trace_island(const PackIsland *island,
@ -954,13 +954,18 @@ static bool rotate_inside_square(const Span<UVAABBIsland *> island_indices,
float *r_max_u,
float *r_max_v)
{
if (island_indices.size() == 0) {
return false; /* Nothing to do. */
}
if (!params.rotate) {
return false; /* Unable to rotate. */
}
if (params.shape_method == ED_UVPACK_SHAPE_AABB) {
return false; /* AABB margin calculations are not preserved under rotations. */
/* AABB margin calculations are not preserved under rotations. */
if (island_indices.size() > 1) { /* Unless there's only one island...*/
return false;
}
}
BLI_assert(islands.size() > 0);
UVMinimumEnclosingSquareFinder square_finder(scale, margin, &params);
square_finder.best_quad = std::max(*r_max_u / params.target_aspect_y, *r_max_v);
@ -1262,6 +1267,15 @@ static float pack_islands_scale_margin(const Span<PackIsland *> islands,
/* At this stage, `max_u` and `max_v` contain the box_pack/xatlas UVs. */
rotate_inside_square(aabbs.as_span().take_front(max_box_pack),
islands,
params,
scale,
margin,
r_phis,
&max_u,
&max_v);
/* Call Alpaca. */
if (params.rotate) {
pack_islands_alpaca_rotate(
@ -1271,8 +1285,6 @@ static float pack_islands_scale_margin(const Span<PackIsland *> islands,
pack_islands_alpaca_turbo(max_box_pack, aabbs, params.target_aspect_y, r_phis, &max_u, &max_v);
}
rotate_inside_square(aabbs, islands, params, scale, margin, r_phis, &max_u, &max_v);
return std::max(max_u / params.target_aspect_y, max_v);
}

View File

@ -703,7 +703,7 @@ void GPU_texture_free(GPUTexture *texture);
* TODO(fclem): Target conversion (ex: Texture 2D as Texture 2D Array) is not implemented yet.
*/
GPUTexture *GPU_texture_create_view(const char *name,
const GPUTexture *source_texture,
GPUTexture *source_texture,
eGPUTextureFormat view_format,
int mip_start,
int mip_len,

View File

@ -132,7 +132,7 @@ bool Texture::init_buffer(GPUVertBuf *vbo, eGPUTextureFormat format)
return this->init_internal(vbo);
}
bool Texture::init_view(const GPUTexture *src_,
bool Texture::init_view(GPUTexture *src_,
eGPUTextureFormat format,
eGPUTextureType type,
int mip_start,
@ -448,7 +448,7 @@ GPUTexture *GPU_texture_create_error(int dimension, bool is_array)
}
GPUTexture *GPU_texture_create_view(const char *name,
const GPUTexture *src,
GPUTexture *src,
eGPUTextureFormat format,
int mip_start,
int mip_len,

View File

@ -132,7 +132,7 @@ class Texture {
bool init_3D(int w, int h, int d, int mip_len, eGPUTextureFormat format);
bool init_cubemap(int w, int layers, int mip_len, eGPUTextureFormat format);
bool init_buffer(GPUVertBuf *vbo, eGPUTextureFormat format);
bool init_view(const GPUTexture *src,
bool init_view(GPUTexture *src,
eGPUTextureFormat format,
eGPUTextureType type,
int mip_start,
@ -313,7 +313,7 @@ class Texture {
protected:
virtual bool init_internal() = 0;
virtual bool init_internal(GPUVertBuf *vbo) = 0;
virtual bool init_internal(const GPUTexture *src, int mip_offset, int layer_offset) = 0;
virtual bool init_internal(GPUTexture *src, int mip_offset, int layer_offset) = 0;
};
/* Syntactic sugar. */

View File

@ -181,7 +181,6 @@ class MTLTexture : public Texture {
bool is_baked_ = false;
MTLTextureDescriptor *texture_descriptor_ = nullptr;
id<MTLTexture> texture_ = nil;
MTLTextureUsage usage_;
/* Texture Storage. */
id<MTLBuffer> texture_buffer_ = nil;
@ -284,7 +283,7 @@ class MTLTexture : public Texture {
protected:
bool init_internal() override;
bool init_internal(GPUVertBuf *vbo) override;
bool init_internal(const GPUTexture *src,
bool init_internal(GPUTexture *src,
int mip_offset,
int layer_offset) override; /* Texture View */

View File

@ -159,7 +159,7 @@ void gpu::MTLTexture::bake_mip_swizzle_view()
}
int range_len = min_ii((mip_texture_max_level_ - mip_texture_base_level_) + 1,
texture_.mipmapLevelCount);
texture_.mipmapLevelCount - mip_texture_base_level_);
BLI_assert(range_len > 0);
BLI_assert(mip_texture_base_level_ < texture_.mipmapLevelCount);
BLI_assert(mip_texture_base_layer_ < num_slices);
@ -1152,7 +1152,7 @@ void gpu::MTLTexture::ensure_mipmaps(int miplvl)
void gpu::MTLTexture::generate_mipmap()
{
/* Fetch Active Context. */
MTLContext *ctx = reinterpret_cast<MTLContext *>(GPU_context_active_get());
MTLContext *ctx = static_cast<MTLContext *>(unwrap(GPU_context_active_get()));
BLI_assert(ctx);
if (!ctx->device) {
@ -1265,7 +1265,7 @@ void gpu::MTLTexture::clear(eGPUDataFormat data_format, const void *data)
/* Create clear framebuffer. */
GPUFrameBuffer *prev_fb = GPU_framebuffer_active_get();
FrameBuffer *fb = reinterpret_cast<FrameBuffer *>(this->get_blit_framebuffer(0, 0));
FrameBuffer *fb = unwrap(this->get_blit_framebuffer(0, 0));
fb->bind(true);
fb->clear_attachment(this->attachment_type(0), data_format, data);
GPU_framebuffer_bind(prev_fb);
@ -1805,7 +1805,7 @@ bool gpu::MTLTexture::init_internal(GPUVertBuf *vbo)
return true;
}
bool gpu::MTLTexture::init_internal(const GPUTexture *src, int mip_offset, int layer_offset)
bool gpu::MTLTexture::init_internal(GPUTexture *src, int mip_offset, int layer_offset)
{
BLI_assert(src);
@ -1817,12 +1817,17 @@ bool gpu::MTLTexture::init_internal(const GPUTexture *src, int mip_offset, int l
source_texture_ = src;
mip_texture_base_level_ = mip_offset;
mip_texture_base_layer_ = layer_offset;
texture_view_dirty_flags_ |= TEXTURE_VIEW_MIP_DIRTY;
/* Assign usage. */
gpu_image_usage_flags_ = GPU_texture_usage(src);
BLI_assert_msg(
gpu_image_usage_flags_ & GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW,
"Source texture of TextureView must have GPU_TEXTURE_USAGE_MIP_SWIZZLE_VIEW usage flag.");
/* Assign texture as view. */
const gpu::MTLTexture *mtltex = static_cast<const gpu::MTLTexture *>(unwrap(src));
gpu::MTLTexture *mtltex = static_cast<gpu::MTLTexture *>(unwrap(src));
mtltex->ensure_baked();
texture_ = mtltex->texture_;
BLI_assert(texture_);
[texture_ retain];

View File

@ -178,7 +178,7 @@ bool GLTexture::init_internal(GPUVertBuf *vbo)
return true;
}
bool GLTexture::init_internal(const GPUTexture *src, int mip_offset, int layer_offset)
bool GLTexture::init_internal(GPUTexture *src, int mip_offset, int layer_offset)
{
BLI_assert(GLContext::texture_storage_support);

View File

@ -117,7 +117,7 @@ class GLTexture : public Texture {
/** Return true on success. */
bool init_internal(GPUVertBuf *vbo) override;
/** Return true on success. */
bool init_internal(const GPUTexture *src, int mip_offset, int layer_offset) override;
bool init_internal(GPUTexture *src, int mip_offset, int layer_offset) override;
private:
bool proxy_check(int mip);

View File

@ -174,7 +174,7 @@ bool VKTexture::init_internal(GPUVertBuf * /*vbo*/)
return false;
}
bool VKTexture::init_internal(const GPUTexture * /*src*/, int /*mip_offset*/, int /*layer_offset*/)
bool VKTexture::init_internal(GPUTexture * /*src*/, int /*mip_offset*/, int /*layer_offset*/)
{
return false;
}

View File

@ -61,7 +61,7 @@ class VKTexture : public Texture {
protected:
bool init_internal() override;
bool init_internal(GPUVertBuf *vbo) override;
bool init_internal(const GPUTexture *src, int mip_offset, int layer_offset) override;
bool init_internal(GPUTexture *src, int mip_offset, int layer_offset) override;
private:
/** Is this texture already allocated on device. */

View File

@ -47,6 +47,7 @@ static const pxr::TfToken primvar_float2("UsdPrimvarReader_float2", pxr::TfToken
static const pxr::TfToken roughness("roughness", pxr::TfToken::Immortal);
static const pxr::TfToken specular("specular", pxr::TfToken::Immortal);
static const pxr::TfToken opacity("opacity", pxr::TfToken::Immortal);
static const pxr::TfToken opacityThreshold("opacityThreshold", pxr::TfToken::Immortal);
static const pxr::TfToken surface("surface", pxr::TfToken::Immortal);
static const pxr::TfToken perspective("perspective", pxr::TfToken::Immortal);
static const pxr::TfToken orthographic("orthographic", pxr::TfToken::Immortal);
@ -144,6 +145,8 @@ void create_usd_preview_surface_material(const USDExporterContext &usd_export_co
const InputSpecMap &input_map = preview_surface_input_map();
bool has_opacity = false;
/* Set the preview surface inputs. */
LISTBASE_FOREACH (bNodeSocket *, sock, &node->inputs) {
@ -167,6 +170,10 @@ void create_usd_preview_surface_material(const USDExporterContext &usd_export_co
preview_surface.CreateInput(input_spec.input_name, input_spec.input_type)
.ConnectToSource(created_shader.ConnectableAPI(), input_spec.source_name);
set_normal_texture_range(created_shader, input_spec);
if (input_spec.input_name == usdtokens::opacity) {
has_opacity = true;
}
}
else if (input_spec.set_default_value) {
/* Set hardcoded value. */
@ -203,6 +210,14 @@ void create_usd_preview_surface_material(const USDExporterContext &usd_export_co
create_uvmap_shader(
usd_export_context, input_node, usd_material, created_shader, default_uv_sampler);
}
/* Set opacityThreshold if an alpha cutout is used. */
if (has_opacity) {
if ((material->blend_method == MA_BM_CLIP) && (material->alpha_threshold > 0.0)) {
pxr::UsdShadeInput opacity_threshold_input = preview_surface.CreateInput(usdtokens::opacityThreshold, pxr::SdfValueTypeNames->Float);
opacity_threshold_input.GetAttr().Set(pxr::VtValue(material->alpha_threshold));
}
}
}
void set_normal_texture_range(pxr::UsdShadeShader &usd_shader, const InputSpec &input_spec)

View File

@ -22,6 +22,8 @@ extern "C" {
typedef struct Mask {
ID id;
struct AnimData *adt;
/* runtime (must be immediately after id for utilities to use it). */
DrawDataList drawdata;
/** Mask layers. */
ListBase masklayers;
/** Index of active mask layer (-1 == None). */

View File

@ -770,15 +770,15 @@ enum {
CMP_NODE_INPAINT_SIMPLE = 0,
};
/* Flag values stored in the Mask node's `custom1` (see `rna_Node`/compositor Mask operation). */
typedef enum CMPNodeMaskFlags {
  /* CMP_NODEFLAG_MASK_AA = (1 << 0), */ /* DEPRECATED */
  CMP_NODE_MASK_FLAG_NO_FEATHER = (1 << 1),
  CMP_NODE_MASK_FLAG_MOTION_BLUR = (1 << 2),

  /* We may want multiple aspect options, exposed as an rna enum. */
  CMP_NODE_MASK_FLAG_SIZE_FIXED = (1 << 8),
  CMP_NODE_MASK_FLAG_SIZE_FIXED_SCENE = (1 << 9),
} CMPNodeMaskFlags;
enum {
CMP_NODEFLAG_BLUR_VARIABLE_SIZE = (1 << 0),

View File

@ -8317,8 +8317,8 @@ static void def_cmp_mask(StructRNA *srna)
static const EnumPropertyItem aspect_type_items[] = {
{0, "SCENE", 0, "Scene Size", ""},
{CMP_NODEFLAG_MASK_FIXED, "FIXED", 0, "Fixed", "Use pixel size for the buffer"},
{CMP_NODEFLAG_MASK_FIXED_SCENE,
{CMP_NODE_MASK_FLAG_SIZE_FIXED, "FIXED", 0, "Fixed", "Use pixel size for the buffer"},
{CMP_NODE_MASK_FLAG_SIZE_FIXED_SCENE,
"FIXED_SCENE",
0,
"Fixed/Scene",
@ -8334,12 +8334,12 @@ static void def_cmp_mask(StructRNA *srna)
RNA_def_property_ui_text(prop, "Mask", "");
prop = RNA_def_property(srna, "use_feather", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_negative_sdna(prop, NULL, "custom1", CMP_NODEFLAG_MASK_NO_FEATHER);
RNA_def_property_boolean_negative_sdna(prop, NULL, "custom1", CMP_NODE_MASK_FLAG_NO_FEATHER);
RNA_def_property_ui_text(prop, "Feather", "Use feather information from the mask");
RNA_def_property_update(prop, NC_NODE | NA_EDITED, "rna_Node_update");
prop = RNA_def_property(srna, "use_motion_blur", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_sdna(prop, NULL, "custom1", CMP_NODEFLAG_MASK_MOTION_BLUR);
RNA_def_property_boolean_sdna(prop, NULL, "custom1", CMP_NODE_MASK_FLAG_MOTION_BLUR);
RNA_def_property_ui_text(prop, "Motion Blur", "Use multi-sampled motion blur of the mask");
RNA_def_property_update(prop, NC_NODE | NA_EDITED, "rna_Node_update");

View File

@ -12,7 +12,9 @@
#include "UI_interface.h"
#include "UI_resources.h"
#include "COM_cached_mask.hh"
#include "COM_node_operation.hh"
#include "COM_utilities.hh"
#include "node_composite_util.hh"
@ -20,6 +22,8 @@
namespace blender::nodes::node_composite_mask_cc {
NODE_STORAGE_FUNCS(NodeMask)
static void cmp_node_mask_declare(NodeDeclarationBuilder &b)
{
b.add_output<decl::Float>(N_("Mask"));
@ -66,13 +70,13 @@ static void node_composit_buts_mask(uiLayout *layout, bContext *C, PointerRNA *p
uiItemR(layout, ptr, "size_source", UI_ITEM_R_SPLIT_EMPTY_NAME, "", ICON_NONE);
if (node->custom1 & (CMP_NODEFLAG_MASK_FIXED | CMP_NODEFLAG_MASK_FIXED_SCENE)) {
if (node->custom1 & (CMP_NODE_MASK_FLAG_SIZE_FIXED | CMP_NODE_MASK_FLAG_SIZE_FIXED_SCENE)) {
uiItemR(layout, ptr, "size_x", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE);
uiItemR(layout, ptr, "size_y", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE);
}
uiItemR(layout, ptr, "use_motion_blur", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE);
if (node->custom1 & CMP_NODEFLAG_MASK_MOTION_BLUR) {
if (node->custom1 & CMP_NODE_MASK_FLAG_MOTION_BLUR) {
uiItemR(layout, ptr, "motion_blur_samples", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE);
uiItemR(layout, ptr, "motion_blur_shutter", UI_ITEM_R_SPLIT_EMPTY_NAME, nullptr, ICON_NONE);
}
@ -86,8 +90,76 @@ class MaskOperation : public NodeOperation {
void execute() override
{
  /* Rasterize the mask into the output result. Invalid output when no mask is assigned. */
  Result &output_mask = get_result("Mask");
  if (!get_mask()) {
    output_mask.allocate_invalid();
    return;
  }

  const Domain domain = compute_domain();

  /* Fetch the rasterized mask from the cache manager, keyed by the mask and its evaluation
   * parameters, so it is only recomputed when any of them change. */
  CachedMask &cached_mask = context().cache_manager().cached_masks.get(
      context(),
      get_mask(),
      domain.size,
      get_use_feather(),
      get_motion_blur_samples(),
      get_motion_blur_shutter());

  output_mask.allocate_texture(domain);
  GPU_texture_copy(output_mask.texture(), cached_mask.texture());
}
/* The node has no inputs, so its domain is defined purely by the size settings. */
Domain compute_domain() override
{
  const int2 domain_size = compute_size();
  return Domain(domain_size);
}
/* Resolve the output size from the node's size mode: fixed pixel size, fixed size scaled by the
 * render percentage, or the compositing region size. */
int2 compute_size()
{
  const CMPNodeMaskFlags flags = get_flags();

  if (flags & CMP_NODE_MASK_FLAG_SIZE_FIXED) {
    return get_size();
  }

  if (flags & CMP_NODE_MASK_FLAG_SIZE_FIXED_SCENE) {
    return get_size() * context().get_render_percentage();
  }

  return context().get_compositing_region_size();
}
/* The user-specified fixed size, stored in the node's NodeMask storage. */
int2 get_size()
{
  const NodeMask &storage = node_storage(bnode());
  return int2(storage.size_x, storage.size_y);
}
/* Whether mask feathering should be applied. Note the flag stores the inverse: it is set to
 * disable feathering. */
bool get_use_feather()
{
  const bool feather_disabled = (get_flags() & CMP_NODE_MASK_FLAG_NO_FEATHER) != 0;
  return !feather_disabled;
}
/* Number of motion blur samples, or a single sample when motion blur is disabled. */
int get_motion_blur_samples()
{
  if (!use_motion_blur()) {
    return 1;
  }
  return bnode().custom2;
}
/* Motion blur shutter time, stored in the node's custom3 value. */
float get_motion_blur_shutter()
{
  const float shutter = bnode().custom3;
  return shutter;
}
/* Whether the motion blur option is enabled on the node. */
bool use_motion_blur()
{
  return (get_flags() & CMP_NODE_MASK_FLAG_MOTION_BLUR) != 0;
}
/* The node's option flags, packed into its custom1 value. */
CMPNodeMaskFlags get_flags()
{
  const int packed_flags = bnode().custom1;
  return static_cast<CMPNodeMaskFlags>(packed_flags);
}
/* The mask data-block assigned to the node via its id pointer; may be null. */
Mask *get_mask()
{
  auto *mask = reinterpret_cast<Mask *>(bnode().id);
  return mask;
}
};
@ -110,8 +182,6 @@ void register_node_type_cmp_mask()
ntype.initfunc = file_ns::node_composit_init_mask;
ntype.labelfunc = file_ns::node_mask_label;
ntype.get_compositor_operation = file_ns::get_compositor_operation;
ntype.realtime_compositor_unsupported_message = N_(
"Node not supported in the Viewport compositor");
node_type_storage(&ntype, "NodeMask", node_free_standard_storage, node_copy_standard_storage);

View File

@ -126,7 +126,7 @@ class ScaleOperation : public NodeOperation {
/* Scale by the render resolution percentage. */
float2 get_scale_render_percent()
{
  /* The context exposes the percentage directly as a factor; do not re-read it from the scene. */
  return float2(context().get_render_percentage());
}
float2 get_scale_render_size()

View File

@ -81,7 +81,7 @@ class TextureOperation : public NodeOperation {
/* The texture data-block referenced by the node's id pointer. */
Tex *get_texture()
{
  /* Use a named cast rather than a C-style cast, matching the file's conventions. */
  return reinterpret_cast<Tex *>(bnode().id);
}
};

View File

@ -8,6 +8,7 @@ import unittest
from pxr import Usd
from pxr import UsdUtils
from pxr import UsdGeom
from pxr import UsdShade
from pxr import Gf
import bpy
@ -124,6 +125,74 @@ class USDExportTest(AbstractUSDTest):
Gf.Vec3d(extent[1]), Gf.Vec3d(0.7515701, 0.5500924, 0.9027928)
)
def test_opacity_threshold(self):
    """Verify `opacityThreshold` is only authored on exported USD preview surfaces
    when the material uses alpha-clip blending with a non-zero threshold."""
    # Note that the scene file used here is shared with a different test.
    # Here we assume that it has a Principled BSDF material with
    # a texture connected to its Base Color input.
    bpy.ops.wm.open_mainfile(filepath=str(self.testdir / "usd_materials_export.blend"))

    export_path = self.tempdir / "opaque_material.usda"
    res = bpy.ops.wm.usd_export(
        filepath=str(export_path),
        export_materials=True,
        evaluation_mode="RENDER",
    )
    self.assertEqual({'FINISHED'}, res, f"Unable to export to {export_path}")

    # Inspect and validate the exported USD for the opaque blend case.
    stage = Usd.Stage.Open(str(export_path))
    shader_prim = stage.GetPrimAtPath("/_materials/Material/Principled_BSDF")
    shader = UsdShade.Shader(shader_prim)
    opacity_input = shader.GetInput('opacity')
    self.assertEqual(opacity_input.HasConnectedSource(), False,
                     "Opacity input should not be connected for opaque material")
    # The message must be passed via `msg=`: the third positional argument of
    # assertAlmostEqual is `places` and a string there raises TypeError.
    self.assertAlmostEqual(opacity_input.Get(), 1.0, msg="Opacity input should be set to 1")

    # The material already has a texture input to the Base Color.
    # Now also link this texture to the Alpha input.
    # Set an opacity threshold appropriate for alpha clipping.
    mat = bpy.data.materials['Material']
    bsdf = mat.node_tree.nodes['Principled BSDF']
    tex_output = bsdf.inputs['Base Color'].links[0].from_node.outputs['Color']
    alpha_input = bsdf.inputs['Alpha']
    mat.node_tree.links.new(tex_output, alpha_input)
    bpy.data.materials['Material'].blend_method = 'CLIP'
    bpy.data.materials['Material'].alpha_threshold = 0.01

    export_path = self.tempdir / "alphaclip_material.usda"
    res = bpy.ops.wm.usd_export(
        filepath=str(export_path),
        export_materials=True,
        evaluation_mode="RENDER",
    )
    self.assertEqual({'FINISHED'}, res, f"Unable to export to {export_path}")

    # Inspect and validate the exported USD for the alpha clip case.
    stage = Usd.Stage.Open(str(export_path))
    shader_prim = stage.GetPrimAtPath("/_materials/Material/Principled_BSDF")
    shader = UsdShade.Shader(shader_prim)
    opacity_input = shader.GetInput('opacity')
    opacity_thres_input = shader.GetInput('opacityThreshold')
    self.assertEqual(opacity_input.HasConnectedSource(), True, "Alpha input should be connected")
    self.assertGreater(opacity_thres_input.Get(), 0.0, "Opacity threshold input should be > 0")

    # Modify material again, this time with alpha blend.
    bpy.data.materials['Material'].blend_method = 'BLEND'

    export_path = self.tempdir / "alphablend_material.usda"
    res = bpy.ops.wm.usd_export(
        filepath=str(export_path),
        export_materials=True,
        evaluation_mode="RENDER",
    )
    self.assertEqual({'FINISHED'}, res, f"Unable to export to {export_path}")

    # Inspect and validate the exported USD for the alpha blend case.
    stage = Usd.Stage.Open(str(export_path))
    shader_prim = stage.GetPrimAtPath("/_materials/Material/Principled_BSDF")
    shader = UsdShade.Shader(shader_prim)
    opacity_input = shader.GetInput('opacity')
    opacity_thres_input = shader.GetInput('opacityThreshold')
    self.assertEqual(opacity_input.HasConnectedSource(), True, "Alpha input should be connected")
    self.assertEqual(opacity_thres_input.Get(), None,
                     "Opacity threshold should not be specified for alpha blend")
def main():
global args