Animation: Paste Keys in Graph Editor with value offset #104512

Merged
Christoph Lendenfeld merged 12 commits from ChrisLend/blender:copy_paste_frame_range into main 2023-02-23 09:47:00 +01:00
63 changed files with 1411 additions and 424 deletions
Showing only changes of commit 55bb3054af

View File

@ -10,7 +10,7 @@ ExternalProject_Add(external_epoxy
URL_HASH ${EPOXY_HASH_TYPE}=${EPOXY_HASH}
PREFIX ${BUILD_DIR}/epoxy
PATCH_COMMAND ${PATCH_CMD} -p 1 -N -d ${BUILD_DIR}/epoxy/src/external_epoxy/ < ${PATCH_DIR}/epoxy.diff
CONFIGURE_COMMAND ${CONFIGURE_ENV} && ${MESON} setup --prefix ${LIBDIR}/epoxy --default-library ${EPOXY_LIB_TYPE} --libdir lib ${BUILD_DIR}/epoxy/src/external_epoxy-build ${BUILD_DIR}/epoxy/src/external_epoxy -Dtests=false
CONFIGURE_COMMAND ${CONFIGURE_ENV} && ${MESON} setup --prefix ${LIBDIR}/epoxy --default-library ${EPOXY_LIB_TYPE} --libdir lib ${BUILD_DIR}/epoxy/src/external_epoxy-build ${BUILD_DIR}/epoxy/src/external_epoxy -Dtests=false ${MESON_BUILD_TYPE}
BUILD_COMMAND ninja
INSTALL_COMMAND ninja install
)

View File

@ -9,7 +9,7 @@ ExternalProject_Add(external_fribidi
URL_HASH ${FRIBIDI_HASH_TYPE}=${FRIBIDI_HASH}
DOWNLOAD_DIR ${DOWNLOAD_DIR}
PREFIX ${BUILD_DIR}/fribidi
CONFIGURE_COMMAND ${MESON} setup --prefix ${LIBDIR}/fribidi -Ddocs=false --default-library static --libdir lib ${BUILD_DIR}/fribidi/src/external_fribidi-build ${BUILD_DIR}/fribidi/src/external_fribidi
CONFIGURE_COMMAND ${MESON} setup --prefix ${LIBDIR}/fribidi ${MESON_BUILD_TYPE} -Ddocs=false --default-library static --libdir lib ${BUILD_DIR}/fribidi/src/external_fribidi-build ${BUILD_DIR}/fribidi/src/external_fribidi
BUILD_COMMAND ninja
INSTALL_COMMAND ninja install
INSTALL_DIR ${LIBDIR}/fribidi

View File

@ -21,6 +21,7 @@ set(HARFBUZZ_EXTRA_OPTIONS
# Only used for command line utilities,
# disable as this would add an additional & unnecessary build-dependency.
-Dcairo=disabled
${MESON_BUILD_TYPE}
)
ExternalProject_Add(external_harfbuzz
@ -59,3 +60,10 @@ if(BUILD_MODE STREQUAL Release AND WIN32)
DEPENDEES install
)
endif()
if(BUILD_MODE STREQUAL Debug AND WIN32)
ExternalProject_Add_Step(external_harfbuzz after_install
COMMAND ${CMAKE_COMMAND} -E copy ${LIBDIR}/harfbuzz/lib/libharfbuzz.a ${HARVEST_TARGET}/harfbuzz/lib/libharfbuzz_d.lib
DEPENDEES install
)
endif()

View File

@ -15,7 +15,7 @@ llvm-config = '${LIBDIR}/llvm/bin/llvm-config'"
)
set(MESA_EXTRA_FLAGS
-Dbuildtype=release
${MESON_BUILD_TYPE}
-Dc_args=${MESA_CFLAGS}
-Dcpp_args=${MESA_CXXFLAGS}
-Dc_link_args=${MESA_LDFLAGS}

View File

@ -16,8 +16,10 @@ message("BuildMode = ${BUILD_MODE}")
if(BUILD_MODE STREQUAL "Debug")
set(LIBDIR ${CMAKE_CURRENT_BINARY_DIR}/Debug)
set(MESON_BUILD_TYPE -Dbuildtype=debug)
else()
set(LIBDIR ${CMAKE_CURRENT_BINARY_DIR}/Release)
set(MESON_BUILD_TYPE -Dbuildtype=release)
endif()
set(DOWNLOAD_DIR "${CMAKE_CURRENT_BINARY_DIR}/downloads" CACHE STRING "Path for downloaded files")

View File

@ -13,7 +13,7 @@ ExternalProject_Add(external_wayland
# NOTE: `-lm` is needed for `libxml2` which is a static library that uses `libm.so`,
# without this, math symbols such as `floor` aren't found.
CONFIGURE_COMMAND ${CMAKE_COMMAND} -E env PKG_CONFIG_PATH=${LIBDIR}/expat/lib/pkgconfig:${LIBDIR}/xml2/lib/pkgconfig:${LIBDIR}/ffi/lib/pkgconfig:$PKG_CONFIG_PATH
${MESON} --prefix ${LIBDIR}/wayland -Ddocumentation=false -Dtests=false -D "c_link_args=-L${LIBDIR}/ffi/lib -lm" . ../external_wayland
${MESON} --prefix ${LIBDIR}/wayland ${MESON_BUILD_TYPE} -Ddocumentation=false -Dtests=false -D "c_link_args=-L${LIBDIR}/ffi/lib -lm" . ../external_wayland
BUILD_COMMAND ninja
INSTALL_COMMAND ninja install
)

View File

@ -7,7 +7,7 @@ ExternalProject_Add(external_wayland_protocols
PREFIX ${BUILD_DIR}/wayland-protocols
# Use `-E` so the `PKG_CONFIG_PATH` can be defined to link against our own WAYLAND.
CONFIGURE_COMMAND ${CMAKE_COMMAND} -E env PKG_CONFIG_PATH=${LIBDIR}/wayland/lib64/pkgconfig:$PKG_CONFIG_PATH
${MESON} --prefix ${LIBDIR}/wayland-protocols . ../external_wayland_protocols -Dtests=false
${MESON} --prefix ${LIBDIR}/wayland-protocols ${MESON_BUILD_TYPE} . ../external_wayland_protocols -Dtests=false
BUILD_COMMAND ninja
INSTALL_COMMAND ninja install
)

View File

@ -42,6 +42,7 @@ def parse_arguments() -> argparse.Namespace:
parser.add_argument("--svn-branch", default=None)
parser.add_argument("--git-command", default="git")
parser.add_argument("--use-linux-libraries", action="store_true")
parser.add_argument("--architecture", type=str, choices=("x86_64", "amd64", "arm64",))
return parser.parse_args()
@ -51,6 +52,17 @@ def get_blender_git_root() -> str:
# Setup for precompiled libraries and tests from svn.
def get_effective_architecture(args: argparse.Namespace):
if args.architecture:
return args.architecture
# Check platform.version to detect arm64 with x86_64 python binary.
if "ARM64" in platform.version():
return "arm64"
return platform.machine().lower()
def svn_update(args: argparse.Namespace, release_version: Optional[str]) -> None:
svn_non_interactive = [args.svn_command, '--non-interactive']
@ -58,11 +70,11 @@ def svn_update(args: argparse.Namespace, release_version: Optional[str]) -> None
svn_url = make_utils.svn_libraries_base_url(release_version, args.svn_branch)
# Checkout precompiled libraries
architecture = get_effective_architecture(args)
if sys.platform == 'darwin':
# Check platform.version to detect arm64 with x86_64 python binary.
if platform.machine() == 'arm64' or ('ARM64' in platform.version()):
if architecture == 'arm64':
lib_platform = "darwin_arm64"
elif platform.machine() == 'x86_64':
elif architecture == 'x86_64':
lib_platform = "darwin"
else:
lib_platform = None
@ -256,14 +268,15 @@ if __name__ == "__main__":
blender_skip_msg = ""
submodules_skip_msg = ""
# Test if we are building a specific release version.
branch = make_utils.git_branch(args.git_command)
if branch == 'HEAD':
sys.stderr.write('Blender git repository is in detached HEAD state, must be in a branch\n')
sys.exit(1)
tag = make_utils.git_tag(args.git_command)
release_version = make_utils.git_branch_release_version(branch, tag)
blender_version = make_utils.parse_blender_version()
if blender_version.cycle != 'alpha':
major = blender_version.version // 100
minor = blender_version.version % 100
branch = f"blender-v{major}.{minor}-release"
release_version = f"{major}.{minor}"
else:
branch = 'main'
release_version = None
if not args.no_libraries:
svn_update(args, release_version)

View File

@ -111,6 +111,7 @@ class QuickFur(ObjectModeOperator, Operator):
bpy.ops.wm.append(directory=assets_directory,
filename=name,
use_recursive=True,
clear_asset_data=True,
do_reuse_local_id=True)
generate_group = bpy.data.node_groups["Generate Hair Curves"]
interpolate_group = bpy.data.node_groups["Interpolate Hair Curves"]

View File

@ -23,8 +23,6 @@ extern "C" {
/** Blender release cycle stage: alpha/beta/rc/release. */
#define BLENDER_VERSION_CYCLE alpha
/* TODO proper version bump. */
/* Blender file format version. */
#define BLENDER_FILE_VERSION BLENDER_VERSION
#define BLENDER_FILE_SUBVERSION 0

View File

@ -108,7 +108,7 @@ BVHTree *bvhtree_from_editmesh_verts(
*/
BVHTree *bvhtree_from_editmesh_verts_ex(BVHTreeFromEditMesh *data,
struct BMEditMesh *em,
const blender::BitVector<> &mask,
blender::BitSpan mask,
int verts_num_active,
float epsilon,
int tree_type,
@ -124,7 +124,7 @@ BVHTree *bvhtree_from_editmesh_verts_ex(BVHTreeFromEditMesh *data,
BVHTree *bvhtree_from_mesh_verts_ex(struct BVHTreeFromMesh *data,
const float (*vert_positions)[3],
int verts_num,
const blender::BitVector<> &verts_mask,
blender::BitSpan verts_mask,
int verts_num_active,
float epsilon,
int tree_type,
@ -138,7 +138,7 @@ BVHTree *bvhtree_from_editmesh_edges(
*/
BVHTree *bvhtree_from_editmesh_edges_ex(BVHTreeFromEditMesh *data,
struct BMEditMesh *em,
const blender::BitVector<> &edges_mask,
blender::BitSpan edges_mask,
int edges_num_active,
float epsilon,
int tree_type,
@ -156,7 +156,7 @@ BVHTree *bvhtree_from_mesh_edges_ex(struct BVHTreeFromMesh *data,
const float (*vert_positions)[3],
const struct MEdge *edge,
int edges_num,
const blender::BitVector<> &edges_mask,
blender::BitSpan edges_mask,
int edges_num_active,
float epsilon,
int tree_type,
@ -170,7 +170,7 @@ BVHTree *bvhtree_from_editmesh_looptri(
*/
BVHTree *bvhtree_from_editmesh_looptri_ex(BVHTreeFromEditMesh *data,
struct BMEditMesh *em,
const blender::BitVector<> &mask,
blender::BitSpan mask,
int looptri_num_active,
float epsilon,
int tree_type,
@ -184,7 +184,7 @@ BVHTree *bvhtree_from_mesh_looptri_ex(struct BVHTreeFromMesh *data,
const struct MLoop *mloop,
const struct MLoopTri *looptri,
int looptri_num,
const blender::BitVector<> &mask,
blender::BitSpan mask,
int looptri_num_active,
float epsilon,
int tree_type,

View File

@ -41,10 +41,10 @@ struct PropertyRNA;
*/
void BKE_nlastrip_free(struct NlaStrip *strip, bool do_id_user);
/**
* Remove the given NLA track from the set of NLA tracks, free the track's data,
* and the track itself.
* Removes & frees all NLA strips from the given NLA track,
* then frees (but does not remove) the track itself.
*/
void BKE_nlatrack_free(ListBase *tracks, struct NlaTrack *nlt, bool do_id_user);
void BKE_nlatrack_free(struct NlaTrack *nlt, bool do_id_user);
/**
* Free the elements of type NLA Track in the given list, but do not free
* the list itself, since it is not free-standing.
@ -95,6 +95,17 @@ struct NlaTrack *BKE_nlatrack_add(struct AnimData *adt,
struct NlaTrack *prev,
bool is_liboverride);
/**
* Removes the given NLA track from the list of tracks provided.
*/
void BKE_nlatrack_remove(ListBase *tracks, struct NlaTrack *nlt);
/**
* Remove the given NLA track from the list of NLA tracks, free the track's data,
* and the track itself.
*/
void BKE_nlatrack_remove_and_free(ListBase *tracks, struct NlaTrack *nlt, bool do_id_user);
/**
* Create a NLA Strip referencing the given Action.
*/

View File

@ -29,6 +29,7 @@
#include "MEM_guardedalloc.h"
using blender::BitSpan;
using blender::BitVector;
using blender::float3;
using blender::IndexRange;
@ -672,7 +673,7 @@ static BVHTree *bvhtree_from_editmesh_verts_create_tree(float epsilon,
int tree_type,
int axis,
BMEditMesh *em,
const BitVector<> &verts_mask,
const BitSpan verts_mask,
int verts_num_active)
{
BM_mesh_elem_table_ensure(em->bm, BM_VERT);
@ -706,7 +707,7 @@ static BVHTree *bvhtree_from_mesh_verts_create_tree(float epsilon,
int axis,
const float (*positions)[3],
const int verts_num,
const BitVector<> &verts_mask,
const BitSpan verts_mask,
int verts_num_active)
{
if (!verts_mask.is_empty()) {
@ -737,7 +738,7 @@ static BVHTree *bvhtree_from_mesh_verts_create_tree(float epsilon,
BVHTree *bvhtree_from_editmesh_verts_ex(BVHTreeFromEditMesh *data,
BMEditMesh *em,
const BitVector<> &verts_mask,
const BitSpan verts_mask,
int verts_num_active,
float epsilon,
int tree_type,
@ -764,7 +765,7 @@ BVHTree *bvhtree_from_editmesh_verts(
BVHTree *bvhtree_from_mesh_verts_ex(BVHTreeFromMesh *data,
const float (*vert_positions)[3],
const int verts_num,
const BitVector<> &verts_mask,
const BitSpan verts_mask,
int verts_num_active,
float epsilon,
int tree_type,
@ -794,7 +795,7 @@ static BVHTree *bvhtree_from_editmesh_edges_create_tree(float epsilon,
int tree_type,
int axis,
BMEditMesh *em,
const BitVector<> &edges_mask,
const BitSpan edges_mask,
int edges_num_active)
{
BM_mesh_elem_table_ensure(em->bm, BM_EDGE);
@ -833,7 +834,7 @@ static BVHTree *bvhtree_from_editmesh_edges_create_tree(float epsilon,
static BVHTree *bvhtree_from_mesh_edges_create_tree(const float (*positions)[3],
const MEdge *edge,
const int edge_num,
const BitVector<> &edges_mask,
const BitSpan edges_mask,
int edges_num_active,
float epsilon,
int tree_type,
@ -871,7 +872,7 @@ static BVHTree *bvhtree_from_mesh_edges_create_tree(const float (*positions)[3],
BVHTree *bvhtree_from_editmesh_edges_ex(BVHTreeFromEditMesh *data,
BMEditMesh *em,
const BitVector<> &edges_mask,
const BitSpan edges_mask,
int edges_num_active,
float epsilon,
int tree_type,
@ -899,7 +900,7 @@ BVHTree *bvhtree_from_mesh_edges_ex(BVHTreeFromMesh *data,
const float (*vert_positions)[3],
const MEdge *edge,
const int edges_num,
const BitVector<> &edges_mask,
const BitSpan edges_mask,
int edges_num_active,
float epsilon,
int tree_type,
@ -931,7 +932,7 @@ static BVHTree *bvhtree_from_mesh_faces_create_tree(float epsilon,
const float (*positions)[3],
const MFace *face,
const int faces_num,
const BitVector<> &faces_mask,
const BitSpan faces_mask,
int faces_num_active)
{
if (faces_num == 0) {
@ -984,7 +985,7 @@ static BVHTree *bvhtree_from_editmesh_looptri_create_tree(float epsilon,
int tree_type,
int axis,
BMEditMesh *em,
const BitVector<> &looptri_mask,
const BitSpan looptri_mask,
int looptri_num_active)
{
const int looptri_num = em->tottri;
@ -1038,7 +1039,7 @@ static BVHTree *bvhtree_from_mesh_looptri_create_tree(float epsilon,
const MLoop *mloop,
const MLoopTri *looptri,
const int looptri_num,
const BitVector<> &looptri_mask,
const BitSpan looptri_mask,
int looptri_num_active)
{
if (!looptri_mask.is_empty()) {
@ -1079,7 +1080,7 @@ static BVHTree *bvhtree_from_mesh_looptri_create_tree(float epsilon,
BVHTree *bvhtree_from_editmesh_looptri_ex(BVHTreeFromEditMesh *data,
BMEditMesh *em,
const BitVector<> &looptri_mask,
const BitSpan looptri_mask,
int looptri_num_active,
float epsilon,
int tree_type,
@ -1109,7 +1110,7 @@ BVHTree *bvhtree_from_mesh_looptri_ex(BVHTreeFromMesh *data,
const struct MLoop *mloop,
const struct MLoopTri *looptri,
const int looptri_num,
const BitVector<> &looptri_mask,
const BitSpan looptri_mask,
int looptri_num_active,
float epsilon,
int tree_type,

View File

@ -159,12 +159,8 @@ static void camera_blend_read_expand(BlendExpander *expander, ID *id)
BLO_expand(expander, ca->ipo); // XXX deprecated - old animation system
LISTBASE_FOREACH (CameraBGImage *, bgpic, &ca->bg_images) {
if (bgpic->source == CAM_BGIMG_SOURCE_IMAGE) {
BLO_expand(expander, bgpic->ima);
}
else if (bgpic->source == CAM_BGIMG_SOURCE_MOVIE) {
BLO_expand(expander, bgpic->ima);
}
BLO_expand(expander, bgpic->ima);
BLO_expand(expander, bgpic->clip);
}
}

View File

@ -17,7 +17,6 @@
#include "DNA_meshdata_types.h"
#include "DNA_object_types.h"
#include "BLI_bit_vector.hh"
#include "BLI_bounds.hh"
#include "BLI_edgehash.h"
#include "BLI_endian_switch.h"
@ -66,7 +65,6 @@
#include "BLO_read_write.h"
using blender::BitVector;
using blender::float3;
using blender::MutableSpan;
using blender::Span;

View File

@ -316,7 +316,7 @@ void BKE_mesh_foreach_mapped_subdiv_face_center(
BKE_mesh_vertex_normals_ensure(mesh) :
nullptr;
const int *index = static_cast<const int *>(CustomData_get_layer(&mesh->pdata, CD_ORIGINDEX));
const blender::BitVector<> &facedot_tags = mesh->runtime->subsurf_face_dot_tags;
const blender::BitSpan facedot_tags = mesh->runtime->subsurf_face_dot_tags;
if (index) {
for (int i = 0; i < mesh->totpoly; i++, mp++) {

View File

@ -42,6 +42,7 @@
using blender::BitVector;
using blender::float3;
using blender::int2;
using blender::MutableBitSpan;
using blender::MutableSpan;
using blender::short2;
using blender::Span;
@ -1238,7 +1239,7 @@ static bool loop_split_generator_check_cyclic_smooth_fan(const Span<MLoop> mloop
const Span<int2> edge_to_loops,
const Span<int> loop_to_poly,
const int *e2l_prev,
BitVector<> &skip_loops,
MutableBitSpan skip_loops,
const int ml_curr_index,
const int ml_prev_index,
const int mp_curr_index)

View File

@ -91,7 +91,7 @@ void BKE_nlastrip_free(NlaStrip *strip, const bool do_id_user)
MEM_freeN(strip);
}
void BKE_nlatrack_free(ListBase *tracks, NlaTrack *nlt, bool do_id_user)
void BKE_nlatrack_free(NlaTrack *nlt, const bool do_id_user)
{
NlaStrip *strip, *stripn;
@ -107,12 +107,7 @@ void BKE_nlatrack_free(ListBase *tracks, NlaTrack *nlt, bool do_id_user)
}
/* free NLA track itself now */
if (tracks) {
BLI_freelinkN(tracks, nlt);
}
else {
MEM_freeN(nlt);
}
MEM_freeN(nlt);
}
void BKE_nla_tracks_free(ListBase *tracks, bool do_id_user)
@ -127,7 +122,7 @@ void BKE_nla_tracks_free(ListBase *tracks, bool do_id_user)
/* free tracks one by one */
for (nlt = tracks->first; nlt; nlt = nltn) {
nltn = nlt->next;
BKE_nlatrack_free(tracks, nlt, do_id_user);
BKE_nlatrack_remove_and_free(tracks, nlt, do_id_user);
}
/* clear the list's pointers to be safe */
@ -514,6 +509,20 @@ void BKE_nla_strip_foreach_id(NlaStrip *strip, LibraryForeachIDData *data)
}
}
/* Removing ------------------------------------------ */
void BKE_nlatrack_remove(ListBase *tracks, struct NlaTrack *nlt)
{
BLI_assert(tracks);
BLI_remlink(tracks, nlt);
}
void BKE_nlatrack_remove_and_free(ListBase *tracks, struct NlaTrack *nlt, bool do_id_user)
{
BKE_nlatrack_remove(tracks, nlt);
BKE_nlatrack_free(nlt, do_id_user);
}
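The split makes the intermediate state usable on its own: a track can now be unlinked without being freed. A minimal usage sketch (hypothetical `adt` pointer with at least one track assumed; not part of the patch):

/* Unlink only: the track stays valid and can be re-linked elsewhere. */
NlaTrack *nlt = (NlaTrack *)adt->nla_tracks.first;
BKE_nlatrack_remove(&adt->nla_tracks, nlt);
BLI_addtail(&adt->nla_tracks, nlt); /* e.g. move it to the end of the list. */

/* Unlink and destroy in one step, as BKE_nla_tracks_free() now does per track. */
BKE_nlatrack_remove_and_free(&adt->nla_tracks, nlt, true);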
/* *************************************************** */
/* NLA Evaluation <-> Editing Stuff */

View File

@ -91,4 +91,29 @@ TEST(nla_track, BKE_nlatrack_remove_strip)
EXPECT_EQ(-1, BLI_findindex(&track.strips, &strip2));
}
TEST(nla_track, BKE_nlatrack_remove_and_free)
{
AnimData adt{};
NlaTrack *track1;
NlaTrack *track2;
// Add NLA tracks to the Animation Data.
track1 = BKE_nlatrack_add(&adt, NULL, false);
track2 = BKE_nlatrack_add(&adt, track1, false);
// Ensure there are 2 tracks in the list.
EXPECT_EQ(2, BLI_listbase_count(&adt.nla_tracks));
BKE_nlatrack_remove_and_free(&adt.nla_tracks, track2, false);
EXPECT_EQ(1, BLI_listbase_count(&adt.nla_tracks));
// Ensure the correct track was removed.
EXPECT_EQ(-1, BLI_findindex(&adt.nla_tracks, track2));
// Free the rest of the tracks, and ensure they are removed.
BKE_nlatrack_remove_and_free(&adt.nla_tracks, track1, false);
EXPECT_EQ(0, BLI_listbase_count(&adt.nla_tracks));
EXPECT_EQ(-1, BLI_findindex(&adt.nla_tracks, track1));
}
} // namespace blender::bke::tests

View File

@ -2266,6 +2266,8 @@ PBVH *BKE_sculpt_object_pbvh_ensure(Depsgraph *depsgraph, Object *ob)
return pbvh;
}
ob->sculpt->islands_valid = false;
if (ob->sculpt->bm != nullptr) {
/* Sculpting on a BMesh (dynamic-topology) gets a special PBVH. */
pbvh = build_pbvh_for_dynamic_topology(ob);

View File

@ -0,0 +1,234 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
/** \file
* \ingroup bli
*
* This file provides the basis for processing "indexed bits" (i.e. every bit has an index).
* The main purpose of this file is to define how bits are indexed within a memory buffer.
* For example, one has to define whether the first bit is the least or most significant bit and
* how endianness affects the bit order.
*
* The order is defined as follows:
* - Every indexed bit is part of a #BitInt. These ints are ordered by their address as usual.
* - Within each #BitInt, the bits are ordered from least to most significant.
*/
#include "BLI_index_range.hh"
#include "BLI_utildefines.h"
#include <ostream>
namespace blender::bits {
/** Using a large integer type is better because then it's easier to process many bits at once. */
using BitInt = uint64_t;
/** Number of bits that fit into #BitInt. */
static constexpr int64_t BitsPerInt = int64_t(sizeof(BitInt) * 8);
/** Shift amount to get from a bit index to an int index. Equivalent to `log(BitsPerInt, 2)`. */
static constexpr int64_t BitToIntIndexShift = 3 + (sizeof(BitInt) >= 2) + (sizeof(BitInt) >= 4) +
(sizeof(BitInt) >= 8);
/** Bit mask containing a 1 for the last few bits that index a bit inside a #BitInt. */
static constexpr BitInt BitIndexMask = (BitInt(1) << BitToIntIndexShift) - 1;
inline BitInt mask_first_n_bits(const int64_t n)
{
BLI_assert(n >= 0);
BLI_assert(n <= BitsPerInt);
if (n == BitsPerInt) {
return BitInt(-1);
}
return (BitInt(1) << n) - 1;
}
inline BitInt mask_last_n_bits(const int64_t n)
{
return ~mask_first_n_bits(BitsPerInt - n);
}
inline BitInt mask_range_bits(const int64_t start, const int64_t size)
{
BLI_assert(start >= 0);
BLI_assert(size >= 0);
const int64_t end = start + size;
BLI_assert(end <= BitsPerInt);
if (end == BitsPerInt) {
return mask_last_n_bits(size);
}
return ((BitInt(1) << end) - 1) & ~((BitInt(1) << start) - 1);
}
inline BitInt mask_single_bit(const int64_t bit_index)
{
BLI_assert(bit_index >= 0);
BLI_assert(bit_index < BitsPerInt);
return BitInt(1) << bit_index;
}
inline BitInt *int_containing_bit(BitInt *data, const int64_t bit_index)
{
return data + (bit_index >> BitToIntIndexShift);
}
inline const BitInt *int_containing_bit(const BitInt *data, const int64_t bit_index)
{
return data + (bit_index >> BitToIntIndexShift);
}
/**
* This is a read-only pointer to a specific bit. The value of the bit can be retrieved, but
* not changed.
*/
class BitRef {
private:
/** Points to the exact integer that the bit is in. */
const BitInt *int_;
/** All zeros except for a single one at the bit that is referenced. */
BitInt mask_;
friend class MutableBitRef;
public:
BitRef() = default;
/**
* Reference a specific bit in an array. Note that #data does *not* have to point to the
* exact integer the bit is in.
*/
BitRef(const BitInt *data, const int64_t bit_index)
{
int_ = int_containing_bit(data, bit_index);
mask_ = mask_single_bit(bit_index & BitIndexMask);
}
/**
* Return true when the bit is currently 1 and false otherwise.
*/
bool test() const
{
const BitInt value = *int_;
const BitInt masked_value = value & mask_;
return masked_value != 0;
}
operator bool() const
{
return this->test();
}
};
/**
* Similar to #BitRef, but also allows changing the referenced bit.
*/
class MutableBitRef {
private:
/** Points to the integer that the bit is in. */
BitInt *int_;
/** All zeros except for a single one at the bit that is referenced. */
BitInt mask_;
public:
MutableBitRef() = default;
/**
* Reference a specific bit in an array. Note that #data does *not* have to point to the
* exact int the bit is in.
*/
MutableBitRef(BitInt *data, const int64_t bit_index)
{
int_ = int_containing_bit(data, bit_index);
mask_ = mask_single_bit(bit_index & BitIndexMask);
}
/**
* Support implicitly casting to a read-only #BitRef.
*/
operator BitRef() const
{
BitRef bit_ref;
bit_ref.int_ = int_;
bit_ref.mask_ = mask_;
return bit_ref;
}
/**
* Return true when the bit is currently 1 and false otherwise.
*/
bool test() const
{
const BitInt value = *int_;
const BitInt masked_value = value & mask_;
return masked_value != 0;
}
operator bool() const
{
return this->test();
}
/**
* Change the bit to a 1.
*/
void set()
{
*int_ |= mask_;
}
/**
* Change the bit to a 0.
*/
void reset()
{
*int_ &= ~mask_;
}
/**
* Change the bit to a 1 if #value is true and 0 otherwise. If the value is highly unpredictable
* for the CPU branch predictor, it can be faster to use #set_branchless instead.
*/
void set(const bool value)
{
if (value) {
this->set();
}
else {
this->reset();
}
}
/**
* Does the same as #set, but does not use a branch. This is faster when the input value is
* unpredictable for the CPU branch predictor (best case for this function is a uniform random
* distribution with 50% probability for true and false). If the value is predictable, this is
* likely slower than #set.
*/
void set_branchless(const bool value)
{
const BitInt value_int = BitInt(value);
BLI_assert(ELEM(value_int, 0, 1));
const BitInt old = *int_;
*int_ =
/* Unset bit. */
(~mask_ & old)
/* Optionally set it again. The -1 turns a 1 into `0x00...` and a 0 into `0xff...`. */
| (mask_ & ~(value_int - 1));
}
};
inline std::ostream &operator<<(std::ostream &stream, const BitRef &bit)
{
return stream << (bit ? "1" : "0");
}
inline std::ostream &operator<<(std::ostream &stream, const MutableBitRef &bit)
{
return stream << BitRef(bit);
}
} // namespace blender::bits
namespace blender {
using bits::BitRef;
using bits::MutableBitRef;
} // namespace blender
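Taken together, a short sketch of the indexing convention and the two reference types (standalone usage assumed; not part of the patch):

#include "BLI_bit_ref.hh"

using namespace blender::bits;

static void bit_ref_demo()
{
  uint64_t buffer[2] = {0, 0};
  /* Bit 70 lives in buffer[70 >> BitToIntIndexShift] == buffer[1],
   * at position 70 & BitIndexMask == 6 (least significant bit first). */
  MutableBitRef bit(buffer, 70);
  bit.set();                           /* buffer[1] |= mask_single_bit(6); */
  const BitRef read_only = bit;        /* Implicit cast to the read-only reference. */
  const bool value = read_only.test(); /* true */
  bit.set_branchless(!value);          /* Branch-free write; clears the bit again. */
}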

View File

@ -0,0 +1,290 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include "BLI_bit_ref.hh"
#include "BLI_index_range.hh"
#include "BLI_memory_utils.hh"
namespace blender::bits {
/** Base class for a const and non-const bit-iterator. */
class BitIteratorBase {
protected:
const BitInt *data_;
int64_t bit_index_;
public:
BitIteratorBase(const BitInt *data, const int64_t bit_index) : data_(data), bit_index_(bit_index)
{
}
BitIteratorBase &operator++()
{
bit_index_++;
return *this;
}
friend bool operator!=(const BitIteratorBase &a, const BitIteratorBase &b)
{
BLI_assert(a.data_ == b.data_);
return a.bit_index_ != b.bit_index_;
}
};
/** Allows iterating over the bits in a memory buffer. */
class BitIterator : public BitIteratorBase {
public:
BitIterator(const BitInt *data, const int64_t bit_index) : BitIteratorBase(data, bit_index)
{
}
BitRef operator*() const
{
return BitRef(data_, bit_index_);
}
};
/** Allows iterating over the bits in a memory buffer. */
class MutableBitIterator : public BitIteratorBase {
public:
MutableBitIterator(BitInt *data, const int64_t bit_index) : BitIteratorBase(data, bit_index)
{
}
MutableBitRef operator*() const
{
return MutableBitRef(const_cast<BitInt *>(data_), bit_index_);
}
};
/**
* Similar to #Span, but references a range of bits instead of normal C++ types (which must be at
* least one byte large). Use #MutableBitSpan if the values are supposed to be modified.
*
* The beginning and end of a #BitSpan does *not* have to be at byte/int boundaries. It can start
* and end at any bit.
*/
class BitSpan {
private:
/** Base pointer to the integers containing the bits. The actual bit span might start at a much
* higher address when `bit_range_.start()` is large. */
const BitInt *data_ = nullptr;
/** The range of referenced bits. */
IndexRange bit_range_ = {0, 0};
public:
/** Construct an empty span. */
BitSpan() = default;
BitSpan(const BitInt *data, const int64_t size_in_bits) : data_(data), bit_range_(size_in_bits)
{
}
BitSpan(const BitInt *data, const IndexRange bit_range) : data_(data), bit_range_(bit_range)
{
}
/** Number of bits referenced by the span. */
int64_t size() const
{
return bit_range_.size();
}
bool is_empty() const
{
return bit_range_.is_empty();
}
IndexRange index_range() const
{
return IndexRange(bit_range_.size());
}
BitRef operator[](const int64_t index) const
{
BLI_assert(index >= 0);
BLI_assert(index < bit_range_.size());
return {data_, bit_range_.start() + index};
}
BitSpan slice(const IndexRange range) const
{
return {data_, bit_range_.slice(range)};
}
const BitInt *data() const
{
return data_;
}
const IndexRange &bit_range() const
{
return bit_range_;
}
BitIterator begin() const
{
return {data_, bit_range_.start()};
}
BitIterator end() const
{
return {data_, bit_range_.one_after_last()};
}
};
/** Same as #BitSpan, but also allows modifying the referenced bits. */
class MutableBitSpan {
private:
BitInt *data_ = nullptr;
IndexRange bit_range_ = {0, 0};
public:
MutableBitSpan() = default;
MutableBitSpan(BitInt *data, const int64_t size) : data_(data), bit_range_(size)
{
}
MutableBitSpan(BitInt *data, const IndexRange bit_range) : data_(data), bit_range_(bit_range)
{
}
int64_t size() const
{
return bit_range_.size();
}
bool is_empty() const
{
return bit_range_.is_empty();
}
IndexRange index_range() const
{
return IndexRange(bit_range_.size());
}
MutableBitRef operator[](const int64_t index) const
{
BLI_assert(index >= 0);
BLI_assert(index < bit_range_.size());
return {data_, bit_range_.start() + index};
}
MutableBitSpan slice(const IndexRange range) const
{
return {data_, bit_range_.slice(range)};
}
BitInt *data() const
{
return data_;
}
const IndexRange &bit_range() const
{
return bit_range_;
}
MutableBitIterator begin() const
{
return {data_, bit_range_.start()};
}
MutableBitIterator end() const
{
return {data_, bit_range_.one_after_last()};
}
operator BitSpan() const
{
return {data_, bit_range_};
}
/** Sets all referenced bits to 1. */
void set_all()
{
const AlignedIndexRanges ranges = split_index_range_by_alignment(bit_range_, BitsPerInt);
{
BitInt &first_int = *int_containing_bit(data_, bit_range_.start());
const BitInt first_int_mask = mask_range_bits(ranges.prefix.start() & BitIndexMask,
ranges.prefix.size());
first_int |= first_int_mask;
}
{
BitInt *start = int_containing_bit(data_, ranges.aligned.start());
const int64_t ints_to_fill = ranges.aligned.size() / BitsPerInt;
constexpr BitInt fill_value = BitInt(-1);
initialized_fill_n(start, ints_to_fill, fill_value);
}
{
BitInt &last_int = *int_containing_bit(data_, bit_range_.one_after_last() - 1);
const BitInt last_int_mask = mask_first_n_bits(ranges.suffix.size());
last_int |= last_int_mask;
}
}
/** Sets all referenced bits to 0. */
void reset_all()
{
const AlignedIndexRanges ranges = split_index_range_by_alignment(bit_range_, BitsPerInt);
{
BitInt &first_int = *int_containing_bit(data_, bit_range_.start());
const BitInt first_int_mask = mask_range_bits(ranges.prefix.start() & BitIndexMask,
ranges.prefix.size());
first_int &= ~first_int_mask;
}
{
BitInt *start = int_containing_bit(data_, ranges.aligned.start());
const int64_t ints_to_fill = ranges.aligned.size() / BitsPerInt;
constexpr BitInt fill_value = 0;
initialized_fill_n(start, ints_to_fill, fill_value);
}
{
BitInt &last_int = *int_containing_bit(data_, bit_range_.one_after_last() - 1);
const BitInt last_int_mask = mask_first_n_bits(ranges.suffix.size());
last_int &= ~last_int_mask;
}
}
/** Sets all referenced bits to either 0 or 1. */
void set_all(const bool value)
{
if (value) {
this->set_all();
}
else {
this->reset_all();
}
}
/** Same as #set_all to mirror #MutableSpan. */
void fill(const bool value)
{
this->set_all(value);
}
};
inline std::ostream &operator<<(std::ostream &stream, const BitSpan &span)
{
stream << "(Size: " << span.size() << ", ";
for (const BitRef bit : span) {
stream << bit;
}
stream << ")";
return stream;
}
inline std::ostream &operator<<(std::ostream &stream, const MutableBitSpan &span)
{
return stream << BitSpan(span);
}
} // namespace blender::bits
namespace blender {
using bits::BitSpan;
using bits::MutableBitSpan;
} // namespace blender
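A sketch of the span types in action; note that a span need not start or end at an int boundary (standalone usage assumed):

#include "BLI_bit_span.hh"

static void bit_span_demo()
{
  uint64_t data[2] = {0, 0};
  /* Bits [60, 70): a 4-bit prefix in data[0] and a 6-bit suffix in data[1]. */
  blender::MutableBitSpan span(data, blender::IndexRange(60, 10));
  span.set_all(); /* Sets the top 4 bits of data[0] and the bottom 6 bits of data[1]. */
  span.slice(blender::IndexRange(2, 3)).set_all(false); /* Clears bits 62..64. */
  const blender::BitSpan read_only = span; /* Implicit conversion to the read-only span. */
  (void)read_only[0];                      /* References bit 60 of the underlying buffer. */
}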

View File

@ -38,142 +38,11 @@
#include <cstring>
#include "BLI_allocator.hh"
#include "BLI_index_range.hh"
#include "BLI_memory_utils.hh"
#include "BLI_bit_span.hh"
#include "BLI_span.hh"
namespace blender::bits {
/**
* Using a large integer type is better because then it's easier to process many bits at once.
*/
using IntType = uint64_t;
static constexpr int64_t BitsPerInt = int64_t(sizeof(IntType) * 8);
static constexpr int64_t BitToIntIndexShift = 3 + (sizeof(IntType) >= 2) + (sizeof(IntType) >= 4) +
(sizeof(IntType) >= 8);
static constexpr IntType BitIndexMask = (IntType(1) << BitToIntIndexShift) - 1;
/**
* This is a read-only pointer to a specific bit. The value of the bit can be retrieved, but
* not changed.
*/
class BitRef {
private:
/** Points to the integer that the bit is in. */
const IntType *ptr_;
/** All zeros except for a single one at the bit that is referenced. */
IntType mask_;
friend class MutableBitRef;
public:
BitRef() = default;
/**
* Reference a specific bit in an array. Note that #ptr does *not* have to point to the
* exact integer the bit is in.
*/
BitRef(const IntType *ptr, const int64_t bit_index)
{
ptr_ = ptr + (bit_index >> BitToIntIndexShift);
mask_ = IntType(1) << (bit_index & BitIndexMask);
}
/**
* Return true when the bit is currently 1 and false otherwise.
*/
bool test() const
{
const IntType value = *ptr_;
const IntType masked_value = value & mask_;
return masked_value != 0;
}
operator bool() const
{
return this->test();
}
};
/**
* Similar to #BitRef, but also allows changing the referenced bit.
*/
class MutableBitRef {
private:
/** Points to the integer that the bit is in. */
IntType *ptr_;
/** All zeros except for a single one at the bit that is referenced. */
IntType mask_;
public:
MutableBitRef() = default;
/**
* Reference a specific bit in an array. Note that #ptr does *not* have to point to the
* exact int the bit is in.
*/
MutableBitRef(IntType *ptr, const int64_t bit_index)
{
ptr_ = ptr + (bit_index >> BitToIntIndexShift);
mask_ = IntType(1) << IntType(bit_index & BitIndexMask);
}
/**
* Support implicitly casting to a read-only #BitRef.
*/
operator BitRef() const
{
BitRef bit_ref;
bit_ref.ptr_ = ptr_;
bit_ref.mask_ = mask_;
return bit_ref;
}
/**
* Return true when the bit is currently 1 and false otherwise.
*/
bool test() const
{
const IntType value = *ptr_;
const IntType masked_value = value & mask_;
return masked_value != 0;
}
operator bool() const
{
return this->test();
}
/**
* Change the bit to a 1.
*/
void set()
{
*ptr_ |= mask_;
}
/**
* Change the bit to a 0.
*/
void reset()
{
*ptr_ &= ~mask_;
}
/**
* Change the bit to a 1 if #value is true and 0 otherwise.
*/
void set(const bool value)
{
if (value) {
this->set();
}
else {
this->reset();
}
}
};
template<
/**
* Number of bits that can be stored in the vector without doing an allocation.
@ -193,13 +62,13 @@ class BitVector {
static constexpr int64_t IntsInInlineBuffer = required_ints_for_bits(InlineBufferCapacity);
static constexpr int64_t BitsInInlineBuffer = IntsInInlineBuffer * BitsPerInt;
static constexpr int64_t AllocationAlignment = alignof(IntType);
static constexpr int64_t AllocationAlignment = alignof(BitInt);
/**
* Points to the first integer used by the vector. It might point to the memory in the inline
* buffer.
*/
IntType *data_;
BitInt *data_;
/** Current size of the vector in bits. */
int64_t size_in_bits_;
@ -211,7 +80,7 @@ class BitVector {
BLI_NO_UNIQUE_ADDRESS Allocator allocator_;
/** Contains the bits as long as the vector is small enough. */
BLI_NO_UNIQUE_ADDRESS TypedBuffer<IntType, IntsInInlineBuffer> inline_buffer_;
BLI_NO_UNIQUE_ADDRESS TypedBuffer<BitInt, IntsInInlineBuffer> inline_buffer_;
public:
BitVector(Allocator allocator = {}) noexcept : allocator_(allocator)
@ -219,7 +88,7 @@ class BitVector {
data_ = inline_buffer_;
size_in_bits_ = 0;
capacity_in_bits_ = BitsInInlineBuffer;
uninitialized_fill_n(data_, IntsInInlineBuffer, IntType(0));
uninitialized_fill_n(data_, IntsInInlineBuffer, BitInt(0));
}
BitVector(NoExceptConstructor, Allocator allocator = {}) noexcept : BitVector(allocator)
@ -236,8 +105,8 @@ class BitVector {
}
else {
/* Allocate a new array because the inline buffer is too small. */
data_ = static_cast<IntType *>(
allocator_.allocate(ints_to_copy * sizeof(IntType), AllocationAlignment, __func__));
data_ = static_cast<BitInt *>(
allocator_.allocate(ints_to_copy * sizeof(BitInt), AllocationAlignment, __func__));
capacity_in_bits_ = ints_to_copy * BitsPerInt;
}
size_in_bits_ = other.size_in_bits_;
@ -303,6 +172,16 @@ class BitVector {
return move_assign_container(*this, std::move(other));
}
operator BitSpan() const
{
return {data_, IndexRange(size_in_bits_)};
}
operator MutableBitSpan()
{
return {data_, IndexRange(size_in_bits_)};
}
/**
* Number of bits in the bit vector.
*/
@ -352,80 +231,24 @@ class BitVector {
size_in_bits_++;
}
class Iterator {
private:
const BitVector *vector_;
int64_t index_;
public:
Iterator(const BitVector &vector, const int64_t index) : vector_(&vector), index_(index)
{
}
Iterator &operator++()
{
index_++;
return *this;
}
friend bool operator!=(const Iterator &a, const Iterator &b)
{
BLI_assert(a.vector_ == b.vector_);
return a.index_ != b.index_;
}
BitRef operator*() const
{
return (*vector_)[index_];
}
};
class MutableIterator {
private:
BitVector *vector_;
int64_t index_;
public:
MutableIterator(BitVector &vector, const int64_t index) : vector_(&vector), index_(index)
{
}
MutableIterator &operator++()
{
index_++;
return *this;
}
friend bool operator!=(const MutableIterator &a, const MutableIterator &b)
{
BLI_assert(a.vector_ == b.vector_);
return a.index_ != b.index_;
}
MutableBitRef operator*() const
{
return (*vector_)[index_];
}
};
Iterator begin() const
BitIterator begin() const
{
return {*this, 0};
return {data_, 0};
}
Iterator end() const
BitIterator end() const
{
return {*this, size_in_bits_};
return {data_, size_in_bits_};
}
MutableIterator begin()
MutableBitIterator begin()
{
return {*this, 0};
return {data_, 0};
}
MutableIterator end()
MutableBitIterator end()
{
return {*this, size_in_bits_};
return {data_, size_in_bits_};
}
/**
@ -441,31 +264,8 @@ class BitVector {
}
size_in_bits_ = new_size_in_bits;
if (old_size_in_bits < new_size_in_bits) {
this->fill_range(IndexRange(old_size_in_bits, new_size_in_bits - old_size_in_bits), value);
}
}
/**
* Set #value for every element in #range.
*/
void fill_range(const IndexRange range, const bool value)
{
const AlignedIndexRanges aligned_ranges = split_index_range_by_alignment(range, BitsPerInt);
/* Fill first few bits. */
for (const int64_t i : aligned_ranges.prefix) {
(*this)[i].set(value);
}
/* Fill entire ints at once. */
const int64_t start_fill_int_index = aligned_ranges.aligned.start() / BitsPerInt;
const int64_t ints_to_fill = aligned_ranges.aligned.size() / BitsPerInt;
const IntType fill_value = value ? IntType(-1) : IntType(0);
initialized_fill_n(data_ + start_fill_int_index, ints_to_fill, fill_value);
/* Fill bits in the end that don't cover a full int. */
for (const int64_t i : aligned_ranges.suffix) {
(*this)[i].set(value);
MutableBitSpan(data_, IndexRange(old_size_in_bits, new_size_in_bits - old_size_in_bits))
.set_all(value);
}
}
@ -474,7 +274,7 @@ class BitVector {
*/
void fill(const bool value)
{
this->fill_range(IndexRange(0, size_in_bits_), value);
MutableBitSpan(data_, size_in_bits_).set_all(value);
}
/**
@ -517,7 +317,7 @@ class BitVector {
}
BLI_NOINLINE void realloc_to_at_least(const int64_t min_capacity_in_bits,
const IntType initial_value_for_new_ints = 0x00)
const BitInt initial_value_for_new_ints = 0)
{
if (capacity_in_bits_ >= min_capacity_in_bits) {
return;
@ -531,8 +331,8 @@ class BitVector {
const int64_t new_capacity_in_ints = std::max(min_capacity_in_ints, min_new_capacity_in_ints);
const int64_t ints_to_copy = this->used_ints_amount();
IntType *new_data = static_cast<IntType *>(allocator_.allocate(
new_capacity_in_ints * sizeof(IntType), AllocationAlignment, __func__));
BitInt *new_data = static_cast<BitInt *>(
allocator_.allocate(new_capacity_in_ints * sizeof(BitInt), AllocationAlignment, __func__));
uninitialized_copy_n(data_, ints_to_copy, new_data);
/* Always initialize new capacity even if it isn't used yet. That's necessary to avoid warnings
* caused by using uninitialized memory. This happens when e.g. setting or clearing a bit in an
@ -562,7 +362,5 @@ class BitVector {
} // namespace blender::bits
namespace blender {
using bits::BitRef;
using bits::BitVector;
using bits::MutableBitRef;
} // namespace blender
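For callers, the practical consequence of this refactor is that functions can take BitSpan instead of const BitVector<> & (as in the BKE_bvhutils changes above), with a BitVector converting implicitly. A hypothetical sketch:

#include "BLI_bit_vector.hh"

static int count_set_bits(const blender::BitSpan bits)
{
  int count = 0;
  for (const blender::BitRef bit : bits) {
    count += int(bit.test());
  }
  return count;
}

static void bit_vector_demo()
{
  blender::BitVector<> mask(100, false);
  mask[3].set();
  /* BitVector -> BitSpan conversion happens implicitly at the call site. */
  const int n = count_set_bits(mask); /* n == 1 */
  (void)n;
}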

View File

@ -304,6 +304,13 @@ void rotate_eul(float beul[3], char axis, float angle);
/* Order independent. */
/**
* Manipulate `eul` so it's close to `oldrot` while representing the same rotation
* with the aim of having the minimum difference between all axes.
*
* This is typically done so interpolating the values between two euler rotations
* doesn't add undesired rotation (even rotating multiple times around one axis).
*/
void compatible_eul(float eul[3], const float oldrot[3]);
void add_eul_euleul(float r_eul[3], float a[3], float b[3], short order);

View File

@ -457,6 +457,8 @@ if(WITH_GTESTS)
tests/BLI_array_store_test.cc
tests/BLI_array_test.cc
tests/BLI_array_utils_test.cc
tests/BLI_bit_ref_test.cc
tests/BLI_bit_span_test.cc
tests/BLI_bit_vector_test.cc
tests/BLI_bitmap_test.cc
tests/BLI_bounds_test.cc

View File

@ -1490,15 +1490,18 @@ void rotate_eul(float beul[3], const char axis, const float angle)
void compatible_eul(float eul[3], const float oldrot[3])
{
/* we could use M_PI as pi_thresh: which is correct but 5.1 gives better results.
* Checked with baking actions to fcurves - campbell */
const float pi_thresh = (5.1f);
/* When the rotation exceeds 180 degrees, it can be wrapped by 360 degrees
* to produce a closer match.
* NOTE: Values between `pi` & `2 * pi` work, where `pi` has the lowest number of
* discontinuities and values approaching `2 * pi` center the resulting rotation around zero,
* at the expense of the result being less compatible, see !104856. */
const float pi_thresh = (float)M_PI;
const float pi_x2 = (2.0f * (float)M_PI);
float deul[3];
uint i;
/* correct differences of about 360 degrees first */
/* Correct differences around 360 degrees first. */
for (i = 0; i < 3; i++) {
deul[i] = eul[i] - oldrot[i];
if (deul[i] > pi_thresh) {
@ -1511,29 +1514,17 @@ void compatible_eul(float eul[3], const float oldrot[3])
}
}
/* is 1 of the axis rotations larger than 180 degrees and the other small? NO ELSE IF!! */
if (fabsf(deul[0]) > 3.2f && fabsf(deul[1]) < 1.6f && fabsf(deul[2]) < 1.6f) {
if (deul[0] > 0.0f) {
eul[0] -= pi_x2;
}
else {
eul[0] += pi_x2;
}
}
if (fabsf(deul[1]) > 3.2f && fabsf(deul[2]) < 1.6f && fabsf(deul[0]) < 1.6f) {
if (deul[1] > 0.0f) {
eul[1] -= pi_x2;
}
else {
eul[1] += pi_x2;
}
}
if (fabsf(deul[2]) > 3.2f && fabsf(deul[0]) < 1.6f && fabsf(deul[1]) < 1.6f) {
if (deul[2] > 0.0f) {
eul[2] -= pi_x2;
}
else {
eul[2] += pi_x2;
uint j = 1, k = 2;
for (i = 0; i < 3; j = k, k = i++) {
/* Check if this axis of rotation is larger than 180 degrees and
* the others are smaller than 90 degrees. */
if (fabsf(deul[i]) > M_PI && fabsf(deul[j]) < M_PI_2 && fabsf(deul[k]) < M_PI_2) {
if (deul[i] > 0.0f) {
eul[i] -= pi_x2;
}
else {
eul[i] += pi_x2;
}
}
}
}
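A worked example of the wrap (a sketch; DEG2RADF is assumed from BLI_math_rotation.h):

float oldrot[3] = {DEG2RADF(350.0f), 0.0f, 0.0f};
float eul[3] = {DEG2RADF(10.0f), 0.0f, 0.0f};
compatible_eul(eul, oldrot);
/* deul[0] was -340 degrees, below -pi_thresh, so 360 degrees are added:
 * eul[0] is now ~DEG2RADF(370.0f). Same rotation, but interpolating from
 * oldrot now sweeps 20 degrees forward instead of 340 degrees backward. */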

View File

@ -0,0 +1,160 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include <array>
#include "BLI_bit_ref.hh"
#include "testing/testing.h"
namespace blender::bits::tests {
TEST(bit_ref, MaskFirstNBits)
{
EXPECT_EQ(mask_first_n_bits(0), 0);
EXPECT_EQ(mask_first_n_bits(1),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001);
EXPECT_EQ(mask_first_n_bits(5),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001'1111);
EXPECT_EQ(mask_first_n_bits(63),
0b0111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
EXPECT_EQ(mask_first_n_bits(64),
0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
}
TEST(bit_ref, MaskLastNBits)
{
EXPECT_EQ(mask_last_n_bits(0),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_last_n_bits(1),
0b1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_last_n_bits(5),
0b1111'1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_last_n_bits(63),
0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1110);
EXPECT_EQ(mask_last_n_bits(64),
0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
}
TEST(bit_ref, MaskSingleBit)
{
EXPECT_EQ(mask_single_bit(0), 1);
EXPECT_EQ(mask_single_bit(1),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010);
EXPECT_EQ(mask_single_bit(5),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010'0000);
EXPECT_EQ(mask_single_bit(63),
0b1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
}
TEST(bit_ref, IntContainingBit)
{
std::array<uint64_t, 5> array;
uint64_t *data = array.data();
EXPECT_EQ(int_containing_bit(data, 0), data);
EXPECT_EQ(int_containing_bit(data, 1), data);
EXPECT_EQ(int_containing_bit(data, 63), data);
EXPECT_EQ(int_containing_bit(data, 64), data + 1);
EXPECT_EQ(int_containing_bit(data, 65), data + 1);
EXPECT_EQ(int_containing_bit(data, 100), data + 1);
EXPECT_EQ(int_containing_bit(data, 127), data + 1);
EXPECT_EQ(int_containing_bit(data, 128), data + 2);
const uint64_t *data_const = data;
EXPECT_EQ(int_containing_bit(data_const, 0), data_const);
EXPECT_EQ(int_containing_bit(data_const, 1), data_const);
EXPECT_EQ(int_containing_bit(data_const, 63), data_const);
EXPECT_EQ(int_containing_bit(data_const, 64), data_const + 1);
EXPECT_EQ(int_containing_bit(data_const, 65), data_const + 1);
EXPECT_EQ(int_containing_bit(data_const, 100), data_const + 1);
EXPECT_EQ(int_containing_bit(data_const, 127), data_const + 1);
EXPECT_EQ(int_containing_bit(data_const, 128), data_const + 2);
}
TEST(bit_ref, Test)
{
uint64_t data = (1 << 3) | (1 << 7);
EXPECT_FALSE(BitRef(&data, 0).test());
EXPECT_FALSE(BitRef(&data, 1).test());
EXPECT_FALSE(BitRef(&data, 2).test());
EXPECT_TRUE(BitRef(&data, 3).test());
EXPECT_FALSE(BitRef(&data, 4));
EXPECT_FALSE(BitRef(&data, 5));
EXPECT_FALSE(BitRef(&data, 6));
EXPECT_TRUE(BitRef(&data, 7));
EXPECT_FALSE(MutableBitRef(&data, 0).test());
EXPECT_FALSE(MutableBitRef(&data, 1).test());
EXPECT_FALSE(MutableBitRef(&data, 2).test());
EXPECT_TRUE(MutableBitRef(&data, 3).test());
EXPECT_FALSE(MutableBitRef(&data, 4));
EXPECT_FALSE(MutableBitRef(&data, 5));
EXPECT_FALSE(MutableBitRef(&data, 6));
EXPECT_TRUE(MutableBitRef(&data, 7));
}
TEST(bit_ref, Set)
{
uint64_t data = 0;
MutableBitRef(&data, 0).set();
MutableBitRef(&data, 1).set();
MutableBitRef(&data, 1).set();
MutableBitRef(&data, 4).set();
EXPECT_EQ(data, (1 << 0) | (1 << 1) | (1 << 4));
MutableBitRef(&data, 5).set(true);
MutableBitRef(&data, 1).set(false);
EXPECT_EQ(data, (1 << 0) | (1 << 4) | (1 << 5));
}
TEST(bit_ref, Reset)
{
uint64_t data = -1;
MutableBitRef(&data, 0).reset();
MutableBitRef(&data, 2).reset();
EXPECT_EQ(data, uint64_t(-1) & ~(1 << 0) & ~(1 << 2));
}
TEST(bit_ref, SetBranchless)
{
uint64_t data = 0;
MutableBitRef(&data, 0).set_branchless(true);
EXPECT_EQ(data, 1);
MutableBitRef(&data, 0).set_branchless(false);
EXPECT_EQ(data, 0);
MutableBitRef(&data, 3).set_branchless(false);
MutableBitRef(&data, 4).set_branchless(true);
EXPECT_EQ(data, 16);
MutableBitRef(&data, 3).set_branchless(true);
MutableBitRef(&data, 4).set_branchless(true);
EXPECT_EQ(data, 24);
}
TEST(bit_ref, Cast)
{
uint64_t data = 0;
MutableBitRef mutable_ref(&data, 3);
BitRef ref = mutable_ref;
EXPECT_FALSE(ref);
mutable_ref.set();
EXPECT_TRUE(ref);
}
TEST(bit_ref, MaskRangeBits)
{
EXPECT_EQ(mask_range_bits(0, 0),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_range_bits(0, 1),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001);
EXPECT_EQ(mask_range_bits(0, 5),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0001'1111);
EXPECT_EQ(mask_range_bits(64, 0),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_range_bits(63, 1),
0b1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_range_bits(59, 5),
0b1111'1000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
EXPECT_EQ(mask_range_bits(8, 3),
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0111'0000'0000);
EXPECT_EQ(mask_range_bits(0, 64),
0b1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111'1111);
}
} // namespace blender::bits::tests

View File

@ -0,0 +1,139 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include <array>
#include "BLI_bit_span.hh"
#include "testing/testing.h"
namespace blender::bits::tests {
TEST(bit_span, DefaultConstructor)
{
{
char buffer[sizeof(BitSpan)];
memset(buffer, 0xff, sizeof(BitSpan));
BitSpan &span = *new (buffer) BitSpan();
EXPECT_TRUE(span.is_empty());
EXPECT_EQ(span.size(), 0);
}
{
char buffer[sizeof(MutableBitSpan)];
memset(buffer, 0xff, sizeof(MutableBitSpan));
MutableBitSpan &span = *new (buffer) MutableBitSpan();
EXPECT_TRUE(span.is_empty());
EXPECT_EQ(span.size(), 0);
}
}
TEST(bit_span, Iteration)
{
uint64_t data = (1 << 2) | (1 << 3);
const BitSpan span(&data, 30);
EXPECT_EQ(span.size(), 30);
int index = 0;
for (const BitRef bit : span) {
EXPECT_EQ(bit.test(), ELEM(index, 2, 3));
index++;
}
}
TEST(bit_span, MutableIteration)
{
uint64_t data = 0;
MutableBitSpan span(&data, 40);
EXPECT_EQ(span.size(), 40);
int index = 0;
for (MutableBitRef bit : span) {
bit.set(index % 4 == 0);
index++;
}
EXPECT_EQ(data,
0b0000'0000'0000'0000'0000'0000'0001'0001'0001'0001'0001'0001'0001'0001'0001'0001);
}
TEST(bit_span, SubscriptOperator)
{
uint64_t data[2] = {0, 0};
MutableBitSpan mutable_span(data, 128);
BitSpan span = mutable_span;
EXPECT_EQ(mutable_span.data(), data);
EXPECT_EQ(mutable_span.bit_range(), IndexRange(128));
EXPECT_EQ(span.data(), data);
EXPECT_EQ(span.bit_range(), IndexRange(128));
EXPECT_FALSE(mutable_span[5].test());
EXPECT_FALSE(span[5].test());
mutable_span[5].set(5);
EXPECT_TRUE(mutable_span[5].test());
EXPECT_TRUE(span[5].test());
EXPECT_FALSE(mutable_span[120].test());
EXPECT_FALSE(span[120].test());
mutable_span[120].set(120);
EXPECT_TRUE(mutable_span[120].test());
EXPECT_TRUE(span[120].test());
EXPECT_EQ(data[0],
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010'0000);
EXPECT_EQ(data[1],
0b0000'0001'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000);
}
TEST(bit_span, RangeConstructor)
{
uint64_t data = 0;
MutableBitSpan mutable_span(&data, IndexRange(4, 3));
BitSpan span = mutable_span;
EXPECT_FALSE(mutable_span[1].test());
EXPECT_FALSE(span[1].test());
mutable_span[0].set(true);
mutable_span[1].set(true);
mutable_span[2].set(true);
mutable_span[0].set(false);
mutable_span[2].set(false);
EXPECT_TRUE(mutable_span[1].test());
EXPECT_TRUE(span[1].test());
EXPECT_EQ(data,
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0010'0000);
}
TEST(bit_span, Set)
{
uint64_t data = 0;
MutableBitSpan(&data, 64).set_all(true);
EXPECT_EQ(data, uint64_t(-1));
MutableBitSpan(&data, 64).set_all(false);
EXPECT_EQ(data, uint64_t(0));
MutableBitSpan(&data, IndexRange(4, 8)).set_all(true);
EXPECT_EQ(data,
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'1111'1111'0000);
MutableBitSpan(&data, IndexRange(8, 30)).set_all(false);
EXPECT_EQ(data,
0b0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'0000'1111'0000);
}
TEST(bit_span, SetSliced)
{
std::array<uint64_t, 10> data;
memset(data.data(), 0, sizeof(data));
MutableBitSpan span{data.data(), 640};
span.slice(IndexRange(5, 500)).set_all(true);
for (const int64_t i : IndexRange(640)) {
EXPECT_EQ(span[i], i >= 5 && i < 505);
}
span.slice(IndexRange(10, 190)).set_all(false);
for (const int64_t i : IndexRange(640)) {
EXPECT_EQ(span[i], (i >= 5 && i < 10) || (i >= 200 && i < 505));
}
}
} // namespace blender::bits::tests

View File

@ -6,7 +6,7 @@
#include "testing/testing.h"
namespace blender::tests {
namespace blender::bits::tests {
TEST(bit_vector, DefaultConstructor)
{
@ -183,4 +183,4 @@ TEST(bit_vector, AppendMany)
EXPECT_TRUE(vec[5]);
}
} // namespace blender::tests
} // namespace blender::bits::tests

View File

@ -290,6 +290,24 @@ TEST(index_range, SplitByAlignment)
EXPECT_EQ(ranges.aligned, IndexRange());
EXPECT_EQ(ranges.suffix, IndexRange());
}
{
AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(64), 64);
EXPECT_EQ(ranges.prefix, IndexRange());
EXPECT_EQ(ranges.aligned, IndexRange(64));
EXPECT_EQ(ranges.suffix, IndexRange());
}
{
AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(64, 64), 64);
EXPECT_EQ(ranges.prefix, IndexRange());
EXPECT_EQ(ranges.aligned, IndexRange(64, 64));
EXPECT_EQ(ranges.suffix, IndexRange());
}
{
AlignedIndexRanges ranges = split_index_range_by_alignment(IndexRange(4, 8), 64);
EXPECT_EQ(ranges.prefix, IndexRange(4, 8));
EXPECT_EQ(ranges.aligned, IndexRange());
EXPECT_EQ(ranges.suffix, IndexRange());
}
}
} // namespace blender::tests
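These new cases pin down the decomposition that MutableBitSpan::set_all() relies on. As a sketch, the span [5, 505) used in the BLI_bit_span SetSliced test splits like this:

using blender::AlignedIndexRanges;
using blender::IndexRange;

const AlignedIndexRanges ranges = blender::split_index_range_by_alignment(IndexRange(5, 500), 64);
/* ranges.prefix  == IndexRange(5, 59):   bits 5..63, masked into the first int. */
/* ranges.aligned == IndexRange(64, 384): six whole ints, filled directly. */
/* ranges.suffix  == IndexRange(448, 57): bits 448..504, masked into the last int. */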

View File

@ -211,7 +211,7 @@ void *BMO_iter_as_arrayN(BMOpSlot slot_args[BMO_OP_MAX_SLOTS],
int BM_iter_mesh_bitmap_from_filter(const char itype,
BMesh *bm,
blender::BitVector<> &bitmap,
blender::MutableBitSpan bitmap,
bool (*test_fn)(BMElem *, void *user_data),
void *user_data)
{
@ -234,7 +234,7 @@ int BM_iter_mesh_bitmap_from_filter(const char itype,
}
int BM_iter_mesh_bitmap_from_filter_tessface(BMesh *bm,
blender::BitVector<> &bitmap,
blender::MutableBitSpan bitmap,
bool (*test_fn)(BMFace *, void *user_data),
void *user_data)
{

View File

@ -21,7 +21,7 @@
#include "BLI_mempool.h"
#ifdef __cplusplus
# include "BLI_bit_vector.hh"
# include "BLI_bit_span.hh"
#endif
#ifdef __cplusplus
@ -228,14 +228,14 @@ void *BMO_iter_as_arrayN(BMOpSlot slot_args[BMO_OP_MAX_SLOTS],
int BM_iter_mesh_bitmap_from_filter(char itype,
BMesh *bm,
blender::BitVector<> &bitmap,
blender::MutableBitSpan bitmap,
bool (*test_fn)(BMElem *, void *user_data),
void *user_data);
/**
* Needed when we want to check faces, but return a loop aligned array.
*/
int BM_iter_mesh_bitmap_from_filter_tessface(BMesh *bm,
blender::BitVector<> &bitmap,
blender::MutableBitSpan bitmap,
bool (*test_fn)(BMFace *, void *user_data),
void *user_data);

View File

@ -318,11 +318,6 @@ void BM_mesh_bm_from_me(BMesh *bm, const Mesh *me, const struct BMeshFromMeshPar
CustomData_bmesh_merge(&mesh_ldata, &bm->ldata, mask.lmask, CD_SET_DEFAULT, bm, BM_LOOP);
}
const Vector<MeshToBMeshLayerInfo> vert_info = mesh_to_bm_copy_info_calc(mesh_vdata, bm->vdata);
const Vector<MeshToBMeshLayerInfo> edge_info = mesh_to_bm_copy_info_calc(mesh_edata, bm->edata);
const Vector<MeshToBMeshLayerInfo> poly_info = mesh_to_bm_copy_info_calc(mesh_pdata, bm->pdata);
const Vector<MeshToBMeshLayerInfo> loop_info = mesh_to_bm_copy_info_calc(mesh_ldata, bm->ldata);
/* -------------------------------------------------------------------- */
/* Shape Key */
int tot_shape_keys = 0;
@ -407,6 +402,10 @@ void BM_mesh_bm_from_me(BMesh *bm, const Mesh *me, const struct BMeshFromMeshPar
}
}
const Vector<MeshToBMeshLayerInfo> vert_info = mesh_to_bm_copy_info_calc(mesh_vdata, bm->vdata);
const Vector<MeshToBMeshLayerInfo> edge_info = mesh_to_bm_copy_info_calc(mesh_edata, bm->edata);
const Vector<MeshToBMeshLayerInfo> poly_info = mesh_to_bm_copy_info_calc(mesh_pdata, bm->pdata);
const Vector<MeshToBMeshLayerInfo> loop_info = mesh_to_bm_copy_info_calc(mesh_ldata, bm->ldata);
if (is_new) {
CustomData_bmesh_init_pool(&bm->vdata, me->totvert, BM_VERT);
CustomData_bmesh_init_pool(&bm->edata, me->totedge, BM_EDGE);

View File

@ -557,7 +557,7 @@ static void extract_edituv_fdots_iter_poly_mesh(const MeshRenderData *mr,
const bool mp_select = (efa) ? BM_elem_flag_test_bool(efa, BM_ELEM_SELECT) : false;
if (mr->use_subsurf_fdots) {
const BitVector<> &facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const BitSpan facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const MLoop *mloop = mr->mloop;
const int ml_index_end = mp->loopstart + mp->totloop;

View File

@ -46,7 +46,7 @@ static void extract_fdots_iter_poly_mesh(const MeshRenderData *mr,
GPUIndexBufBuilder *elb = static_cast<GPUIndexBufBuilder *>(_userdata);
if (mr->use_subsurf_fdots) {
const BitVector<> &facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const BitSpan facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const MLoop *mloop = mr->mloop;
const int ml_index_end = mp->loopstart + mp->totloop;

View File

@ -102,7 +102,7 @@ static void extract_edge_fac_iter_poly_mesh(const MeshRenderData *mr,
void *_data)
{
MeshExtract_EdgeFac_Data *data = static_cast<MeshExtract_EdgeFac_Data *>(_data);
const BitVector<> &optimal_display_edges = mr->me->runtime->subsurf_optimal_display_edges;
const BitSpan optimal_display_edges = mr->me->runtime->subsurf_optimal_display_edges;
const MLoop *mloop = mr->mloop;
const int ml_index_end = mp->loopstart + mp->totloop;

View File

@ -76,7 +76,7 @@ static void extract_fdots_pos_iter_poly_mesh(const MeshRenderData *mr,
zero_v3(co);
const MLoop *mloop = mr->mloop;
const BitVector<> &facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const BitSpan facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const int ml_index_end = mp->loopstart + mp->totloop;
for (int ml_index = mp->loopstart; ml_index < ml_index_end; ml_index += 1) {

View File

@ -74,7 +74,7 @@ static void extract_fdots_uv_iter_poly_mesh(const MeshRenderData *mr,
void *_data)
{
MeshExtract_FdotUV_Data *data = static_cast<MeshExtract_FdotUV_Data *>(_data);
const BitVector<> &facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const BitSpan facedot_tags = mr->me->runtime->subsurf_face_dot_tags;
const MLoop *mloop = mr->mloop;
const int ml_index_end = mp->loopstart + mp->totloop;

View File

@ -1027,7 +1027,7 @@ static void CURVES_OT_select_more(wmOperatorType *ot)
{
ot->name = "Select More";
ot->idname = __func__;
ot->description = "Grow the selection by one point.";
ot->description = "Grow the selection by one point";
ot->exec = select_more_exec;
ot->poll = editable_curves_point_domain_poll;

View File

@ -1534,8 +1534,13 @@ void ED_mesh_split_faces(Mesh *mesh)
const Span<MPoly> polys = mesh->polys();
const Span<MLoop> loops = mesh->loops();
const float split_angle = (mesh->flag & ME_AUTOSMOOTH) != 0 ? mesh->smoothresh : float(M_PI);
const bke::AttributeAccessor attributes = mesh->attributes();
const VArray<bool> mesh_sharp_edges = attributes.lookup_or_default<bool>(
"sharp_edge", ATTR_DOMAIN_EDGE, false);
Array<bool> sharp_edges(mesh->totedge);
mesh_sharp_edges.materialize(sharp_edges);
Array<bool> sharp_edges(mesh->totedge, false);
BKE_edges_sharp_from_angle_set(mesh->totedge,
loops.data(),
loops.size(),

View File

@ -83,7 +83,7 @@ bool SCULPT_is_automasking_mode_enabled(const Sculpt *sd,
bool SCULPT_is_automasking_enabled(const Sculpt *sd, const SculptSession *ss, const Brush *br)
{
if (br && SCULPT_stroke_is_dynamic_topology(ss, br)) {
if (ss && br && SCULPT_stroke_is_dynamic_topology(ss, br)) {
return false;
}
if (SCULPT_is_automasking_mode_enabled(sd, br, BRUSH_AUTOMASKING_TOPOLOGY)) {

View File

@ -1571,8 +1571,13 @@ static int sculpt_cloth_filter_invoke(bContext *C, wmOperator *op, const wmEvent
SCULPT_stroke_id_next(ob);
SCULPT_undo_push_begin(ob, op);
SCULPT_filter_cache_init(
C, ob, sd, SCULPT_UNDO_COORDS, event->mval, RNA_float_get(op->ptr, "area_normal_radius"));
SCULPT_filter_cache_init(C,
ob,
sd,
SCULPT_UNDO_COORDS,
event->mval,
RNA_float_get(op->ptr, "area_normal_radius"),
RNA_float_get(op->ptr, "strength"));
ss->filter_cache->automasking = SCULPT_automasking_cache_init(sd, nullptr, ob);

View File

@ -347,8 +347,13 @@ static int sculpt_color_filter_invoke(bContext *C, wmOperator *op, const wmEvent
return OPERATOR_CANCELLED;
}
SCULPT_filter_cache_init(
C, ob, sd, SCULPT_UNDO_COLOR, event->mval, RNA_float_get(op->ptr, "area_normal_radius"));
SCULPT_filter_cache_init(C,
ob,
sd,
SCULPT_UNDO_COLOR,
event->mval,
RNA_float_get(op->ptr, "area_normal_radius"),
RNA_float_get(op->ptr, "strength"));
FilterCache *filter_cache = ss->filter_cache;
filter_cache->active_face_set = SCULPT_FACE_SET_NONE;
filter_cache->automasking = SCULPT_automasking_cache_init(sd, nullptr, ob);

View File

@ -8,7 +8,9 @@
#include "MEM_guardedalloc.h"
#include "BLI_hash.h"
#include "BLI_index_range.hh"
#include "BLI_math.h"
#include "BLI_math_vector_types.hh"
#include "BLI_task.h"
#include "DNA_meshdata_types.h"
@ -23,6 +25,7 @@
#include "WM_api.h"
#include "WM_types.h"
#include "ED_screen.h"
#include "ED_view3d.h"
#include "paint_intern.h"
@ -30,14 +33,20 @@
#include "RNA_access.h"
#include "RNA_define.h"
#include "RNA_prototypes.h"
#include "UI_interface.h"
#include "UI_resources.h"
#include "bmesh.h"
#include <cmath>
#include <cstdlib>
using blender::float2;
using blender::float3;
using blender::IndexRange;
void SCULPT_filter_to_orientation_space(float r_v[3], FilterCache *filter_cache)
{
switch (filter_cache->orientation) {
@ -96,13 +105,14 @@ void SCULPT_filter_cache_init(bContext *C,
Sculpt *sd,
const int undo_type,
const int mval[2],
float area_normal_radius)
float area_normal_radius,
float start_strength)
{
SculptSession *ss = ob->sculpt;
PBVH *pbvh = ob->sculpt->pbvh;
ss->filter_cache = MEM_cnew<FilterCache>(__func__);
ss->filter_cache->start_filter_strength = start_strength;
ss->filter_cache->random_seed = rand();
if (undo_type == SCULPT_UNDO_COLOR) {
@ -338,6 +348,15 @@ static bool sculpt_mesh_filter_needs_pmap(eSculptMeshFilterType filter_type)
MESH_FILTER_SHARPEN);
}
static bool sculpt_mesh_filter_is_continuous(eSculptMeshFilterType type)
{
return (ELEM(type,
MESH_FILTER_SHARPEN,
MESH_FILTER_SMOOTH,
MESH_FILTER_RELAX,
MESH_FILTER_RELAX_FACE_SETS));
}
static void mesh_filter_task_cb(void *__restrict userdata,
const int i,
const TaskParallelTLS *__restrict /*tls*/)
@ -379,7 +398,8 @@ static void mesh_filter_task_cb(void *__restrict userdata,
continue;
}
if (ELEM(filter_type, MESH_FILTER_RELAX, MESH_FILTER_RELAX_FACE_SETS)) {
if (ELEM(filter_type, MESH_FILTER_RELAX, MESH_FILTER_RELAX_FACE_SETS) ||
ss->filter_cache->no_orig_co) {
copy_v3_v3(orig_co, vd.co);
}
else {
@ -681,34 +701,16 @@ static void mesh_filter_surface_smooth_displace_task_cb(void *__restrict userdat
BKE_pbvh_vertex_iter_end;
}
static int sculpt_mesh_filter_modal(bContext *C, wmOperator *op, const wmEvent *event)
static void sculpt_mesh_filter_apply(bContext *C, wmOperator *op)
{
Object *ob = CTX_data_active_object(C);
Depsgraph *depsgraph = CTX_data_depsgraph_pointer(C);
SculptSession *ss = ob->sculpt;
Sculpt *sd = CTX_data_tool_settings(C)->sculpt;
eSculptMeshFilterType filter_type = eSculptMeshFilterType(RNA_enum_get(op->ptr, "type"));
float filter_strength = RNA_float_get(op->ptr, "strength");
if (event->type == LEFTMOUSE && event->val == KM_RELEASE) {
SCULPT_filter_cache_free(ss);
SCULPT_undo_push_end(ob);
SCULPT_flush_update_done(C, ob, SCULPT_UPDATE_COORDS);
return OPERATOR_FINISHED;
}
if (event->type != MOUSEMOVE) {
return OPERATOR_RUNNING_MODAL;
}
const float len = event->prev_press_xy[0] - event->xy[0];
filter_strength = filter_strength * -len * 0.001f * UI_DPI_FAC;
SCULPT_vertex_random_access_ensure(ss);
bool needs_pmap = sculpt_mesh_filter_needs_pmap(filter_type);
BKE_sculpt_update_object_for_edit(depsgraph, ob, needs_pmap, false, false);
SculptThreadedTaskData data{};
data.sd = sd;
data.ob = ob;
@ -740,22 +742,136 @@ static int sculpt_mesh_filter_modal(bContext *C, wmOperator *op, const wmEvent *
}
SCULPT_flush_update_step(C, SCULPT_UPDATE_COORDS);
}
static void sculpt_mesh_update_strength(wmOperator *op,
SculptSession *ss,
float2 prev_press_mouse,
float2 mouse)
{
const float len = prev_press_mouse[0] - mouse[0];
float filter_strength = ss->filter_cache->start_filter_strength * -len * 0.001f * UI_DPI_FAC;
RNA_float_set(op->ptr, "strength", filter_strength);
}
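For intuition, a worked example of the drag-to-strength mapping above, using invented pixel values and taking `UI_DPI_FAC` as 1.0f:
/* Sketch only: the mouse was dragged 250 px to the right of the press position. */
const float start_strength = 1.0f;
const float len = 100.0f - 350.0f; /* prev_press_x - mouse_x == -250. */
const float strength = start_strength * -len * 0.001f; /* == 0.25f. */
/* Dragging left instead makes `len` positive, giving a negative, inverted strength. */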
static void sculpt_mesh_filter_apply_with_history(bContext *C, wmOperator *op)
{
/* Event history is only stored for continuous filters (see `sculpt_mesh_filter_is_continuous`). */
if (!RNA_collection_length(op->ptr, "event_history")) {
sculpt_mesh_filter_apply(C, op);
return;
}
Object *ob = CTX_data_active_object(C);
SculptSession *ss = ob->sculpt;
float2 start_mouse;
bool first = true;
float initial_strength = ss->filter_cache->start_filter_strength;
RNA_BEGIN (op->ptr, item, "event_history") {
float2 mouse;
RNA_float_get_array(&item, "mouse_event", mouse);
if (first) {
first = false;
start_mouse = mouse;
continue;
}
sculpt_mesh_update_strength(op, ss, start_mouse, mouse);
sculpt_mesh_filter_apply(C, op);
}
RNA_END;
RNA_float_set(op->ptr, "strength", initial_strength);
}
static void sculpt_mesh_filter_end(bContext *C, wmOperator * /*op*/)
{
Object *ob = CTX_data_active_object(C);
SculptSession *ss = ob->sculpt;
SCULPT_filter_cache_free(ss);
SCULPT_undo_push_end(ob);
SCULPT_flush_update_done(C, ob, SCULPT_UPDATE_COORDS);
}
static int sculpt_mesh_filter_modal(bContext *C, wmOperator *op, const wmEvent *event)
{
Object *ob = CTX_data_active_object(C);
Depsgraph *depsgraph = CTX_data_depsgraph_pointer(C);
SculptSession *ss = ob->sculpt;
const eSculptMeshFilterType filter_type = eSculptMeshFilterType(RNA_enum_get(op->ptr, "type"));
if (event->type == LEFTMOUSE && event->val == KM_RELEASE) {
float initial_strength = ss->filter_cache->start_filter_strength;
sculpt_mesh_filter_end(C, op);
/* When an event history is stored, restore the starting strength: redo reconstructs the
 * per-step strengths from the history instead of using the final dragged value. */
if (sculpt_mesh_filter_is_continuous(filter_type)) {
RNA_float_set(op->ptr, "strength", initial_strength);
}
return OPERATOR_FINISHED;
}
if (event->type != MOUSEMOVE) {
return OPERATOR_RUNNING_MODAL;
}
/* NOTE: Some filter types are continuous; for these an event history
 * is stored in RNA.
 * This way the user can tweak the last operator properties
 * or repeat the operator and get the expected result. */
if (sculpt_mesh_filter_is_continuous(filter_type)) {
if (RNA_collection_length(op->ptr, "event_history") == 0) {
/* First entry is the start mouse position, event->prev_press_xy. */
PointerRNA startptr;
RNA_collection_add(op->ptr, "event_history", &startptr);
float2 mouse_start(float(event->prev_press_xy[0]), float(event->prev_press_xy[1]));
RNA_float_set_array(&startptr, "mouse_event", mouse_start);
}
PointerRNA itemptr;
RNA_collection_add(op->ptr, "event_history", &itemptr);
float2 mouse(float(event->xy[0]), float(event->xy[1]));
RNA_float_set_array(&itemptr, "mouse_event", mouse);
RNA_float_set(&itemptr, "pressure", WM_event_tablet_data(event, nullptr, nullptr));
}
float2 prev_mval(float(event->prev_press_xy[0]), float(event->prev_press_xy[1]));
float2 mval(float(event->xy[0]), float(event->xy[1]));
sculpt_mesh_update_strength(op, ss, prev_mval, mval);
bool needs_pmap = sculpt_mesh_filter_needs_pmap(filter_type);
BKE_sculpt_update_object_for_edit(depsgraph, ob, needs_pmap, false, false);
sculpt_mesh_filter_apply(C, op);
return OPERATOR_RUNNING_MODAL;
}
static int sculpt_mesh_filter_invoke(bContext *C, wmOperator *op, const wmEvent *event)
/* Returns OPERATOR_PASS_THROUGH on success. */
static int sculpt_mesh_filter_start(bContext *C, wmOperator *op)
{
Object *ob = CTX_data_active_object(C);
Depsgraph *depsgraph = CTX_data_depsgraph_pointer(C);
Sculpt *sd = CTX_data_tool_settings(C)->sculpt;
int mval[2];
RNA_int_get_array(op->ptr, "start_mouse", mval);
const eSculptMeshFilterType filter_type = eSculptMeshFilterType(RNA_enum_get(op->ptr, "type"));
const bool use_automasking = SCULPT_is_automasking_enabled(sd, nullptr, nullptr);
const bool needs_topology_info = sculpt_mesh_filter_needs_pmap(filter_type) || use_automasking;
BKE_sculpt_update_object_for_edit(depsgraph, ob, needs_topology_info, false, false);
SculptSession *ss = ob->sculpt;
const eMeshFilterDeformAxis deform_axis = eMeshFilterDeformAxis(
RNA_enum_get(op->ptr, "deform_axis"));
const eSculptMeshFilterType filter_type = eSculptMeshFilterType(RNA_enum_get(op->ptr, "type"));
const bool use_automasking = SCULPT_is_automasking_enabled(sd, ss, nullptr);
const bool needs_topology_info = sculpt_mesh_filter_needs_pmap(filter_type) || use_automasking;
if (deform_axis == 0) {
/* All axes are disabled, so the filter is not going to produce any deformation. */
@ -768,21 +884,25 @@ static int sculpt_mesh_filter_invoke(bContext *C, wmOperator *op, const wmEvent
/* Update the active face set manually as the paint cursor is not enabled when using the Mesh
* Filter Tool. */
float mval_fl[2] = {float(event->mval[0]), float(event->mval[1])};
float mval_fl[2] = {float(mval[0]), float(mval[1])};
SculptCursorGeometryInfo sgi;
SCULPT_cursor_geometry_info_update(C, &sgi, mval_fl, false);
}
SCULPT_vertex_random_access_ensure(ss);
BKE_sculpt_update_object_for_edit(depsgraph, ob, needs_topology_info, false, false);
if (needs_topology_info) {
SCULPT_boundary_info_ensure(ob);
}
SCULPT_undo_push_begin(ob, op);
SCULPT_filter_cache_init(
C, ob, sd, SCULPT_UNDO_COORDS, event->mval, RNA_float_get(op->ptr, "area_normal_radius"));
SCULPT_filter_cache_init(C,
ob,
sd,
SCULPT_UNDO_COORDS,
mval,
RNA_float_get(op->ptr, "area_normal_radius"),
RNA_float_get(op->ptr, "strength"));
FilterCache *filter_cache = ss->filter_cache;
filter_cache->active_face_set = SCULPT_FACE_SET_NONE;
@ -826,12 +946,56 @@ static int sculpt_mesh_filter_invoke(bContext *C, wmOperator *op, const wmEvent
RNA_enum_get(op->ptr, "orientation"));
ss->filter_cache->orientation = orientation;
WM_event_add_modal_handler(C, op);
return OPERATOR_RUNNING_MODAL;
return OPERATOR_PASS_THROUGH;
}
static int sculpt_mesh_filter_invoke(bContext *C, wmOperator *op, const wmEvent *event)
{
RNA_int_set_array(op->ptr, "start_mouse", event->mval);
int ret = sculpt_mesh_filter_start(C, op);
if (ret == OPERATOR_PASS_THROUGH) {
WM_event_add_modal_handler(C, op);
return OPERATOR_RUNNING_MODAL;
}
return ret;
}
static int sculpt_mesh_filter_exec(bContext *C, wmOperator *op)
{
int ret = sculpt_mesh_filter_start(C, op);
if (ret == OPERATOR_PASS_THROUGH) {
Object *ob = CTX_data_active_object(C);
SculptSession *ss = ob->sculpt;
int iterations = RNA_int_get(op->ptr, "iteration_count");
bool has_history = RNA_collection_length(op->ptr, "event_history") > 0;
if (!has_history) {
ss->filter_cache->no_orig_co = true;
}
for (int i = 0; i < iterations; i++) {
sculpt_mesh_filter_apply_with_history(C, op);
ss->filter_cache->no_orig_co = true;
}
sculpt_mesh_filter_end(C, op);
return OPERATOR_FINISHED;
}
return ret;
}
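To clarify the exec path, here is a hedged, unrolled sketch of a redo with `iteration_count == 3` and no stored event history (names as in the code above):
ss->filter_cache->no_orig_co = true; /* No history: accumulate on the current coordinates. */
sculpt_mesh_filter_apply_with_history(C, op); /* Without a history this is a single apply. */
sculpt_mesh_filter_apply_with_history(C, op);
sculpt_mesh_filter_apply_with_history(C, op);
sculpt_mesh_filter_end(C, op); /* Free the cache, close the undo step, flush updates. */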
void SCULPT_mesh_filter_properties(wmOperatorType *ot)
{
RNA_def_int_array(
ot->srna, "start_mouse", 2, nullptr, 0, 1 << 14, "Starting Mouse", "", 0, 1 << 14);
RNA_def_float(
ot->srna,
"area_normal_radius",
@ -844,6 +1008,31 @@ void SCULPT_mesh_filter_properties(wmOperatorType *ot)
1.0);
RNA_def_float(
ot->srna, "strength", 1.0f, -10.0f, 10.0f, "Strength", "Filter strength", -10.0f, 10.0f);
RNA_def_int(ot->srna,
"iteration_count",
1,
1,
10000,
"Repeat",
"How many times to repeat the filter",
1,
100);
/* Continuous filters (smooth, relax, sharpen) require the entire event history. */
PropertyRNA *prop = RNA_def_collection_runtime(
ot->srna, "event_history", &RNA_OperatorStrokeElement, "", "");
RNA_def_property_flag(prop, PropertyFlag(int(PROP_HIDDEN) | int(PROP_SKIP_SAVE)));
}
static void sculpt_mesh_ui_exec(bContext * /*C*/, wmOperator *op)
{
uiLayout *layout = op->layout;
uiItemR(layout, op->ptr, "strength", 0, nullptr, ICON_NONE);
uiItemR(layout, op->ptr, "iteration_count", 0, nullptr, ICON_NONE);
uiItemR(layout, op->ptr, "orientation", 0, nullptr, ICON_NONE);
layout = uiLayoutRow(layout, true);
uiItemR(layout, op->ptr, "deform_axis", UI_ITEM_R_EXPAND, nullptr, ICON_NONE);
}
void SCULPT_OT_mesh_filter(wmOperatorType *ot)
@ -857,6 +1046,8 @@ void SCULPT_OT_mesh_filter(wmOperatorType *ot)
ot->invoke = sculpt_mesh_filter_invoke;
ot->modal = sculpt_mesh_filter_modal;
ot->poll = SCULPT_mode_poll;
ot->exec = sculpt_mesh_filter_exec;
ot->ui = sculpt_mesh_ui_exec;
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;

View File

@ -494,6 +494,8 @@ struct FilterCache {
float (*pre_smoothed_color)[4];
ViewContext vc;
float start_filter_strength;
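/* Assumed meaning, from the usage in `mesh_filter_task_cb` and `sculpt_mesh_filter_exec`:
 * when set, the filter accumulates on the current coordinates instead of restarting from
 * the stored original coordinates. */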
bool no_orig_co;
};
/**
@ -1427,7 +1429,8 @@ void SCULPT_filter_cache_init(bContext *C,
Sculpt *sd,
int undo_type,
const int mval[2],
float area_normal_radius);
float area_normal_radius,
float start_strength);
void SCULPT_filter_cache_free(SculptSession *ss);
void SCULPT_mesh_filter_properties(wmOperatorType *ot);

View File

@ -229,6 +229,8 @@ static int sculpt_symmetrize_exec(bContext *C, wmOperator *op)
return OPERATOR_CANCELLED;
}
SCULPT_topology_islands_invalidate(ss);
/* Redraw. */
SCULPT_pbvh_clear(ob);
WM_event_add_notifier(C, NC_OBJECT | ND_DRAW, ob);

View File

@ -58,7 +58,7 @@ void ED_sculpt_init_transform(bContext *C, Object *ob, const int mval[2], const
SCULPT_vertex_random_access_ensure(ss);
SCULPT_filter_cache_init(C, ob, sd, SCULPT_UNDO_COORDS, mval, 5.0);
SCULPT_filter_cache_init(C, ob, sd, SCULPT_UNDO_COORDS, mval, 5.0, 1.0f);
if (sd->transform_mode == SCULPT_TRANSFORM_MODE_RADIUS_ELASTIC) {
ss->filter_cache->transform_displacement_mode = SCULPT_TRANSFORM_DISPLACEMENT_INCREMENTAL;

View File

@ -590,7 +590,7 @@ void ED_animedit_unlink_action(
if (nlt->strips.first == NULL) {
BLI_assert(nstrip == NULL);
BKE_nlatrack_free(&adt->nla_tracks, nlt, true);
BKE_nlatrack_remove_and_free(&adt->nla_tracks, nlt, true);
}
}
}

View File

@ -400,6 +400,21 @@ static void saction_channel_region_message_subscribe(const wmRegionMessageSubscr
}
}
static void action_clamp_scroll(ARegion *region)
{
View2D *v2d = &region->v2d;
const float cur_height_y = BLI_rctf_size_y(&v2d->cur);
if (BLI_rctf_size_y(&v2d->cur) > BLI_rctf_size_y(&v2d->tot)) {
v2d->cur.ymin = -cur_height_y;
v2d->cur.ymax = 0;
}
else if (v2d->cur.ymin < v2d->tot.ymin) {
v2d->cur.ymin = v2d->tot.ymin;
v2d->cur.ymax = v2d->cur.ymin + cur_height_y;
}
}
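To make the two clamp branches concrete, a small numeric sketch with invented values:
/* Visible height 140 vs. total height 100: the first branch pins the view to
 * cur.ymin = -140, cur.ymax = 0.
 * Visible height 80, cur.ymin = -120, tot.ymin = -100: the second branch shifts
 * the view to cur.ymin = -100, cur.ymax = -20, preserving the visible height. */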
static void action_main_region_listener(const wmRegionListenerParams *params)
{
ARegion *region = params->region;
@ -860,6 +875,13 @@ static void action_blend_write(BlendWriter *writer, SpaceLink *sl)
BLO_write_struct(writer, SpaceAction, sl);
}
static void action_main_region_view2d_changed(const bContext *UNUSED(C), ARegion *region)
{
/* V2D_KEEPTOT_STRICT cannot be used to clamp scrolling
* because it also clamps the x-axis to 0. */
action_clamp_scroll(region);
}
void ED_spacetype_action(void)
{
SpaceType *st = MEM_callocN(sizeof(SpaceType), "spacetype action");
@ -892,6 +914,7 @@ void ED_spacetype_action(void)
art->draw_overlay = action_main_region_draw_overlay;
art->listener = action_main_region_listener;
art->message_subscribe = saction_main_region_message_subscribe;
art->on_view2d_changed = action_main_region_view2d_changed;
art->keymapflag = ED_KEYMAP_GIZMO | ED_KEYMAP_VIEW2D | ED_KEYMAP_ANIMATION | ED_KEYMAP_FRAMES;
BLI_addhead(&st->regiontypes, art);

View File

@ -793,7 +793,7 @@ static void buttons_area_listener(const wmSpaceTypeListenerParams *params)
}
break;
case NC_ID:
if (wmn->action == NA_RENAME) {
if (ELEM(wmn->action, NA_RENAME, NA_EDITED)) {
ED_area_tag_redraw(area);
}
break;

View File

@ -785,7 +785,12 @@ void uiTemplateMovieclipInformation(uiLayout *layout,
uiLayout *col = uiLayoutColumn(layout, false);
uiLayoutSetAlignment(col, UI_LAYOUT_ALIGN_RIGHT);
ImBuf *ibuf = BKE_movieclip_get_ibuf_flag(clip, user, clip->flag, MOVIECLIP_CACHE_SKIP);
/* NOTE: Put the frame into the cache. If this panel is drawn, the clip display and the metadata
 * panel will most likely be drawn as well, so skipping the cache here is not really a memory
 * saver. Worse, skipping the cache can hurt performance depending on the order in which the
 * panels and the main area are drawn: if this template is drawn first and then the main area,
 * the frame will be read and processed twice. */
ImBuf *ibuf = BKE_movieclip_get_ibuf_flag(clip, user, clip->flag, 0);
int width, height;
/* Display frame dimensions, channels number and buffer type. */

View File

@ -148,9 +148,6 @@ static void file_but_enable_drag(uiBut *but,
(file->typeflag & FILE_TYPE_ASSET) != 0) {
char blend_path[FILE_MAX_LIBEXTRA];
if (BLO_library_path_explode(path, blend_path, NULL, NULL)) {
const FileAssetSelectParams *asset_params = ED_fileselect_get_asset_params(sfile);
BLI_assert(asset_params != NULL);
const int import_method = ED_fileselect_asset_import_method_get(sfile, file);
BLI_assert(import_method > -1);

View File

@ -1890,6 +1890,11 @@ static bool euler_filter_single_channel(FCurve *fcu)
return false;
}
/* Skip baked FCurves. */
if (fcu->bezt == NULL) {
return false;
}
/* `prev` follows bezt, bezt = "current" point to be fixed. */
/* Our method depends on determining a "difference" from the previous vert. */
bool is_modified = false;

View File

@ -736,7 +736,7 @@ static int nlaedit_delete_tracks_exec(bContext *C, wmOperator *UNUSED(op))
}
/* call delete on this track - deletes all strips too */
BKE_nlatrack_free(&adt->nla_tracks, nlt, true);
BKE_nlatrack_remove_and_free(&adt->nla_tracks, nlt, true);
ale->update = ANIM_UPDATE_DEPS;
}
}

View File

@ -60,7 +60,7 @@ bool space_node_view_flag(
int tot = 0;
bool has_frame = false;
if (snode.edittree) {
LISTBASE_FOREACH (const bNode *, node, &snode.edittree->nodes) {
for (const bNode *node : snode.edittree->all_nodes()) {
if ((node->flag & node_flag) == node_flag) {
BLI_rctf_union(&cur_new, &node->runtime->totr);
tot++;

View File

@ -821,7 +821,7 @@ static void node_region_listener(const wmRegionListenerParams *params)
}
break;
case NC_ID:
if (wmn->action == NA_RENAME) {
if (ELEM(wmn->action, NA_RENAME, NA_EDITED)) {
ED_region_tag_redraw(region);
}
break;

View File

@ -41,7 +41,18 @@ void GLIndexBuf::bind()
void GLIndexBuf::bind_as_ssbo(uint binding)
{
bind();
if (ibo_id_ == 0 || data_ != nullptr) {
/* Calling `glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo_id_)` changes the index buffer
* of the currently bound VAO.
*
* In the OpenGL backend, the VAO state persists even after `GLVertArray::update_bindings`
* is called.
*
* NOTE: For safety, we could call `glBindVertexArray(0)` right after drawing a `GPUBatch`.
* However, for performance reasons, we have chosen not to do so. */
glBindVertexArray(0);
bind();
}
BLI_assert(ibo_id_ != 0);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, binding, ibo_id_);
}
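The extra unbind relies on a core OpenGL rule: the `GL_ELEMENT_ARRAY_BUFFER` binding is recorded per-VAO rather than globally. A minimal plain-GL illustration with hypothetical handles `vao`, `ibo_a` and `ibo_b`:
glBindVertexArray(vao);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo_a); /* Recorded inside `vao`. */
glBindVertexArray(0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo_b); /* Only changes the default state. */
glBindVertexArray(vao); /* Indexed draws here still read from `ibo_a`. */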

View File

@ -640,6 +640,10 @@ void VKShader::compute_shader_from_glsl(MutableSpan<const char *> sources)
build_shader_module(sources, shaderc_compute_shader, &compute_module_);
}
void VKShader::warm_cache(int /*limit*/)
{
}
bool VKShader::finalize(const shader::ShaderCreateInfo *info)
{
if (compilation_failed_) {

View File

@ -35,7 +35,7 @@ class VKShader : public Shader {
void fragment_shader_from_glsl(MutableSpan<const char *> sources) override;
void compute_shader_from_glsl(MutableSpan<const char *> sources) override;
bool finalize(const shader::ShaderCreateInfo *info = nullptr) override;
void warm_cache(int limit) override{};
void warm_cache(int limit) override;
void transform_feedback_names_set(Span<const char *> name_list,
eGPUShaderTFBType geom_type) override;

View File

@ -593,7 +593,7 @@ static void rna_NlaTrack_remove(
return;
}
BKE_nlatrack_free(&adt->nla_tracks, track, true);
BKE_nlatrack_remove_and_free(&adt->nla_tracks, track, true);
RNA_POINTER_INVALIDATE(track_ptr);
WM_event_add_notifier(C, NC_ANIMATION | ND_NLA | NA_REMOVED, NULL);

View File

@ -93,7 +93,7 @@ static void node_init(bNodeTree * /*tree*/, bNode *node)
class SocketSearchOp {
public:
std::string socket_name;
const StringRef socket_name;
eNodeSocketDatatype data_type;
NodeCompareOperation operation;
NodeCompareMode mode = NODE_COMPARE_MODE_ELEMENT;
@ -107,40 +107,62 @@ class SocketSearchOp {
}
};
static std::optional<eNodeSocketDatatype> get_compare_type_for_operation(
const eNodeSocketDatatype type, const NodeCompareOperation operation)
{
switch (type) {
case SOCK_BOOLEAN:
if (ELEM(operation, NODE_COMPARE_COLOR_BRIGHTER, NODE_COMPARE_COLOR_DARKER)) {
return SOCK_RGBA;
}
return SOCK_INT;
case SOCK_INT:
case SOCK_FLOAT:
case SOCK_VECTOR:
if (ELEM(operation, NODE_COMPARE_COLOR_BRIGHTER, NODE_COMPARE_COLOR_DARKER)) {
return SOCK_RGBA;
}
return type;
case SOCK_RGBA:
if (!ELEM(operation,
NODE_COMPARE_COLOR_BRIGHTER,
NODE_COMPARE_COLOR_DARKER,
NODE_COMPARE_EQUAL,
NODE_COMPARE_NOT_EQUAL)) {
return SOCK_VECTOR;
}
return type;
case SOCK_STRING:
if (!ELEM(operation, NODE_COMPARE_EQUAL, NODE_COMPARE_NOT_EQUAL)) {
return std::nullopt;
}
return type;
default:
BLI_assert_unreachable();
return std::nullopt;
}
}
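A few hedged examples of what this mapping returns (NODE_COMPARE_GREATER_THAN is assumed from the compare-operation enum; the other names appear above):
get_compare_type_for_operation(SOCK_BOOLEAN, NODE_COMPARE_COLOR_DARKER); /* SOCK_RGBA */
get_compare_type_for_operation(SOCK_BOOLEAN, NODE_COMPARE_EQUAL); /* SOCK_INT */
get_compare_type_for_operation(SOCK_STRING, NODE_COMPARE_GREATER_THAN); /* std::nullopt */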
static void node_gather_link_searches(GatherLinkSearchOpParams &params)
{
const eNodeSocketDatatype type = static_cast<eNodeSocketDatatype>(params.other_socket().type);
if (!ELEM(type, SOCK_BOOLEAN, SOCK_FLOAT, SOCK_RGBA, SOCK_VECTOR, SOCK_INT, SOCK_STRING)) {
const eNodeSocketDatatype type = eNodeSocketDatatype(params.other_socket().type);
if (!ELEM(type, SOCK_INT, SOCK_BOOLEAN, SOCK_FLOAT, SOCK_VECTOR, SOCK_RGBA, SOCK_STRING)) {
return;
}
const eNodeSocketDatatype mode_type = (type == SOCK_BOOLEAN) ? SOCK_INT : type;
const bool string_type = (type == SOCK_STRING);
const std::string socket_name = params.in_out() == SOCK_IN ? "A" : "Result";
const StringRef socket_name = params.in_out() == SOCK_IN ? "A" : "Result";
for (const EnumPropertyItem *item = rna_enum_node_compare_operation_items;
item->identifier != nullptr;
item++) {
if (item->name != nullptr && item->identifier[0] != '\0') {
if (!string_type &&
ELEM(item->value, NODE_COMPARE_COLOR_BRIGHTER, NODE_COMPARE_COLOR_DARKER)) {
params.add_item(IFACE_(item->name),
SocketSearchOp{socket_name,
SOCK_RGBA,
static_cast<NodeCompareOperation>(item->value)});
}
else if ((!string_type) ||
(string_type && ELEM(item->value, NODE_COMPARE_EQUAL, NODE_COMPARE_NOT_EQUAL))) {
params.add_item(IFACE_(item->name),
SocketSearchOp{socket_name,
mode_type,
static_cast<NodeCompareOperation>(item->value)});
const NodeCompareOperation operation = NodeCompareOperation(item->value);
if (const std::optional<eNodeSocketDatatype> fixed_type = get_compare_type_for_operation(
type, operation)) {
params.add_item(IFACE_(item->name), SocketSearchOp{socket_name, *fixed_type, operation});
}
}
}
/* Add Angle socket. */
if (!string_type && params.in_out() == SOCK_IN) {
if (params.in_out() == SOCK_IN && type != SOCK_STRING) {
params.add_item(
IFACE_("Angle"),
SocketSearchOp{

View File

@ -78,16 +78,14 @@ bool node_group_poll_instance(const bNode *node,
const bNodeTree *nodetree,
const char **disabled_hint)
{
if (node->typeinfo->poll(node->typeinfo, nodetree, disabled_hint)) {
const bNodeTree *grouptree = (const bNodeTree *)node->id;
if (grouptree) {
return nodeGroupPoll(nodetree, grouptree, disabled_hint);
}
return true; /* without a linked node tree, group node is always ok */
if (!node->typeinfo->poll(node->typeinfo, nodetree, disabled_hint)) {
return false;
}
return false;
const bNodeTree *grouptree = reinterpret_cast<const bNodeTree *>(node->id);
if (!grouptree) {
return true;
}
return nodeGroupPoll(nodetree, grouptree, disabled_hint);
}
bool nodeGroupPoll(const bNodeTree *nodetree,
@ -114,10 +112,9 @@ bool nodeGroupPoll(const bNodeTree *nodetree,
return false;
}
LISTBASE_FOREACH (const bNode *, node, &grouptree->nodes) {
for (const bNode *node : grouptree->all_nodes()) {
if (node->typeinfo->poll_instance &&
!node->typeinfo->poll_instance(
const_cast<bNode *>(node), const_cast<bNodeTree *>(nodetree), r_disabled_hint)) {
!node->typeinfo->poll_instance(node, nodetree, r_disabled_hint)) {
return false;
}
}

View File

@ -141,6 +141,9 @@ static int wm_link_append_flag(wmOperator *op)
if (RNA_boolean_get(op->ptr, "do_reuse_local_id")) {
flag |= BLO_LIBLINK_APPEND_LOCAL_ID_REUSE;
}
if (RNA_boolean_get(op->ptr, "clear_asset_data")) {
flag |= BLO_LIBLINK_APPEND_ASSET_DATA_CLEAR;
}
}
if (RNA_boolean_get(op->ptr, "instance_collections")) {
flag |= BLO_LIBLINK_COLLECTION_INSTANCE;
@ -400,6 +403,12 @@ static void wm_link_append_properties_common(wmOperatorType *ot, bool is_link)
"Re-Use Local Data",
"Try to re-use previously matching appended data-blocks instead of appending a new copy");
RNA_def_property_flag(prop, PROP_SKIP_SAVE | PROP_HIDDEN);
prop = RNA_def_boolean(ot->srna,
"clear_asset_data",
false,
"Clear Asset Data",
"Don't add asset meta-data or tags from the original data-block");
RNA_def_property_flag(prop, PROP_SKIP_SAVE | PROP_HIDDEN);
prop = RNA_def_boolean(ot->srna, "autoselect", true, "Select", "Select new objects");
RNA_def_property_flag(prop, PROP_SKIP_SAVE);