Vulkan: Clearing Framebuffer + Scissors #106044

Merged
Jeroen Bakker merged 49 commits from Jeroen-Bakker/blender:vulkan-framebuffer-clear into main 2023-03-28 11:51:45 +02:00
48 changed files with 502 additions and 385 deletions
Showing only changes of commit c5ec93cd7a

View File

@ -2664,13 +2664,7 @@ static void pointer_handle_enter(void *data,
/* Resetting scroll events is likely unnecessary;
* do this to avoid any possible problems, as it's harmless. */
seat->pointer_scroll.smooth_xy[0] = 0;
seat->pointer_scroll.smooth_xy[1] = 0;
seat->pointer_scroll.discrete_xy[0] = 0;
seat->pointer_scroll.discrete_xy[1] = 0;
seat->pointer_scroll.inverted_xy[0] = false;
seat->pointer_scroll.inverted_xy[1] = false;
seat->pointer_scroll.axis_source = WL_POINTER_AXIS_SOURCE_WHEEL;
seat->pointer_scroll = GWL_SeatStatePointerScroll{};
seat->pointer.wl_surface_window = wl_surface;
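
The replaced field-by-field reset left room for stale state whenever a member was later added to the struct; assigning a value-initialized temporary resets every member to its declared default in one statement. A minimal sketch of the idiom (the struct here is a hypothetical stand-in, not GHOST's real definition):

struct ScrollState {
  double smooth_xy[2] = {0.0, 0.0};
  int discrete_xy[2] = {0, 0};
  bool inverted_xy[2] = {false, false};
  int axis_source = 0; /* e.g. WL_POINTER_AXIS_SOURCE_WHEEL */
};

ScrollState scroll;
scroll.smooth_xy[0] = 42.0;
/* Reset every member, including any added later, to its default. */
scroll = ScrollState{};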

View File

@ -450,10 +450,10 @@ void *MEM_guarded_mallocN(size_t len, const char *str)
#endif
return (++memh);
}
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)mem_in_use);
mem_in_use);
return NULL;
}
@ -463,11 +463,11 @@ void *MEM_guarded_malloc_arrayN(size_t len, size_t size, const char *str)
if (UNLIKELY(!MEM_size_safe_multiply(len, size, &total_size))) {
print_error(
"Malloc array aborted due to integer overflow: "
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total %u\n",
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
SIZET_ARG(size),
str,
(uint)mem_in_use);
mem_in_use);
abort();
return NULL;
}
@ -523,10 +523,10 @@ void *MEM_guarded_mallocN_aligned(size_t len, size_t alignment, const char *str)
#endif
return (++memh);
}
print_error("aligned_malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("aligned_malloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)mem_in_use);
mem_in_use);
return NULL;
}
@ -547,10 +547,10 @@ void *MEM_guarded_callocN(size_t len, const char *str)
#endif
return (++memh);
}
print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)mem_in_use);
mem_in_use);
return NULL;
}
@ -560,11 +560,11 @@ void *MEM_guarded_calloc_arrayN(size_t len, size_t size, const char *str)
if (UNLIKELY(!MEM_size_safe_multiply(len, size, &total_size))) {
print_error(
"Calloc array aborted due to integer overflow: "
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total %u\n",
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
SIZET_ARG(size),
str,
(uint)mem_in_use);
mem_in_use);
abort();
return NULL;
}
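
The `(uint)` casts these hunks remove silently truncated `mem_in_use` once total usage passed 4 GiB; printing through the same SIZET_FORMAT/SIZET_ARG pair already used for `len` reports the full 64-bit value. A small demonstration of the truncation (the two macro definitions here are illustrative; Blender defines them per platform):

#include <stdio.h>
#include <stddef.h>

#define SIZET_FORMAT "%zu"
#define SIZET_ARG(a) ((size_t)(a))

int main(void)
{
  const size_t mem_in_use = (size_t)5 * 1024 * 1024 * 1024; /* 5 GiB */
  printf("total %u\n", (unsigned int)mem_in_use);            /* 1073741824: wrapped */
  printf("total " SIZET_FORMAT "\n", SIZET_ARG(mem_in_use)); /* 5368709120 */
  return 0;
}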

View File

@ -213,10 +213,10 @@ void *MEM_lockfree_callocN(size_t len, const char *str)
return PTR_FROM_MEMHEAD(memh);
}
print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Calloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)memory_usage_current());
memory_usage_current());
return NULL;
}
@ -226,11 +226,11 @@ void *MEM_lockfree_calloc_arrayN(size_t len, size_t size, const char *str)
if (UNLIKELY(!MEM_size_safe_multiply(len, size, &total_size))) {
print_error(
"Calloc array aborted due to integer overflow: "
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total %u\n",
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
SIZET_ARG(size),
str,
(unsigned int)memory_usage_current());
memory_usage_current());
abort();
return NULL;
}
@ -256,10 +256,10 @@ void *MEM_lockfree_mallocN(size_t len, const char *str)
return PTR_FROM_MEMHEAD(memh);
}
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)memory_usage_current());
memory_usage_current());
return NULL;
}
@ -269,11 +269,11 @@ void *MEM_lockfree_malloc_arrayN(size_t len, size_t size, const char *str)
if (UNLIKELY(!MEM_size_safe_multiply(len, size, &total_size))) {
print_error(
"Malloc array aborted due to integer overflow: "
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total %u\n",
"len=" SIZET_FORMAT "x" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
SIZET_ARG(size),
str,
(uint)memory_usage_current());
memory_usage_current());
abort();
return NULL;
}
@ -325,10 +325,10 @@ void *MEM_lockfree_mallocN_aligned(size_t len, size_t alignment, const char *str
return PTR_FROM_MEMHEAD(memh);
}
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total %u\n",
print_error("Malloc returns null: len=" SIZET_FORMAT " in %s, total " SIZET_FORMAT "\n",
SIZET_ARG(len),
str,
(uint)memory_usage_current());
memory_usage_current());
return NULL;
}

View File

@ -21,10 +21,7 @@ __all__ = (
"ImagePreviewCollection",
)
import _bpy
_utils_previews = _bpy._utils_previews
del _bpy
from _bpy import _utils_previews
_uuid_open = set()

View File

@ -379,7 +379,7 @@ class NODE_MT_geometry_node_GEO_MESH_OPERATIONS(Menu):
bl_idname = "NODE_MT_geometry_node_GEO_MESH_OPERATIONS"
bl_label = "Operations"
def draw(self, _context):
def draw(self, context):
layout = self.layout
node_add_menu.add_node_type(layout, "GeometryNodeDualMesh")
node_add_menu.add_node_type(layout, "GeometryNodeEdgePathsToCurves")
@ -389,7 +389,7 @@ class NODE_MT_geometry_node_GEO_MESH_OPERATIONS(Menu):
node_add_menu.add_node_type(layout, "GeometryNodeMeshBoolean")
node_add_menu.add_node_type(layout, "GeometryNodeMeshToCurve")
node_add_menu.add_node_type(layout, "GeometryNodeMeshToPoints")
if _context.preferences.experimental.use_new_volume_nodes:
if context.preferences.experimental.use_new_volume_nodes:
node_add_menu.add_node_type(layout, "GeometryNodeMeshToSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodeMeshToVolume")
node_add_menu.add_node_type(layout, "GeometryNodeScaleElements")
@ -448,14 +448,14 @@ class NODE_MT_category_GEO_POINT(Menu):
bl_idname = "NODE_MT_category_GEO_POINT"
bl_label = "Point"
def draw(self, _context):
def draw(self, context):
layout = self.layout
node_add_menu.add_node_type(layout, "GeometryNodeDistributePointsInVolume")
node_add_menu.add_node_type(layout, "GeometryNodeDistributePointsOnFaces")
layout.separator()
node_add_menu.add_node_type(layout, "GeometryNodePoints")
node_add_menu.add_node_type(layout, "GeometryNodePointsToVertices")
if _context.preferences.experimental.use_new_volume_nodes:
if context.preferences.experimental.use_new_volume_nodes:
node_add_menu.add_node_type(layout, "GeometryNodePointsToSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodePointsToVolume")
layout.separator()
@ -593,11 +593,11 @@ class NODE_MT_category_GEO_VOLUME(Menu):
bl_idname = "NODE_MT_category_GEO_VOLUME"
bl_label = "Volume"
def draw(self, _context):
def draw(self, context):
layout = self.layout
node_add_menu.add_node_type(layout, "GeometryNodeVolumeCube")
node_add_menu.add_node_type(layout, "GeometryNodeVolumeToMesh")
if _context.preferences.experimental.use_new_volume_nodes:
if context.preferences.experimental.use_new_volume_nodes:
layout.separator()
node_add_menu.add_node_type(layout, "GeometryNodeMeanFilterSDFVolume")
node_add_menu.add_node_type(layout, "GeometryNodeOffsetSDFVolume")

View File

@ -104,26 +104,6 @@ void BKE_mesh_ensure_default_orig_index_customdata(struct Mesh *mesh);
*/
void BKE_mesh_ensure_default_orig_index_customdata_no_check(struct Mesh *mesh);
/**
* Find the index of the loop in 'poly' which references vertex,
* returns -1 if not found
*/
int poly_find_loop_from_vert(const struct MPoly *poly, const int *poly_verts, int vert);
/**
* Fill \a r_adj with the loop indices in \a poly adjacent to the
* vertex. Returns the index of the loop matching vertex, or -1 if the
* vertex is not in \a poly
*/
int poly_get_adj_loops_from_vert(const struct MPoly *poly,
const int *corner_verts,
int vert,
int r_adj[2]);
/**
* Return the index of the edge vert that is not equal to \a v. If
* neither edge vertex is equal to \a v, returns -1.
*/
int BKE_mesh_edge_other_vert(const struct MEdge *e, int v);
/**
* Sets each output array element to the edge index if it is a real edge, or -1.
*/

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0-or-later. */
/* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
@ -142,7 +142,71 @@ void edges_sharp_from_angle_set(Span<MPoly> polys,
const float split_angle,
MutableSpan<bool> sharp_edges);
} // namespace blender::bke::mesh
/** \} */
/* -------------------------------------------------------------------- */
/** \name Topology Queries
* \{ */
/**
* Find the index of the previous corner in the polygon, looping to the end if necessary.
* The indices are into the entire corners array, not just the polygon's corners.
*/
inline int poly_corner_prev(const MPoly &poly, const int corner)
{
return corner - 1 + (corner == poly.loopstart) * poly.totloop;
}
/**
* Find the index of the next corner in the polygon, looping to the start if necessary.
* The indices are into the entire corners array, not just the polygon's corners.
*/
inline int poly_corner_next(const MPoly &poly, const int corner)
{
if (corner == poly.loopstart + poly.totloop - 1) {
return poly.loopstart;
}
return corner + 1;
}
/**
* Find the index of the corner in the polygon that uses the given vertex.
* The index is into the entire corners array, not just the polygon's corners.
*/
inline int poly_find_corner_from_vert(const MPoly &poly,
const Span<int> corner_verts,
const int vert)
{
return poly.loopstart + corner_verts.slice(poly.loopstart, poly.totloop).first_index(vert);
}
/**
* Return the vertex indices on either side of the given vertex, ordered based on the winding
* direction of the polygon. The vertex must be in the polygon.
*/
inline int2 poly_find_adjecent_verts(const MPoly &poly,
const Span<int> corner_verts,
const int vert)
{
const int corner = poly_find_corner_from_vert(poly, corner_verts, vert);
return {corner_verts[poly_corner_prev(poly, corner)],
corner_verts[poly_corner_next(poly, corner)]};
}
/**
* Return the index of the edge's vertex that is not \a vert.
* If neither edge vertex is equal to \a vert, return -1.
*/
inline int edge_other_vert(const MEdge &edge, const int vert)
{
if (edge.v1 == vert) {
return edge.v2;
}
if (edge.v2 == vert) {
return edge.v1;
}
return -1;
}
/** \} */
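
These inline queries are the C++ replacements for the C functions removed from BKE_mesh.h above (`poly_find_loop_from_vert`, `poly_get_adj_loops_from_vert`, `BKE_mesh_edge_other_vert`). A usage sketch under assumed data, showing how the wrap-around stays inside one polygon:

/* Sketch: a quad with loopstart=4, totloop=4; corners 4..7 hold vertices {2, 3, 7, 6}. */
static void example(const MPoly &poly, const blender::Span<int> corner_verts)
{
  using namespace blender::bke::mesh;
  const int corner = poly_find_corner_from_vert(poly, corner_verts, 7); /* 6 */
  const int next = poly_corner_next(poly, corner); /* 7: wraps within this polygon only */
  const int prev = poly_corner_prev(poly, corner); /* 5 */
  const blender::int2 adjacent = poly_find_adjecent_verts(poly, corner_verts, 7);
  /* adjacent == {3, 6}: corner_verts[prev] and corner_verts[next]. */
  UNUSED_VARS(next, prev, adjacent);
}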
@ -150,6 +214,8 @@ void edges_sharp_from_angle_set(Span<MPoly> polys,
/** \name Inline Mesh Data Access
* \{ */
} // namespace blender::bke::mesh
inline blender::Span<blender::float3> Mesh::vert_positions() const
{
return {reinterpret_cast<const blender::float3 *>(BKE_mesh_vert_positions(this)), this->totvert};

View File

@ -363,18 +363,5 @@ Array<Vector<int, 2>> build_edge_to_poly_map(Span<MPoly> polys,
int edges_num);
Vector<Vector<int>> build_edge_to_loop_map_resizable(Span<int> corner_edges, int edges_num);
inline int poly_loop_prev(const MPoly &poly, int loop_i)
{
return loop_i - 1 + (loop_i == poly.loopstart) * poly.totloop;
}
inline int poly_loop_next(const MPoly &poly, int loop_i)
{
if (loop_i == poly.loopstart + poly.totloop - 1) {
return poly.loopstart;
}
return loop_i + 1;
}
} // namespace blender::bke::mesh_topology
#endif

View File

@ -51,6 +51,7 @@ struct PaletteColor;
struct Scene;
struct StrokeCache;
struct Sculpt;
struct SculptSession;
struct SubdivCCG;
struct Tex;
struct ToolSettings;
@ -563,6 +564,8 @@ typedef struct SculptAttributePointers {
SculptAttribute *dyntopo_node_id_face;
} SculptAttributePointers;
#ifdef __cplusplus
typedef struct SculptSession {
/* Mesh data (not copied) can come either directly from a Mesh, or from a MultiresDM */
struct { /* Special handling for multires meshes */
@ -576,8 +579,8 @@ typedef struct SculptSession {
/* These are always assigned to base mesh data when using PBVH_FACES and PBVH_GRIDS. */
float (*vert_positions)[3];
const struct MPoly *polys;
const int *corner_verts;
blender::Span<MPoly> polys;
blender::Span<int> corner_verts;
/* These contain the vertex and poly counts of the final mesh. */
int totvert, totpoly;
@ -758,12 +761,14 @@ typedef struct SculptSession {
bool islands_valid; /* Is attrs.topology_island_key valid? */
} SculptSession;
#endif
void BKE_sculptsession_free(struct Object *ob);
void BKE_sculptsession_free_deformMats(struct SculptSession *ss);
void BKE_sculptsession_free_vwpaint_data(struct SculptSession *ss);
void BKE_sculptsession_bm_to_me(struct Object *ob, bool reorder);
void BKE_sculptsession_bm_to_me_for_render(struct Object *object);
int BKE_sculptsession_vertex_count(const SculptSession *ss);
int BKE_sculptsession_vertex_count(const struct SculptSession *ss);
/* Ensure an attribute layer exists. */
SculptAttribute *BKE_sculpt_attribute_ensure(struct Object *ob,
@ -911,6 +916,11 @@ bool BKE_object_attributes_active_color_fill(struct Object *ob,
const float fill_color[4],
bool only_selected);
/** C accessor for #Object::sculpt::pbvh. */
struct PBVH *BKE_object_sculpt_pbvh_get(struct Object *object);
bool BKE_object_sculpt_use_dyntopo(const struct Object *object);
void BKE_object_sculpt_dyntopo_smooth_shading_set(struct Object *object, bool value);
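
`SculptSession` gains `blender::Span` members below, so its definition moves behind `#ifdef __cplusplus`; C translation units then only see the forward declaration and can no longer dereference `ob->sculpt`. These accessors keep the remaining C call sites working. A sketch of a C caller after the change:

/* The struct is opaque to C, so field access goes through the accessor
 * (implemented in a C++ file) instead of `ob->sculpt->pbvh`. */
struct PBVH *pbvh = BKE_object_sculpt_pbvh_get(ob);
if (pbvh != NULL && !BKE_object_sculpt_use_dyntopo(ob)) {
  /* ... regular mesh/grids path ... */
}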
/* paint_canvas.cc */
/**

View File

@ -37,6 +37,7 @@ struct PBVH;
struct PBVHBatches;
struct PBVHNode;
struct PBVH_GPU_Args;
struct SculptSession;
struct SubdivCCG;
struct TaskParallelSettings;
struct Image;

View File

@ -310,6 +310,7 @@ typedef enum SubdivCCGAdjacencyType {
SubdivCCGAdjacencyType BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const int *corner_verts,
int corners_num,
const struct MPoly *mpoly,
int *r_v1,
int *r_v2);

View File

@ -2583,8 +2583,12 @@ const char *CustomData_get_render_layer_name(const CustomData *data, const int t
void CustomData_set_layer_active(CustomData *data, const int type, const int n)
{
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
BLI_assert(uint(n) < uint(layer_num));
data->layers[i].active = n;
}
}
@ -2592,8 +2596,12 @@ void CustomData_set_layer_active(CustomData *data, const int type, const int n)
void CustomData_set_layer_render(CustomData *data, const int type, const int n)
{
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
BLI_assert(uint(n) < uint(layer_num));
data->layers[i].active_rnd = n;
}
}
@ -2601,8 +2609,12 @@ void CustomData_set_layer_render(CustomData *data, const int type, const int n)
void CustomData_set_layer_clone(CustomData *data, const int type, const int n)
{
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
BLI_assert(uint(n) < uint(layer_num));
data->layers[i].active_clone = n;
}
}
@ -2610,8 +2622,12 @@ void CustomData_set_layer_clone(CustomData *data, const int type, const int n)
void CustomData_set_layer_stencil(CustomData *data, const int type, const int n)
{
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
BLI_assert(uint(n) < uint(layer_num));
data->layers[i].active_mask = n;
}
}
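
Two details worth noting in these hunks: `layer_num` is wrapped in `#ifndef NDEBUG` because it only feeds `BLI_assert`, which compiles away in release builds (avoiding an unused-variable warning), and the `uint(n) < uint(layer_num)` comparison folds the `n >= 0` and `n < layer_num` checks into one test, since a negative `n` wraps to a huge unsigned value. A standalone sketch of the comparison trick:

#include <cassert>
#include <cstdint>

/* One unsigned compare covers both bounds. */
static bool index_in_range(const int n, const int num)
{
  return uint32_t(n) < uint32_t(num);
}

int main()
{
  assert(index_in_range(0, 4));
  assert(index_in_range(3, 4));
  assert(!index_in_range(4, 4));
  assert(!index_in_range(-1, 4)); /* -1 wraps to 0xFFFFFFFF. */
}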
@ -2619,48 +2635,64 @@ void CustomData_set_layer_stencil(CustomData *data, const int type, const int n)
void CustomData_set_layer_active_index(CustomData *data, const int type, const int n)
{
const int layer_index = data->typemap[type];
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
const int layer_index = n - data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active = n - layer_index;
BLI_assert(uint(layer_index) < uint(layer_num));
data->layers[i].active = layer_index;
}
}
}
void CustomData_set_layer_render_index(CustomData *data, const int type, const int n)
{
const int layer_index = data->typemap[type];
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
const int layer_index = n - data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active_rnd = n - layer_index;
BLI_assert(uint(layer_index) < uint(layer_num));
data->layers[i].active_rnd = layer_index;
}
}
}
void CustomData_set_layer_clone_index(CustomData *data, const int type, const int n)
{
const int layer_index = data->typemap[type];
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
const int layer_index = n - data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active_clone = n - layer_index;
BLI_assert(uint(layer_index) < uint(layer_num));
data->layers[i].active_clone = layer_index;
}
}
}
void CustomData_set_layer_stencil_index(CustomData *data, const int type, const int n)
{
const int layer_index = data->typemap[type];
#ifndef NDEBUG
const int layer_num = CustomData_number_of_layers(data, type);
#endif
const int layer_index = n - data->typemap[type];
BLI_assert(customdata_typemap_is_valid(data));
for (int i = 0; i < data->totlayer; i++) {
if (data->layers[i].type == type) {
data->layers[i].active_mask = n - layer_index;
BLI_assert(uint(layer_index) < uint(layer_num));
data->layers[i].active_mask = layer_index;
}
}
}
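
Unlike the functions above, these `*_index` variants take `n` as an absolute index into `data->layers`; the stored `active*` values are relative to the first layer of the given type, whose position `data->typemap[type]` records. Moving the subtraction onto `layer_index` lets the new assert bounds-check the per-type result. A worked example with a hypothetical layer layout:

/* data->layers[0] : CD_PROP_FLOAT
 * data->layers[1] : CD_PROP_COLOR   <- data->typemap[CD_PROP_COLOR] == 1
 * data->layers[2] : CD_PROP_COLOR
 * data->layers[3] : CD_PROP_COLOR
 *
 * CustomData_set_layer_active_index(data, CD_PROP_COLOR, 3):
 *   layer_index = n - data->typemap[type] = 3 - 1 = 2   (the third color layer)
 *   assert: uint(2) < uint(CustomData_number_of_layers(data, CD_PROP_COLOR)) == 3
 */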

View File

@ -606,7 +606,7 @@ void adapt_mesh_domain_edge_to_corner_impl(const Mesh &mesh,
/* For every corner, mix the values from the adjacent edges on the face. */
for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
const int loop_index_prev = mesh_topology::poly_loop_prev(poly, loop_index);
const int loop_index_prev = mesh::poly_corner_prev(poly, loop_index);
const int edge = corner_edges[loop_index];
const int edge_prev = corner_edges[loop_index_prev];
mixer.mix_in(loop_index, old_values[edge]);
@ -633,7 +633,7 @@ void adapt_mesh_domain_edge_to_corner_impl(const Mesh &mesh,
for (const int poly_index : range) {
const MPoly &poly = polys[poly_index];
for (const int loop_index : IndexRange(poly.loopstart, poly.totloop)) {
const int loop_index_prev = mesh_topology::poly_loop_prev(poly, loop_index);
const int loop_index_prev = mesh::poly_corner_prev(poly, loop_index);
const int edge = corner_edges[loop_index];
const int edge_prev = corner_edges[loop_index_prev];
if (old_values[edge] && old_values[edge_prev]) {

View File

@ -1505,45 +1505,6 @@ void BKE_mesh_auto_smooth_flag_set(Mesh *me,
}
}
int poly_find_loop_from_vert(const MPoly *poly, const int *poly_corner_verts, int vert)
{
for (int j = 0; j < poly->totloop; j++) {
if (poly_corner_verts[j] == vert) {
return j;
}
}
return -1;
}
int poly_get_adj_loops_from_vert(const MPoly *poly,
const int *corner_verts,
int vert,
int r_adj[2])
{
int corner = poly_find_loop_from_vert(poly, &corner_verts[poly->loopstart], vert);
if (corner != -1) {
/* vertex was found */
r_adj[0] = corner_verts[ME_POLY_LOOP_PREV(poly, corner)];
r_adj[1] = corner_verts[ME_POLY_LOOP_NEXT(poly, corner)];
}
return corner;
}
int BKE_mesh_edge_other_vert(const MEdge *edge, int v)
{
if (edge->v1 == v) {
return edge->v2;
}
if (edge->v2 == v) {
return edge->v1;
}
return -1;
}
void BKE_mesh_looptri_get_real_edges(const MEdge *edges,
const int *corner_verts,
const int *corner_edges,

View File

@ -237,11 +237,12 @@ class MeshFairingContext : public FairingContext {
float r_adj_next[3],
float r_adj_prev[3]) override
{
using namespace blender;
const int vert = corner_verts_[loop];
const MPoly &poly = polys[loop_to_poly_map_[loop]];
const int corner = poly_find_loop_from_vert(&poly, &corner_verts_[poly.loopstart], vert);
copy_v3_v3(r_adj_next, co_[corner_verts_[ME_POLY_LOOP_NEXT(&poly, corner)]]);
copy_v3_v3(r_adj_prev, co_[corner_verts_[ME_POLY_LOOP_PREV(&poly, corner)]]);
const int2 adjecent_verts = bke::mesh::poly_find_adjecent_verts(poly, corner_verts_, vert);
copy_v3_v3(r_adj_next, co_[adjecent_verts[0]]);
copy_v3_v3(r_adj_prev, co_[adjecent_verts[1]]);
}
int other_vertex_index_from_loop(const int loop, const uint v) override

View File

@ -1284,7 +1284,7 @@ static void loop_split_generator(TaskPool *pool, LoopSplitTaskDataCommon *common
const MPoly &poly = polys[poly_index];
for (const int ml_curr_index : IndexRange(poly.loopstart, poly.totloop)) {
const int ml_prev_index = mesh_topology::poly_loop_prev(poly, ml_curr_index);
const int ml_prev_index = mesh::poly_corner_prev(poly, ml_curr_index);
#if 0
printf("Checking loop %d / edge %u / vert %u (sharp edge: %d, skiploop: %d)",

View File

@ -740,13 +740,13 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
nearest.index = -1;
for (i = 0; i < numedges_dst; i++) {
const MEdge *e_dst = &edges_dst[i];
const MEdge &e_dst = edges_dst[i];
float best_totdist = FLT_MAX;
int best_eidx_src = -1;
int j = 2;
while (j--) {
const uint vidx_dst = j ? e_dst->v1 : e_dst->v2;
const uint vidx_dst = j ? e_dst.v1 : e_dst.v2;
/* Compute closest verts only once! */
if (v_dst_to_src_map[vidx_dst].hit_dist == -1.0f) {
@ -772,7 +772,7 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
/* Now, check all source edges of closest sources vertices,
* and select the one giving the smallest total verts-to-verts distance. */
for (j = 2; j--;) {
const uint vidx_dst = j ? e_dst->v1 : e_dst->v2;
const uint vidx_dst = j ? e_dst.v1 : e_dst.v2;
const float first_dist = v_dst_to_src_map[vidx_dst].hit_dist;
const int vidx_src = v_dst_to_src_map[vidx_dst].index;
int *eidx_src, k;
@ -785,10 +785,11 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
k = vert_to_edge_src_map[vidx_src].count;
for (; k--; eidx_src++) {
const MEdge *edge_src = &edges_src[*eidx_src];
const float *other_co_src = vcos_src[BKE_mesh_edge_other_vert(edge_src, vidx_src)];
const MEdge &edge_src = edges_src[*eidx_src];
const float *other_co_src =
vcos_src[blender::bke::mesh::edge_other_vert(edge_src, vidx_src)];
const float *other_co_dst =
vert_positions_dst[BKE_mesh_edge_other_vert(e_dst, int(vidx_dst))];
vert_positions_dst[blender::bke::mesh::edge_other_vert(e_dst, int(vidx_dst))];
const float totdist = first_dist + len_v3v3(other_co_src, other_co_dst);
if (totdist < best_totdist) {
@ -801,8 +802,8 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
if (best_eidx_src >= 0) {
const float *co1_src = vcos_src[edges_src[best_eidx_src].v1];
const float *co2_src = vcos_src[edges_src[best_eidx_src].v2];
const float *co1_dst = vert_positions_dst[e_dst->v1];
const float *co2_dst = vert_positions_dst[e_dst->v2];
const float *co1_dst = vert_positions_dst[e_dst.v1];
const float *co2_dst = vert_positions_dst[e_dst.v2];
float co_src[3], co_dst[3];
/* TODO: would need an isect_seg_seg_v3(), actually! */

View File

@ -929,16 +929,57 @@ static bool mesh_validate_customdata(CustomData *data,
PRINT_MSG("%s: Checking %d CD layers...\n", __func__, data->totlayer);
/* Set dummy values so the layer-type is always initialized on first access. */
int layer_num = -1;
int layer_num_type = -1;
while (i < data->totlayer) {
CustomDataLayer *layer = &data->layers[i];
bool ok = true;
/* Count layers when the type changes. */
if (layer_num_type != layer->type) {
layer_num = CustomData_number_of_layers(data, layer->type);
layer_num_type = layer->type;
}
/* Validate active index, for a time this could be set to a negative value, see: #105860. */
int *active_index_array[] = {
&layer->active,
&layer->active_rnd,
&layer->active_clone,
&layer->active_mask,
};
for (int *active_index : Span(active_index_array, ARRAY_SIZE(active_index_array))) {
if (*active_index < 0) {
PRINT_ERR("\tCustomDataLayer type %d has a negative active index (%d)\n",
layer->type,
*active_index);
if (do_fixes) {
*active_index = 0;
has_fixes = true;
}
}
else {
if (*active_index >= layer_num) {
PRINT_ERR("\tCustomDataLayer type %d has an out of bounds active index (%d >= %d)\n",
layer->type,
*active_index,
layer_num);
if (do_fixes) {
BLI_assert(layer_num > 0);
*active_index = layer_num - 1;
has_fixes = true;
}
}
}
}
if (CustomData_layertype_is_singleton(layer->type)) {
const int layer_tot = CustomData_number_of_layers(data, layer->type);
if (layer_tot > 1) {
if (layer_num > 1) {
PRINT_ERR("\tCustomDataLayer type %d is a singleton, found %d in Mesh structure\n",
layer->type,
layer_tot);
layer_num);
ok = false;
}
}

View File

@ -4357,7 +4357,7 @@ void BKE_object_handle_update(Depsgraph *depsgraph, Scene *scene, Object *ob)
void BKE_object_sculpt_data_create(Object *ob)
{
BLI_assert((ob->sculpt == nullptr) && (ob->mode & OB_MODE_ALL_SCULPT));
ob->sculpt = MEM_cnew<SculptSession>(__func__);
ob->sculpt = MEM_new<SculptSession>(__func__);
ob->sculpt->mode_type = (eObjectMode)ob->mode;
}
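
`MEM_cnew` only zero-fills, which stops being valid now that `SculptSession` holds non-trivial C++ members (the `blender::Span` fields introduced in BKE_paint.h above); `MEM_new` constructs the object so those members are initialized properly, and the matching `MEM_delete` runs the destructor. A minimal sketch, assuming Blender's `MEM_new`/`MEM_delete` templates:

struct ExampleSession {
  blender::Span<int> corner_verts; /* Non-trivial member: needs its constructor. */
  int totvert = 0;
};

ExampleSession *session = MEM_new<ExampleSession>(__func__);
/* ... */
MEM_delete(session); /* Destructor runs before the memory is freed. */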

View File

@ -1696,16 +1696,16 @@ static void sculpt_update_object(
/* These are assigned to the base mesh in Multires. This is needed because Face Sets operators
* and tools use the Face Sets data from the base mesh when Multires is active. */
ss->vert_positions = BKE_mesh_vert_positions_for_write(me);
ss->polys = me->polys().data();
ss->corner_verts = me->corner_verts().data();
ss->polys = me->polys();
ss->corner_verts = me->corner_verts();
}
else {
ss->totvert = me->totvert;
ss->totpoly = me->totpoly;
ss->totfaces = me->totpoly;
ss->vert_positions = BKE_mesh_vert_positions_for_write(me);
ss->polys = me->polys().data();
ss->corner_verts = me->corner_verts().data();
ss->polys = me->polys();
ss->corner_verts = me->corner_verts();
ss->multires.active = false;
ss->multires.modifier = nullptr;
ss->multires.level = 0;
@ -1999,12 +1999,11 @@ int BKE_sculpt_mask_layers_ensure(Depsgraph *depsgraph,
int level = max_ii(1, mmd->sculptlvl);
int gridsize = BKE_ccg_gridsize(level);
int gridarea = gridsize * gridsize;
int i, j;
gmask = static_cast<GridPaintMask *>(
CustomData_add_layer(&me->ldata, CD_GRID_PAINT_MASK, CD_SET_DEFAULT, me->totloop));
for (i = 0; i < me->totloop; i++) {
for (int i = 0; i < me->totloop; i++) {
GridPaintMask *gpm = &gmask[i];
gpm->level = level;
@ -2012,29 +2011,28 @@ int BKE_sculpt_mask_layers_ensure(Depsgraph *depsgraph,
MEM_callocN(sizeof(float) * gridarea, "GridPaintMask.data"));
}
/* if vertices already have mask, copy into multires data */
/* If vertices already have mask, copy into multires data. */
if (paint_mask) {
for (i = 0; i < me->totpoly; i++) {
for (const int i : polys.index_range()) {
const MPoly &poly = polys[i];
float avg = 0;
/* mask center */
for (j = 0; j < poly.totloop; j++) {
const int vert = corner_verts[poly.loopstart + j];
/* Mask center. */
float avg = 0.0f;
for (const int vert : corner_verts.slice(poly.loopstart, poly.totloop)) {
avg += paint_mask[vert];
}
avg /= float(poly.totloop);
/* fill in multires mask corner */
for (j = 0; j < poly.totloop; j++) {
GridPaintMask *gpm = &gmask[poly.loopstart + j];
const int vert = corner_verts[poly.loopstart + j];
const int prev = ME_POLY_LOOP_PREV(&poly, j);
const int next = ME_POLY_LOOP_NEXT(&poly, j);
/* Fill in multires mask corner. */
for (const int corner : blender::IndexRange(poly.loopstart, poly.totloop)) {
GridPaintMask *gpm = &gmask[corner];
const int vert = corner_verts[corner];
const int prev = corner_verts[blender::bke::mesh::poly_corner_prev(poly, corner)];
const int next = corner_verts[blender::bke::mesh::poly_corner_next(poly, corner)];
gpm->data[0] = avg;
gpm->data[1] = (paint_mask[vert] + paint_mask[corner_verts[next]]) * 0.5f;
gpm->data[2] = (paint_mask[vert] + paint_mask[corner_verts[prev]]) * 0.5f;
gpm->data[1] = (paint_mask[vert] + paint_mask[next]) * 0.5f;
gpm->data[2] = (paint_mask[vert] + paint_mask[prev]) * 0.5f;
gpm->data[3] = paint_mask[vert];
}
}
@ -2285,6 +2283,24 @@ PBVH *BKE_sculpt_object_pbvh_ensure(Depsgraph *depsgraph, Object *ob)
return pbvh;
}
PBVH *BKE_object_sculpt_pbvh_get(Object *object)
{
if (!object->sculpt) {
return nullptr;
}
return object->sculpt->pbvh;
}
bool BKE_object_sculpt_use_dyntopo(const Object *object)
{
return object->sculpt && object->sculpt->bm;
}
void BKE_object_sculpt_dyntopo_smooth_shading_set(Object *object, const bool value)
{
object->sculpt->bm_smooth_shading = value;
}
void BKE_sculpt_bvh_update_from_ccg(PBVH *pbvh, SubdivCCG *subdiv_ccg)
{
CCGKey key;

View File

@ -1986,7 +1986,7 @@ const int *BKE_subdiv_ccg_start_face_grid_index_get(const SubdivCCG *subdiv_ccg)
static void adjacet_vertices_index_from_adjacent_edge(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const int *corner_verts,
const blender::Span<int> corner_verts,
const MPoly *polys,
int *r_v1,
int *r_v2)
@ -1996,13 +1996,13 @@ static void adjacet_vertices_index_from_adjacent_edge(const SubdivCCG *subdiv_cc
const MPoly &poly = polys[poly_index];
*r_v1 = corner_verts[coord->grid_index];
const int corner = poly_find_loop_from_vert(&poly, &corner_verts[poly.loopstart], *r_v1);
const int corner = blender::bke::mesh::poly_find_corner_from_vert(poly, corner_verts, *r_v1);
if (coord->x == grid_size_1) {
const int next = ME_POLY_LOOP_NEXT(&poly, corner);
const int next = blender::bke::mesh::poly_corner_next(poly, corner);
*r_v2 = corner_verts[next];
}
if (coord->y == grid_size_1) {
const int prev = ME_POLY_LOOP_PREV(&poly, corner);
const int prev = blender::bke::mesh::poly_corner_prev(poly, corner);
*r_v2 = corner_verts[prev];
}
}
@ -2010,6 +2010,7 @@ static void adjacet_vertices_index_from_adjacent_edge(const SubdivCCG *subdiv_cc
SubdivCCGAdjacencyType BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(const SubdivCCG *subdiv_ccg,
const SubdivCCGCoord *coord,
const int *corner_verts,
const int corners_num,
const MPoly *polys,
int *r_v1,
int *r_v2)
@ -2027,7 +2028,8 @@ SubdivCCGAdjacencyType BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(const Subdi
return SUBDIV_CCG_ADJACENT_VERTEX;
}
/* Grid corner adjacent to the middle of a coarse mesh edge. */
adjacet_vertices_index_from_adjacent_edge(subdiv_ccg, coord, corner_verts, polys, r_v1, r_v2);
adjacet_vertices_index_from_adjacent_edge(
subdiv_ccg, coord, {corner_verts, corners_num}, polys, r_v1, r_v2);
return SUBDIV_CCG_ADJACENT_EDGE;
}
@ -2035,7 +2037,7 @@ SubdivCCGAdjacencyType BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(const Subdi
if (!is_inner_edge_grid_coordinate(subdiv_ccg, coord)) {
/* Grid boundary adjacent to a coarse mesh edge. */
adjacet_vertices_index_from_adjacent_edge(
subdiv_ccg, coord, corner_verts, polys, r_v1, r_v2);
subdiv_ccg, coord, {corner_verts, corners_num}, polys, r_v1, r_v2);
return SUBDIV_CCG_ADJACENT_EDGE;
}
}
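
Threading `corners_num` through lets the raw pointer be promoted to a bounds-aware `blender::Span<int>` at the call sites: `{corner_verts, corners_num}` implicitly invokes Span's (pointer, size) constructor. A small sketch of that conversion:

static int sum(const blender::Span<int> values)
{
  int total = 0;
  for (const int v : values) {
    total += v;
  }
  return total;
}

void caller(const int *corner_verts, const int corners_num)
{
  /* Same data, now carrying its length (and index-checked in debug builds). */
  const int total = sum({corner_verts, corners_num});
  UNUSED_VARS(total);
}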

View File

@ -5,10 +5,11 @@
* \brief Array storage to minimize duplication.
*
* This is done by splitting arrays into chunks and using copy-on-write (COW),
* to de-duplicate chunks,
* from the users perspective this is an implementation detail.
* to de-duplicate chunks; from the user's perspective this is an implementation detail.
*
* Overview
* ========
*
* Data Structure
* --------------
*
@ -16,51 +17,52 @@
*
* \note The only 2 structures here which are referenced externally are:
*
* - BArrayStore: The whole array store.
* - BArrayState: Represents a single state (array) of data.
* - #BArrayStore: The whole array store.
* - #BArrayState: Represents a single state (array) of data.
* These can be added using a reference state;
* while this could be considered the previous or parent state,
* no relationship is kept,
* so the caller is free to add any state from the same BArrayStore as a reference.
* so the caller is free to add any state from the same #BArrayStore as a reference.
*
* <pre>
* <+> BArrayStore: root data-structure,
* <+> #BArrayStore: root data-structure,
* | can store many 'states', which share memory.
* |
* | This can store many arrays, however they must share the same 'stride'.
* | Arrays of different types will need to use a new BArrayStore.
* | Arrays of different types will need to use a new #BArrayStore.
* |
* +- <+> states (Collection of BArrayState's):
* +- <+> states (Collection of #BArrayState's):
* | | Each represents an array added by the user of this API,
* | | and references a chunk_list (each state is a chunk_list user).
* | | Note that the list order has no significance.
* | |
* | +- <+> chunk_list (BChunkList):
* | +- <+> chunk_list (#BChunkList):
* | | The chunks that make up this state.
* | | Each state is a chunk_list user,
* | | avoids duplicating lists when there is no change between states.
* | |
* | +- chunk_refs (List of BChunkRef): Each chunk_ref links to a BChunk.
* | +- chunk_refs (List of #BChunkRef): Each chunk_ref links to a #BChunk.
* | Each reference is a chunk user,
* | avoids duplicating smaller chunks of memory found in multiple states.
* |
* +- info (BArrayInfo):
* +- info (#BArrayInfo):
* | Sizes and offsets for this array-store.
* | Also caches some variables for reuse.
* |
* +- <+> memory (BArrayMemory):
* | Memory pools for storing BArrayStore data.
* +- <+> memory (#BArrayMemory):
* | Memory pools for storing #BArrayStore data.
* |
* +- chunk_list (Pool of BChunkList):
* | All chunk_lists, (reference counted, used by BArrayState).
* +- chunk_list (Pool of #BChunkList):
* | All chunk_lists, (reference counted, used by #BArrayState).
* |
* +- chunk_ref (Pool of BChunkRef):
* | All chunk_refs (link between BChunkList & BChunk).
* +- chunk_ref (Pool of #BChunkRef):
* | All chunk_refs (link between #BChunkList & #BChunk).
* |
* +- chunks (Pool of BChunk):
* All chunks, (reference counted, used by BChunkList).
* +- chunks (Pool of #BChunk):
* All chunks, (reference counted, used by #BChunkList).
* These have their headers hashed for reuse so we can quickly check for duplicates.
* </pre>
*
* De-Duplication
* --------------
*
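
As a concrete picture of the structure described above, a hedged usage sketch assembled from the public functions visible later in this diff (`BLI_array_store_create`, `BLI_array_store_state_add`, `BLI_array_store_state_data_get_alloc`); consult BLI_array_store.h for the exact API:

BArrayStore *bs = BLI_array_store_create(sizeof(int), 32 /* chunk size, in elements */);

int data_a[1024] = {0};
BArrayState *state_a = BLI_array_store_state_add(bs, data_a, sizeof(data_a), NULL);

int data_b[1024] = {0};
data_b[512] = 7;
/* Passing state_a as the reference state lets unchanged chunks be shared. */
BArrayState *state_b = BLI_array_store_state_add(bs, data_b, sizeof(data_b), state_a);

/* Expand a state back into a newly allocated array. */
size_t data_len;
int *restored = (int *)BLI_array_store_state_data_get_alloc(state_b, &data_len);
MEM_freeN(restored);
BLI_array_store_destroy(bs);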
@ -71,7 +73,7 @@
* For identical arrays this is all that's needed.
*
* De-duplication is performed on any remaining chunks, by hashing the first few bytes of the chunk
* (see: BCHUNK_HASH_TABLE_ACCUMULATE_STEPS).
* (see: #BCHUNK_HASH_TABLE_ACCUMULATE_STEPS).
*
* \note This is cached for reuse since the referenced data never changes.
*
@ -93,9 +95,9 @@
#include "BLI_strict_flags.h"
#include "BLI_array_store.h" /* own include */
#include "BLI_array_store.h" /* Own include. */
/* only for BLI_array_store_is_valid */
/* Only for #BLI_array_store_is_valid. */
#include "BLI_ghash.h"
/* -------------------------------------------------------------------- */
@ -169,7 +171,7 @@
#endif
/**
* Calculate the key once and reuse it
* Calculate the key once and reuse it.
*/
#define USE_HASH_TABLE_KEY_CACHE
#ifdef USE_HASH_TABLE_KEY_CACHE
@ -219,7 +221,7 @@
# define BCHUNK_SIZE_MAX_MUL 2
#endif /* USE_MERGE_CHUNKS */
/** Slow (keep disabled), but handy for debugging */
/** Slow (keep disabled), but handy for debugging. */
// #define USE_VALIDATE_LIST_SIZE
// #define USE_VALIDATE_LIST_DATA_PARTIAL
@ -238,9 +240,9 @@ typedef struct BArrayInfo {
size_t chunk_stride;
// uint chunk_count; /* UNUSED (other values are derived from this) */
/* pre-calculated */
/* Pre-calculated. */
size_t chunk_byte_size;
/* min/max limits (inclusive) */
/* Min/max limits (inclusive) */
size_t chunk_byte_size_min;
size_t chunk_byte_size_max;
/**
@ -255,19 +257,19 @@ typedef struct BArrayInfo {
} BArrayInfo;
typedef struct BArrayMemory {
BLI_mempool *chunk_list; /* BChunkList */
BLI_mempool *chunk_ref; /* BChunkRef */
BLI_mempool *chunk; /* BChunk */
BLI_mempool *chunk_list; /* #BChunkList. */
BLI_mempool *chunk_ref; /* #BChunkRef. */
BLI_mempool *chunk; /* #BChunk. */
} BArrayMemory;
/**
* Main storage for all states
* Main storage for all states.
*/
struct BArrayStore {
/* static */
/* Static. */
BArrayInfo info;
/* memory storage */
/** Memory storage. */
BArrayMemory memory;
/**
@ -287,14 +289,14 @@ struct BArrayStore {
* it makes it easier to trace invalid usage, so leave as-is for now.
*/
struct BArrayState {
/** linked list in #BArrayStore.states */
/** linked list in #BArrayStore.states. */
struct BArrayState *next, *prev;
/** Shared chunk list, this reference must hold a #BChunkList::users. */
struct BChunkList *chunk_list;
};
typedef struct BChunkList {
/** List of #BChunkRef's */
/** List of #BChunkRef's. */
ListBase chunk_refs;
/** Result of `BLI_listbase_count(chunks)`, store for reuse. */
uint chunk_refs_len;
@ -466,15 +468,15 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
{
BChunkRef *cref = chunk_list->chunk_refs.last;
if (cref && cref->prev) {
/* both are decref'd after use (end of this block) */
/* Both are decref'd after use (end of this block) */
BChunk *chunk_curr = cref->link;
BChunk *chunk_prev = cref->prev->link;
if (MIN2(chunk_prev->data_len, chunk_curr->data_len) < info->chunk_byte_size_min) {
const size_t data_merge_len = chunk_prev->data_len + chunk_curr->data_len;
/* we could pass, but no need */
/* We could pass, but no need. */
if (data_merge_len <= info->chunk_byte_size_max) {
/* we have enough space to merge */
/* We have enough space to merge. */
/* Remove last from the linked-list. */
BLI_assert(chunk_list->chunk_refs.last != chunk_list->chunk_refs.first);
@ -498,10 +500,10 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
*
* if we do, the code below works (test by setting 'BCHUNK_SIZE_MAX_MUL = 1.2') */
/* keep chunk on the left hand side a regular size */
/* Keep chunk on the left hand side a regular size. */
const size_t split = info->chunk_byte_size;
/* merge and split */
/* Merge and split. */
const size_t data_prev_len = split;
const size_t data_curr_len = data_merge_len - split;
uchar *data_prev = MEM_mallocN(data_prev_len, __func__);
@ -510,10 +512,10 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
if (data_prev_len <= chunk_prev->data_len) {
const size_t data_curr_shrink_len = chunk_prev->data_len - data_prev_len;
/* setup 'data_prev' */
/* Setup 'data_prev'. */
memcpy(data_prev, chunk_prev->data, data_prev_len);
/* setup 'data_curr' */
/* Setup 'data_curr'. */
memcpy(data_curr, &chunk_prev->data[data_prev_len], data_curr_shrink_len);
memcpy(&data_curr[data_curr_shrink_len], chunk_curr->data, chunk_curr->data_len);
}
@ -523,11 +525,11 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
const size_t data_prev_grow_len = data_prev_len - chunk_prev->data_len;
/* setup 'data_prev' */
/* Setup 'data_prev'. */
memcpy(data_prev, chunk_prev->data, chunk_prev->data_len);
memcpy(&data_prev[chunk_prev->data_len], chunk_curr->data, data_prev_grow_len);
/* setup 'data_curr' */
/* Setup 'data_curr'. */
memcpy(data_curr, &chunk_curr->data[data_prev_grow_len], data_curr_len);
}
@ -538,7 +540,7 @@ static void bchunk_list_ensure_min_size_last(const BArrayInfo *info,
cref->link->users += 1;
}
/* free zero users */
/* Free zero users. */
bchunk_decref(bs_mem, chunk_curr);
bchunk_decref(bs_mem, chunk_prev);
}
@ -563,8 +565,7 @@ static void bchunk_list_calc_trim_len(const BArrayInfo *info,
size_t data_trim_len = data_len;
#ifdef USE_MERGE_CHUNKS
/* avoid creating too-small chunks
* more efficient than merging after */
/* Avoid creating too-small chunks, which is more efficient than merging after. */
if (data_len > info->chunk_byte_size) {
data_last_chunk_len = (data_trim_len % info->chunk_byte_size);
data_trim_len = data_trim_len - data_last_chunk_len;
@ -626,7 +627,7 @@ static void bchunk_list_append_data(const BArrayInfo *info,
if (MIN2(chunk_prev->data_len, data_len) < info->chunk_byte_size_min) {
const size_t data_merge_len = chunk_prev->data_len + data_len;
/* realloc for single user */
/* Re-allocate for single user. */
if (cref->link->users == 1) {
uchar *data_merge = MEM_reallocN((void *)cref->link->data, data_merge_len);
memcpy(&data_merge[chunk_prev->data_len], data, data_len);
@ -651,7 +652,7 @@ static void bchunk_list_append_data(const BArrayInfo *info,
BChunk *chunk = bchunk_new_copydata(bs_mem, data, data_len);
bchunk_list_append_only(bs_mem, chunk_list, chunk);
/* don't run this, instead preemptively avoid creating a chunk only to merge it (above). */
/* Don't run this, instead preemptively avoid creating a chunk only to merge it (above). */
#if 0
# ifdef USE_MERGE_CHUNKS
bchunk_list_ensure_min_size_last(info, bs_mem, chunk_list);
@ -698,8 +699,7 @@ static void bchunk_list_append_data_n(const BArrayInfo *info,
}
}
else {
/* if we didn't write any chunks previously,
* we may need to merge with the last. */
/* If we didn't write any chunks previously, we may need to merge with the last. */
if (data_last_chunk_len) {
bchunk_list_append_data(info, bs_mem, chunk_list, data, data_last_chunk_len);
// i_prev = data_len; /* UNUSED */
@ -760,7 +760,7 @@ static void bchunk_list_fill_from_array(const BArrayInfo *info,
}
#endif
/* works but better avoid redundant re-alloc */
/* Works but better avoid redundant re-allocation. */
#if 0
# ifdef USE_MERGE_CHUNKS
bchunk_list_ensure_min_size_last(info, bs_mem, chunk_list);
@ -774,7 +774,7 @@ static void bchunk_list_fill_from_array(const BArrayInfo *info,
/** \} */
/*
* Internal Table Lookup Functions
* Internal Table Lookup Functions.
*/
/* -------------------------------------------------------------------- */
@ -790,7 +790,7 @@ BLI_INLINE hash_key hash_data_single(const uchar p)
return ((HASH_INIT << 5) + HASH_INIT) + (hash_key)(*((signed char *)&p));
}
/* hash bytes, from BLI_ghashutil_strhash_n */
/* Hash bytes, from #BLI_ghashutil_strhash_n. */
static hash_key hash_data(const uchar *key, size_t n)
{
const signed char *p;
@ -817,14 +817,14 @@ static void hash_array_from_data(const BArrayInfo *info,
}
}
else {
/* fast-path for bytes */
/* Fast-path for bytes. */
for (size_t i = 0; i < data_slice_len; i++) {
hash_array[i] = hash_data_single(data_slice[i]);
}
}
}
/*
/**
* Similar to hash_array_from_data,
* but able to step into the next chunk if we run out of data.
*/
@ -849,7 +849,7 @@ static void hash_array_from_cref(const BArrayInfo *info,
} while ((i < hash_array_len) && (cref != NULL));
/* If this isn't equal, the caller didn't properly check
* that there was enough data left in all chunks */
* that there was enough data left in all chunks. */
BLI_assert(i == hash_array_len);
}
@ -886,11 +886,11 @@ static void hash_accum_single(hash_key *hash_array, const size_t hash_array_len,
{
BLI_assert(iter_steps <= hash_array_len);
if (UNLIKELY(!(iter_steps <= hash_array_len))) {
/* while this shouldn't happen, avoid crashing */
/* While this shouldn't happen, avoid crashing. */
iter_steps = hash_array_len;
}
/* We can increase this value each step to avoid accumulating quite as much
* while getting the same results as hash_accum */
* while getting the same results as hash_accum. */
size_t iter_steps_sub = iter_steps;
while (iter_steps != 0) {
@ -906,11 +906,11 @@ static void hash_accum_single(hash_key *hash_array, const size_t hash_array_len,
static hash_key key_from_chunk_ref(const BArrayInfo *info,
const BChunkRef *cref,
/* avoid reallocating each time */
/* Avoid reallocating each time. */
hash_key *hash_store,
const size_t hash_store_len)
{
/* in C, will fill in a reusable array */
/* In C, will fill in a reusable array. */
BChunk *chunk = cref->link;
BLI_assert((info->accum_read_ahead_bytes * info->chunk_stride) != 0);
@ -921,14 +921,14 @@ static hash_key key_from_chunk_ref(const BArrayInfo *info,
key = chunk->key;
if (key != HASH_TABLE_KEY_UNSET) {
/* Using key cache!
* avoids calculating every time */
* avoids calculating every time. */
}
else {
hash_array_from_cref(info, cref, info->accum_read_ahead_bytes, hash_store);
hash_accum_single(hash_store, hash_store_len, info->accum_steps);
key = hash_store[0];
/* cache the key */
/* Cache the key. */
if (UNLIKELY(key == HASH_TABLE_KEY_UNSET)) {
key = HASH_TABLE_KEY_FALLBACK;
}
@ -941,7 +941,7 @@ static hash_key key_from_chunk_ref(const BArrayInfo *info,
# endif
return key;
}
/* corner case - we're too small, calculate the key each time. */
/* Corner case - we're too small, calculate the key each time. */
hash_array_from_cref(info, cref, info->accum_read_ahead_bytes, hash_store);
hash_accum_single(hash_store, hash_store_len, info->accum_steps);
@ -964,8 +964,8 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
const size_t offset,
const hash_key *table_hash_array)
{
hash_key key = table_hash_array[((offset - i_table_start) / info->chunk_stride)];
uint key_index = (uint)(key % (hash_key)table_len);
const hash_key key = table_hash_array[((offset - i_table_start) / info->chunk_stride)];
const uint key_index = (uint)(key % (hash_key)table_len);
const BTableRef *tref = table[key_index];
if (tref != NULL) {
const size_t size_left = data_len - offset;
@ -978,7 +978,7 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
BChunk *chunk_test = cref->link;
if (chunk_test->data_len <= size_left) {
if (bchunk_data_compare_unchecked(chunk_test, data, data_len, offset)) {
/* we could remove the chunk from the table, to avoid multiple hits */
/* We could remove the chunk from the table, to avoid multiple hits. */
return cref;
}
}
@ -990,7 +990,7 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
#else /* USE_HASH_TABLE_ACCUMULATE */
/* NON USE_HASH_TABLE_ACCUMULATE code (simply hash each chunk) */
/* NON USE_HASH_TABLE_ACCUMULATE code (simply hash each chunk). */
static hash_key key_from_chunk_ref(const BArrayInfo *info, const BChunkRef *cref)
{
@ -1002,10 +1002,10 @@ static hash_key key_from_chunk_ref(const BArrayInfo *info, const BChunkRef *cref
key = chunk->key;
if (key != HASH_TABLE_KEY_UNSET) {
/* Using key cache!
* avoids calculating every time */
* avoids calculating every time. */
}
else {
/* cache the key */
/* Cache the key. */
key = hash_data(chunk->data, data_hash_len);
if (key == HASH_TABLE_KEY_UNSET) {
key = HASH_TABLE_KEY_FALLBACK;
@ -1030,9 +1030,9 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
{
const size_t data_hash_len = BCHUNK_HASH_LEN * info->chunk_stride; /* TODO: cache. */
size_t size_left = data_len - offset;
hash_key key = hash_data(&data[offset], MIN2(data_hash_len, size_left));
uint key_index = (uint)(key % (hash_key)table_len);
const size_t size_left = data_len - offset;
const hash_key key = hash_data(&data[offset], MIN2(data_hash_len, size_left));
const uint key_index = (uint)(key % (hash_key)table_len);
for (BTableRef *tref = table[key_index]; tref; tref = tref->next) {
const BChunkRef *cref = tref->cref;
# ifdef USE_HASH_TABLE_KEY_CACHE
@ -1042,7 +1042,7 @@ static const BChunkRef *table_lookup(const BArrayInfo *info,
BChunk *chunk_test = cref->link;
if (chunk_test->data_len <= size_left) {
if (bchunk_data_compare_unchecked(chunk_test, data, data_len, offset)) {
/* we could remove the chunk from the table, to avoid multiple hits */
/* We could remove the chunk from the table, to avoid multiple hits. */
return cref;
}
}
@ -1118,7 +1118,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
#endif /* USE_FASTPATH_CHUNKS_FIRST */
/* Copy until we have a mismatch */
/* Copy until we have a mismatch. */
BChunkList *chunk_list = bchunk_list_new(bs_mem, data_len_original);
if (cref_match_first != NULL) {
size_t chunk_size_step = 0;
@ -1134,7 +1134,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
}
cref = cref->next;
}
/* happens when bytes are removed from the end of the array */
/* Happens when bytes are removed from the end of the array. */
if (chunk_size_step == data_len_original) {
return chunk_list;
}
@ -1148,17 +1148,16 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
/* ------------------------------------------------------------------------
* Fast-Path for end chunks
*
* Check for trailing chunks
* Check for trailing chunks.
*/
/* In this case use 'chunk_list_reference_last' to define the last index
* index_match_last = -1 */
* `index_match_last = -1`. */
/* warning, from now on don't use len(data)
* since we want to ignore chunks already matched */
/* Warning, from now on don't use len(data) since we want to ignore chunks already matched. */
size_t data_len = data_len_original;
#define data_len_original invalid_usage
#ifdef data_len_original /* quiet warning */
#ifdef data_len_original /* Quiet warning. */
#endif
const BChunkRef *chunk_list_reference_last = NULL;
@ -1198,7 +1197,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
#ifdef USE_ALIGN_CHUNKS_TEST
if (chunk_list->total_expanded_size == chunk_list_reference->total_expanded_size) {
/* if we're already a quarter aligned */
/* If we're already a quarter aligned. */
if (data_len - i_prev <= chunk_list->total_expanded_size / 4) {
use_aligned = true;
}
@ -1212,7 +1211,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
* ----------------------- */
if (use_aligned) {
/* Copy matching chunks, creates using the same 'layout' as the reference */
/* Copy matching chunks, creates using the same 'layout' as the reference. */
const BChunkRef *cref = cref_match_first ? cref_match_first->next :
chunk_list_reference->chunk_refs.first;
while (i_prev != data_len) {
@ -1241,12 +1240,12 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
(chunk_list_reference->chunk_refs.first != NULL)) {
/* --------------------------------------------------------------------
* Non-Aligned Chunk De-Duplication */
* Non-Aligned Chunk De-Duplication. */
/* only create a table if we have at least one chunk to search
/* Only create a table if we have at least one chunk to search
* otherwise just make a new one.
*
* Support re-arranged chunks */
* Support re-arranged chunks. */
#ifdef USE_HASH_TABLE_ACCUMULATE
size_t i_table_start = i_prev;
@ -1257,7 +1256,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
hash_accum(table_hash_array, table_hash_array_len, info->accum_steps);
#else
/* dummy vars */
/* Dummy vars. */
uint i_table_start = 0;
hash_key *table_hash_array = NULL;
#endif
@ -1272,8 +1271,8 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
const size_t table_len = chunk_list_reference_remaining_len * BCHUNK_HASH_TABLE_MUL;
BTableRef **table = MEM_callocN(table_len * sizeof(*table), __func__);
/* table_make - inline
* include one matching chunk, to allow for repeating values */
/* `table_make` - inline,
* include one matching chunk, to allow for repeating values. */
{
#ifdef USE_HASH_TABLE_ACCUMULATE
const size_t hash_store_len = info->accum_read_ahead_len;
@ -1315,19 +1314,31 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
hash_store_len
#endif
);
uint key_index = (uint)(key % (hash_key)table_len);
const uint key_index = (uint)(key % (hash_key)table_len);
BTableRef *tref_prev = table[key_index];
BLI_assert(table_ref_stack_n < chunk_list_reference_remaining_len);
#ifdef USE_HASH_TABLE_DEDUPLICATE
bool is_duplicate = false;
for (BTableRef *tref_iter = tref_prev; tref_iter; tref_iter = tref_iter->next) {
if ((cref->link->data_len == tref_iter->cref->link->data_len) &&
(memcmp(cref->link->data,
tref_iter->cref->link->data,
tref_iter->cref->link->data_len) == 0)) {
is_duplicate = true;
break;
}
if (tref_prev) {
const BChunk *chunk_a = cref->link;
const BTableRef *tref = tref_prev;
do {
const BChunk *chunk_b = tref->cref->link;
# ifdef USE_HASH_TABLE_KEY_CACHE
if (key == chunk_b->key)
# endif
{
/* Not an error, it just isn't expected: in the case chunks are shared,
* matching chunks should also be skipped to avoid a redundant `memcmp` call. */
BLI_assert(chunk_a != chunk_b);
if (chunk_a->data_len == chunk_b->data_len) {
if (memcmp(chunk_a->data, chunk_b->data, chunk_a->data_len) == 0) {
is_duplicate = true;
break;
}
}
}
} while ((tref = tref->next));
}
if (!is_duplicate)
@ -1349,7 +1360,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
MEM_freeN(hash_store);
#endif
}
/* done making the table */
/* Done making the table. */
BLI_assert(i_prev <= data_len);
for (size_t i = i_prev; i < data_len;) {
@ -1364,7 +1375,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
i_prev = i;
}
/* now add the reference chunk */
/* Now add the reference chunk. */
{
BChunk *chunk_found = cref_found->link;
i += chunk_found->data_len;
@ -1375,7 +1386,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
ASSERT_CHUNKLIST_SIZE(chunk_list, i_prev);
ASSERT_CHUNKLIST_DATA(chunk_list, data);
/* its likely that the next chunk in the list will be a match, so check it! */
/* It's likely that the next chunk in the list will be a match, so check it! */
while (!ELEM(cref_found->next, NULL, chunk_list_reference_last)) {
cref_found = cref_found->next;
BChunk *chunk_found = cref_found->link;
@ -1385,7 +1396,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
* repeating memory where it would be useful to re-use chunks. */
i += chunk_found->data_len;
bchunk_list_append(info, bs_mem, chunk_list, chunk_found);
/* chunk_found may be freed! */
/* `chunk_found` may be freed! */
i_prev = i;
BLI_assert(i_prev <= data_len);
ASSERT_CHUNKLIST_SIZE(chunk_list, i_prev);
@ -1428,14 +1439,13 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
#ifdef USE_FASTPATH_CHUNKS_LAST
if (chunk_list_reference_last != NULL) {
/* write chunk_list_reference_last since it hasn't been written yet */
/* Write chunk_list_reference_last since it hasn't been written yet. */
const BChunkRef *cref = chunk_list_reference_last;
while (cref != NULL) {
BChunk *chunk = cref->link;
// BLI_assert(bchunk_data_compare(chunk, data, data_len, i_prev));
i_prev += chunk->data_len;
/* use simple since we assume the references chunks
* have already been sized correctly. */
/* Use the simple append since we assume the reference's chunks have already been sized correctly. */
bchunk_list_append_only(bs_mem, chunk_list, chunk);
ASSERT_CHUNKLIST_DATA(chunk_list, data);
cref = cref->next;
@ -1447,7 +1457,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
BLI_assert(i_prev == data_len_original);
/* check we're the correct size and that we didn't accidentally modify the reference */
/* Check we're the correct size and that we didn't accidentally modify the reference. */
ASSERT_CHUNKLIST_SIZE(chunk_list, data_len_original);
ASSERT_CHUNKLIST_SIZE(chunk_list_reference, chunk_list_reference->total_expanded_size);
@ -1455,7 +1465,7 @@ static BChunkList *bchunk_list_from_data_merge(const BArrayInfo *info,
return chunk_list;
}
/* end private API */
/* End private API. */
/** \} */
@ -1509,7 +1519,7 @@ BArrayStore *BLI_array_store_create(uint stride, uint chunk_count)
bs->memory.chunk_list = BLI_mempool_create(sizeof(BChunkList), 0, 512, BLI_MEMPOOL_NOP);
bs->memory.chunk_ref = BLI_mempool_create(sizeof(BChunkRef), 0, 512, BLI_MEMPOOL_NOP);
/* allow iteration to simplify freeing, otherwise its not needed
/* Allow iteration to simplify freeing, otherwise it's not needed
* (we could loop over all states as an alternative). */
bs->memory.chunk = BLI_mempool_create(sizeof(BChunk), 0, 512, BLI_MEMPOOL_ALLOW_ITER);
@ -1520,7 +1530,7 @@ BArrayStore *BLI_array_store_create(uint stride, uint chunk_count)
static void array_store_free_data(BArrayStore *bs)
{
/* free chunk data */
/* Free chunk data. */
{
BLI_mempool_iter iter;
BChunk *chunk;
@ -1531,7 +1541,7 @@ static void array_store_free_data(BArrayStore *bs)
}
}
/* free states */
/* Free states. */
for (BArrayState *state = bs->states.first, *state_next; state; state = state_next) {
state_next = state->next;
MEM_freeN(state);
@ -1599,7 +1609,7 @@ BArrayState *BLI_array_store_state_add(BArrayStore *bs,
const size_t data_len,
const BArrayState *state_reference)
{
/* ensure we're aligned to the stride */
/* Ensure we're aligned to the stride. */
BLI_assert((data_len % bs->info.chunk_stride) == 0);
#ifdef USE_PARANOID_CHECKS
@ -1614,7 +1624,7 @@ BArrayState *BLI_array_store_state_add(BArrayStore *bs,
&bs->memory,
(const uchar *)data,
data_len,
/* re-use reference chunks */
/* Re-use reference chunks. */
state_reference->chunk_list);
}
else {
@ -1691,7 +1701,7 @@ void *BLI_array_store_state_data_get_alloc(BArrayState *state, size_t *r_data_le
/** \name Debugging API (for testing).
* \{ */
/* only for test validation */
/* Only for test validation. */
static size_t bchunk_list_size(const BChunkList *chunk_list)
{
size_t total_expanded_size = 0;
@ -1719,7 +1729,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
}
#ifdef USE_MERGE_CHUNKS
/* ensure we merge all chunks that could be merged */
/* Ensure we merge all chunks that could be merged. */
if (chunk_list->total_expanded_size > bs->info.chunk_byte_size_min) {
LISTBASE_FOREACH (BChunkRef *, cref, &chunk_list->chunk_refs) {
if (cref->link->data_len < bs->info.chunk_byte_size_min) {
@ -1758,7 +1768,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
} \
((void)0)
/* count chunk_list's */
/* Count chunk_list's. */
GHash *chunk_list_map = BLI_ghash_ptr_new(__func__);
GHash *chunk_map = BLI_ghash_ptr_new(__func__);
@ -1779,7 +1789,7 @@ bool BLI_array_store_is_valid(BArrayStore *bs)
goto user_finally;
}
/* count chunk's */
/* Count chunk's. */
GHASH_ITER (gh_iter, chunk_list_map) {
const struct BChunkList *chunk_list = BLI_ghashIterator_getKey(&gh_iter);
LISTBASE_FOREACH (const BChunkRef *, cref, &chunk_list->chunk_refs) {

View File

@ -222,10 +222,10 @@ static void basic_cache_populate(void *vedata, Object *ob)
}
}
if (G.debug_value == 889 && ob->sculpt && ob->sculpt->pbvh) {
if (G.debug_value == 889 && ob->sculpt && BKE_object_sculpt_pbvh_get(ob)) {
int debug_node_nr = 0;
DRW_debug_modelmat(ob->object_to_world);
BKE_pbvh_draw_debug_cb(ob->sculpt->pbvh, DRW_sculpt_debug_cb, &debug_node_nr);
BKE_pbvh_draw_debug_cb(BKE_object_sculpt_pbvh_get(ob), DRW_sculpt_debug_cb, &debug_node_nr);
}
}
}

View File

@ -814,8 +814,8 @@ void EEVEE_materials_cache_populate(EEVEE_Data *vedata,
bool use_sculpt_pbvh = BKE_sculptsession_use_pbvh_draw(ob, draw_ctx->rv3d) &&
!DRW_state_is_image_render();
if (ob->sculpt && ob->sculpt->pbvh) {
BKE_pbvh_is_drawing_set(ob->sculpt->pbvh, use_sculpt_pbvh);
if (ob->sculpt && BKE_object_sculpt_pbvh_get(ob)) {
BKE_pbvh_is_drawing_set(BKE_object_sculpt_pbvh_get(ob), use_sculpt_pbvh);
}
/* First get materials for this mesh. */
@ -887,10 +887,11 @@ void EEVEE_materials_cache_populate(EEVEE_Data *vedata,
}
}
if (G.debug_value == 889 && ob->sculpt && ob->sculpt->pbvh) {
if (G.debug_value == 889 && ob->sculpt && BKE_object_sculpt_pbvh_get(ob)) {
int debug_node_nr = 0;
DRW_debug_modelmat(ob->object_to_world);
BKE_pbvh_draw_debug_cb(ob->sculpt->pbvh, DRW_sculpt_debug_cb, &debug_node_nr);
BKE_pbvh_draw_debug_cb(
BKE_object_sculpt_pbvh_get(ob), DRW_sculpt_debug_cb, &debug_node_nr);
}
}

View File

@ -1,3 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "eevee_defines.hh"
#include "gpu_shader_create_info.hh"

View File

@ -277,8 +277,8 @@ static eV3DShadingColorType workbench_color_type_get(WORKBENCH_PrivateData *wpd,
* of vertex color arrays from being sent to the GPU (e.g.
* when switching from eevee to workbench).
*/
if (ob->sculpt && ob->sculpt->pbvh) {
BKE_pbvh_is_drawing_set(ob->sculpt->pbvh, is_sculpt_pbvh);
if (ob->sculpt && BKE_object_sculpt_pbvh_get(ob)) {
BKE_pbvh_is_drawing_set(BKE_object_sculpt_pbvh_get(ob), is_sculpt_pbvh);
}
bool has_color = false;
@ -334,7 +334,7 @@ static eV3DShadingColorType workbench_color_type_get(WORKBENCH_PrivateData *wpd,
}
if (is_sculpt_pbvh && color_type == V3D_SHADING_TEXTURE_COLOR &&
BKE_pbvh_type(ob->sculpt->pbvh) != PBVH_FACES) {
BKE_pbvh_type(BKE_object_sculpt_pbvh_get(ob)) != PBVH_FACES) {
/* Force use of material color for sculpt. */
color_type = V3D_SHADING_MATERIAL_COLOR;
}

View File

@ -32,8 +32,8 @@ static bool ui_view_drop_poll(bContext *C, wmDrag *drag, const wmEvent *event)
{
const ARegion *region = CTX_wm_region(C);
std::unique_ptr<DropTargetInterface> drop_target = region_views_find_drop_target_at(
region, event->xy);
std::unique_ptr<DropTargetInterface> drop_target = region_views_find_drop_target_at(region,
event->xy);
if (!drop_target) {
return false;
}
@ -49,8 +49,7 @@ static bool ui_view_drop_poll(bContext *C, wmDrag *drag, const wmEvent *event)
static char *ui_view_drop_tooltip(bContext *C, wmDrag *drag, const int xy[2], wmDropBox * /*drop*/)
{
const ARegion *region = CTX_wm_region(C);
std::unique_ptr<DropTargetInterface> drop_target = region_views_find_drop_target_at(region,
xy);
std::unique_ptr<DropTargetInterface> drop_target = region_views_find_drop_target_at(region, xy);
return drop_target_tooltip(*drop_target, *drag);
}

View File

@ -2374,8 +2374,8 @@ static int ui_view_drop_invoke(bContext *C, wmOperator * /*op*/, const wmEvent *
}
const ARegion *region = CTX_wm_region(C);
std::unique_ptr<DropTargetInterface> drop_target = region_views_find_drop_target_at(
region, event->xy);
std::unique_ptr<DropTargetInterface> drop_target = region_views_find_drop_target_at(region,
event->xy);
if (!drop_target_apply_drop(
*C, *drop_target, *static_cast<const ListBase *>(event->customdata))) {

View File

@ -373,8 +373,6 @@ static bool wm_collada_export_check(bContext *UNUSED(C), wmOperator *op)
void WM_OT_collada_export(wmOperatorType *ot)
{
struct StructRNA *func = ot->srna;
static const EnumPropertyItem prop_bc_export_mesh_type[] = {
{BC_MESH_TYPE_VIEW, "view", 0, "Viewport", "Apply modifier's viewport settings"},
{BC_MESH_TYPE_RENDER, "render", 0, "Render", "Apply modifier's render settings"},
@ -456,20 +454,20 @@ void WM_OT_collada_export(wmOperatorType *ot)
FILE_DEFAULTDISPLAY,
FILE_SORT_DEFAULT);
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"prop_bc_export_ui_section",
prop_bc_export_ui_section,
0,
"Export Section",
"Only for User Interface organization");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"apply_modifiers",
0,
"Apply Modifiers",
"Apply modifiers to exported mesh (non destructive))");
RNA_def_int(func,
RNA_def_int(ot->srna,
"export_mesh_type",
0,
INT_MIN,
@ -479,83 +477,83 @@ void WM_OT_collada_export(wmOperatorType *ot)
INT_MIN,
INT_MAX);
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"export_mesh_type_selection",
prop_bc_export_mesh_type,
0,
"Resolution",
"Modifier resolution for export");
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"export_global_forward_selection",
prop_bc_export_global_forward,
BC_DEFAULT_FORWARD,
"Global Forward Axis",
"Global Forward axis for export");
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"export_global_up_selection",
prop_bc_export_global_up,
BC_DEFAULT_UP,
"Global Up Axis",
"Global Up axis for export");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"apply_global_orientation",
false,
"Apply Global Orientation",
"Rotate all root objects to match the global orientation settings "
"otherwise set the global orientation per Collada asset");
RNA_def_boolean(func, "selected", false, "Selection Only", "Export only selected elements");
RNA_def_boolean(ot->srna, "selected", false, "Selection Only", "Export only selected elements");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"include_children",
false,
"Include Children",
"Export all children of selected objects (even if not selected)");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"include_armatures",
false,
"Include Armatures",
"Export related armatures (even if not selected)");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"include_shapekeys",
false,
"Include Shape Keys",
"Export all Shape Keys from Mesh Objects");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"deform_bones_only",
false,
"Deform Bones Only",
"Only export deforming bones with armatures");
RNA_def_boolean(
func,
ot->srna,
"include_animations",
true,
"Include Animations",
"Export animations if available (exporting animations will enforce the decomposition of "
"node transforms into <translation> <rotation> and <scale> components)");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"include_all_actions",
true,
"Include all Actions",
"Export also unassigned actions (this allows you to export entire animation "
"libraries for your character(s))");
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"export_animation_type_selection",
prop_bc_export_animation_type,
0,
"Key Type",
"Type for exported animations (use sample keys or Curve keys)");
RNA_def_int(func,
RNA_def_int(ot->srna,
"sampling_rate",
1,
1,
@ -565,7 +563,7 @@ void WM_OT_collada_export(wmOperatorType *ot)
1,
INT_MAX);
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"keep_smooth_curves",
0,
"Keep Smooth curves",
@ -573,48 +571,51 @@ void WM_OT_collada_export(wmOperatorType *ot)
"inverse parent matrix "
"is the unity matrix, otherwise you may end up with odd results)");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"keep_keyframes",
0,
"Keep Keyframes",
"Use existing keyframes as additional sample points (this helps when you want "
"to keep manual tweaks)");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"keep_flat_curves",
0,
"All Keyed Curves",
"Export also curves which have only one key or are totally flat");
RNA_def_boolean(
func, "active_uv_only", 0, "Only Selected UV Map", "Export only the selected UV Map");
ot->srna, "active_uv_only", 0, "Only Selected UV Map", "Export only the selected UV Map");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"use_texture_copies",
1,
"Copy",
"Copy textures to same folder where the .dae file is exported");
RNA_def_boolean(
func, "triangulate", 1, "Triangulate", "Export polygons (quads and n-gons) as triangles");
RNA_def_boolean(ot->srna,
"triangulate",
1,
"Triangulate",
"Export polygons (quads and n-gons) as triangles");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"use_object_instantiation",
1,
"Use Object Instances",
"Instantiate multiple Objects from same Data");
RNA_def_boolean(
func,
ot->srna,
"use_blender_profile",
1,
"Use Blender Profile",
"Export additional Blender specific information (for material, shaders, bones, etc.)");
RNA_def_boolean(
func, "sort_by_name", 0, "Sort by Object name", "Sort exported data by Object name");
ot->srna, "sort_by_name", 0, "Sort by Object name", "Sort exported data by Object name");
RNA_def_int(func,
RNA_def_int(ot->srna,
"export_object_transformation_type",
0,
INT_MIN,
@ -624,14 +625,14 @@ void WM_OT_collada_export(wmOperatorType *ot)
INT_MIN,
INT_MAX);
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"export_object_transformation_type_selection",
prop_bc_export_transformation_type,
0,
"Transform",
"Object Transformation type for translation, scale and rotation");
RNA_def_int(func,
RNA_def_int(ot->srna,
"export_animation_transformation_type",
0,
INT_MIN,
@ -643,7 +644,7 @@ void WM_OT_collada_export(wmOperatorType *ot)
INT_MIN,
INT_MAX);
RNA_def_enum(func,
RNA_def_enum(ot->srna,
"export_animation_transformation_type_selection",
prop_bc_export_transformation_type,
0,
@ -652,20 +653,20 @@ void WM_OT_collada_export(wmOperatorType *ot)
"Note: The Animation transformation type in the Anim Tab "
"is always equal to the Object transformation type in the Geom tab");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"open_sim",
0,
"Export to SL/OpenSim",
"Compatibility mode for SL, OpenSim and other compatible online worlds");
RNA_def_boolean(func,
RNA_def_boolean(ot->srna,
"limit_precision",
0,
"Limit Precision",
"Reduce the precision of the exported data to 6 digits");
RNA_def_boolean(
func,
ot->srna,
"keep_bind_info",
0,
"Keep Bind Info",

View File

@ -81,7 +81,7 @@ static size_t array_chunk_size_calc(const size_t stride)
{
/* Return a chunk size that targets a size in bytes,
* this is done so boolean arrays don't add so much overhead and
* larger arrays aren't unreasonably big, see: #105205. */
* larger arrays aren't so big as to waste memory, see: #105205. */
return std::max(ARRAY_CHUNK_NUM_MIN, ARRAY_CHUNK_SIZE_IN_BYTES / power_of_2_max_i(stride));
}
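
For intuition, here is a minimal standalone sketch of that targeting rule, using illustrative stand-ins for ARRAY_CHUNK_SIZE_IN_BYTES and ARRAY_CHUNK_NUM_MIN (the real values are defined elsewhere in this file) and open-coding power_of_2_max_i:

/* Illustrative sketch only, not the Blender implementation. */
#include <algorithm>
#include <cstddef>

static size_t chunk_size_sketch(const size_t stride)
{
  const size_t chunk_bytes = 4096;  /* Assumed stand-in for ARRAY_CHUNK_SIZE_IN_BYTES. */
  const size_t chunk_num_min = 32;  /* Assumed stand-in for ARRAY_CHUNK_NUM_MIN. */
  /* Smallest power of two >= stride, mirroring power_of_2_max_i. */
  size_t pot = 1;
  while (pot < stride) {
    pot *= 2;
  }
  return std::max(chunk_num_min, chunk_bytes / pot);
}

With these stand-ins, a boolean array (stride 1) gets 4096-element chunks, while a 512-byte stride computes only 8 elements and is clamped up to the 32-element minimum, which is exactly the overhead/size trade-off the comment describes.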

View File

@ -768,7 +768,13 @@ bool SCULPT_vertex_has_unique_face_set(SculptSession *ss, PBVHVertRef vertex)
coord.y = vertex_index / key->grid_size;
int v1, v2;
const SubdivCCGAdjacencyType adjacency = BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(
ss->subdiv_ccg, &coord, ss->corner_verts, ss->polys, &v1, &v2);
ss->subdiv_ccg,
&coord,
ss->corner_verts.data(),
ss->corner_verts.size(),
ss->polys.data(),
&v1,
&v2);
switch (adjacency) {
case SUBDIV_CCG_ADJACENT_VERTEX:
return sculpt_check_unique_face_set_in_base_mesh(ss, v1);
@ -888,12 +894,11 @@ static void sculpt_vertex_neighbors_get_faces(SculptSession *ss,
continue;
}
const MPoly &poly = ss->polys[vert_map->indices[i]];
int f_adj_v[2];
if (poly_get_adj_loops_from_vert(&poly, ss->corner_verts, vertex.i, f_adj_v) != -1) {
for (int j = 0; j < ARRAY_SIZE(f_adj_v); j += 1) {
if (f_adj_v[j] != vertex.i) {
sculpt_vertex_neighbor_add(iter, BKE_pbvh_make_vref(f_adj_v[j]), f_adj_v[j]);
}
const blender::int2 f_adj_v = blender::bke::mesh::poly_find_adjecent_verts(
poly, ss->corner_verts, vertex.i);
for (int j = 0; j < 2; j++) {
if (f_adj_v[j] != vertex.i) {
sculpt_vertex_neighbor_add(iter, BKE_pbvh_make_vref(f_adj_v[j]), f_adj_v[j]);
}
}
}
@ -1003,7 +1008,13 @@ bool SCULPT_vertex_is_boundary(const SculptSession *ss, const PBVHVertRef vertex
coord.y = vertex_index / key->grid_size;
int v1, v2;
const SubdivCCGAdjacencyType adjacency = BKE_subdiv_ccg_coarse_mesh_adjacency_info_get(
ss->subdiv_ccg, &coord, ss->corner_verts, ss->polys, &v1, &v2);
ss->subdiv_ccg,
&coord,
ss->corner_verts.data(),
ss->corner_verts.size(),
ss->polys.data(),
&v1,
&v2);
switch (adjacency) {
case SUBDIV_CCG_ADJACENT_VERTEX:
return sculpt_check_boundary_vertex_in_base_mesh(ss, v1);

View File

@ -129,7 +129,6 @@ static void do_draw_face_sets_brush_task_cb_ex(void *__restrict userdata,
const Span<float3> positions(
reinterpret_cast<const float3 *>(SCULPT_mesh_deformed_positions_get(ss)),
SCULPT_vertex_count_get(ss));
const Span<int> corner_verts(ss->corner_verts, data->me->totloop);
AutomaskingNodeData automask_data;
SCULPT_automasking_node_begin(
data->ob, ss, ss->cache->automasking, &automask_data, data->nodes[n]);
@ -144,7 +143,7 @@ static void do_draw_face_sets_brush_task_cb_ex(void *__restrict userdata,
const MPoly &poly = ss->polys[vert_map->indices[j]];
const float3 poly_center = bke::mesh::poly_center_calc(
positions, corner_verts.slice(poly.loopstart, poly.totloop));
positions, ss->corner_verts.slice(poly.loopstart, poly.totloop));
if (!sculpt_brush_test_sq_fn(&test, poly_center)) {
continue;

View File

@ -273,7 +273,7 @@ static void sculpt_init_session(Main *bmain, Depsgraph *depsgraph, Scene *scene,
if (ob->sculpt != nullptr) {
BKE_sculptsession_free(ob);
}
ob->sculpt = MEM_cnew<SculptSession>(__func__);
ob->sculpt = MEM_new<SculptSession>(__func__);
ob->sculpt->mode_type = OB_MODE_SCULPT;
/* Trigger evaluation of modifier stack to ensure

View File

@ -656,14 +656,14 @@ static void timeline_cache_draw_cached_segments(PointCache *cache, uint pos_id)
static void timeline_cache_draw_single(PTCacheID *pid, float y_offset, float height, uint pos_id)
{
GPU_matrix_push();
GPU_matrix_translate_2f(0.0, (float)V2D_SCROLL_HANDLE_HEIGHT + y_offset);
GPU_matrix_translate_2f(0.0, float(V2D_SCROLL_HANDLE_HEIGHT) + y_offset);
GPU_matrix_scale_2f(1.0, height);
float color[4];
timeline_cache_color_get(pid, color);
immUniformColor4fv(color);
immRectf(pos_id, (float)pid->cache->startframe, 0.0, (float)pid->cache->endframe, 1.0);
immRectf(pos_id, float(pid->cache->startframe), 0.0, float(pid->cache->endframe), 1.0);
color[3] = 0.4f;
timeline_cache_modify_color_based_on_state(pid->cache, color);

View File

@ -178,7 +178,7 @@ static bool get_keyframe_extents(bAnimContext *ac, float *min, float *max, const
/* Find gp-frame which is less than or equal to current-frame. */
for (gpf = static_cast<bGPDframe *>(gpl->frames.first); gpf; gpf = gpf->next) {
if (!onlySel || (gpf->flag & GP_FRAME_SELECT)) {
const float framenum = (float)gpf->framenum;
const float framenum = float(gpf->framenum);
*min = min_ff(*min, framenum);
*max = max_ff(*max, framenum);
found = true;
@ -193,7 +193,7 @@ static bool get_keyframe_extents(bAnimContext *ac, float *min, float *max, const
for (masklay_shape = static_cast<MaskLayerShape *>(masklay->splines_shapes.first);
masklay_shape;
masklay_shape = masklay_shape->next) {
const float framenum = (float)masklay_shape->frame;
const float framenum = float(masklay_shape->frame);
*min = min_ff(*min, framenum);
*max = max_ff(*max, framenum);
found = true;
@ -231,8 +231,8 @@ static bool get_keyframe_extents(bAnimContext *ac, float *min, float *max, const
else {
/* set default range */
if (ac->scene) {
*min = (float)ac->scene->r.sfra;
*max = (float)ac->scene->r.efra;
*min = float(ac->scene->r.sfra);
*max = float(ac->scene->r.efra);
}
else {
*min = -5;
@ -397,7 +397,7 @@ static int actkeys_viewall(bContext *C, const bool only_sel)
/* view all -> the summary channel usually shows everything,
* and resides right at the top... */
v2d->cur.ymax = 0.0f;
v2d->cur.ymin = (float)-BLI_rcti_size_y(&v2d->mask);
v2d->cur.ymin = float(-BLI_rcti_size_y(&v2d->mask));
}
else {
/* locate first selected channel (or the active one), and frame those */
@ -850,7 +850,7 @@ static void insert_action_keys(bAnimContext *ac, short mode)
/* insert keyframes */
const AnimationEvalContext anim_eval_context = BKE_animsys_eval_context_construct(
ac->depsgraph, (float)scene->r.cfra);
ac->depsgraph, float(scene->r.cfra));
for (ale = static_cast<bAnimListElem *>(anim_data.first); ale; ale = ale->next) {
switch (ale->type) {
case ANIMTYPE_GPLAYER:
@ -1962,7 +1962,7 @@ static void mirror_action_keys(bAnimContext *ac, short mode)
TimeMarker *marker = ED_markers_get_first_selected(ac->markers);
if (marker) {
ked.f1 = (float)marker->frame;
ked.f1 = float(marker->frame);
}
else {
return;

View File

@ -163,8 +163,10 @@ static void actkeys_find_key_in_list_element(bAnimContext *ac,
/* half-size (for either side), but rounded up to nearest int (for easier targeting) */
key_hsize = roundf(key_hsize / 2.0f);
const Range2f range = {UI_view2d_region_to_view_x(v2d, region_x - (int)key_hsize),
UI_view2d_region_to_view_x(v2d, region_x + (int)key_hsize)};
const Range2f range = {
UI_view2d_region_to_view_x(v2d, region_x - int(key_hsize)),
UI_view2d_region_to_view_x(v2d, region_x + int(key_hsize)),
};
const ActKeyColumn *ak = ED_keylist_find_any_between(keylist, range);
if (ak) {
@ -1075,7 +1077,7 @@ static void columnselect_action_keys(bAnimContext *ac, short mode)
ce = MEM_cnew<CfraElem>("cfraElem");
BLI_addtail(&ked.list, ce);
ce->cfra = (float)scene->r.cfra;
ce->cfra = float(scene->r.cfra);
break;
case ACTKEYS_COLUMNSEL_MARKERS_COLUMN: /* list of selected markers */
@ -1416,10 +1418,10 @@ static void actkeys_select_leftright(bAnimContext *ac, short leftright, short se
if (leftright == ACTKEYS_LRSEL_LEFT) {
ked.f1 = MINAFRAMEF;
ked.f2 = (float)(scene->r.cfra + 0.1f);
ked.f2 = float(scene->r.cfra + 0.1f);
}
else {
ked.f1 = (float)(scene->r.cfra - 0.1f);
ked.f1 = float(scene->r.cfra - 0.1f);
ked.f2 = MAXFRAMEF;
}

View File

@ -101,9 +101,9 @@ static SpaceLink *action_create(const ScrArea *area, const Scene *scene)
BLI_addtail(&saction->regionbase, region);
region->regiontype = RGN_TYPE_WINDOW;
region->v2d.tot.xmin = (float)(scene->r.sfra - 10);
region->v2d.tot.ymin = (float)(-area->winy) / 3.0f;
region->v2d.tot.xmax = (float)(scene->r.efra + 10);
region->v2d.tot.xmin = float(scene->r.sfra - 10);
region->v2d.tot.ymin = float(-area->winy) / 3.0f;
region->v2d.tot.xmax = float(scene->r.efra + 10);
region->v2d.tot.ymax = 0.0f;
region->v2d.cur = region->v2d.tot;
@ -577,8 +577,8 @@ static void action_listener(const wmSpaceTypeListenerParams *params)
LISTBASE_FOREACH (ARegion *, region, &area->regionbase) {
if (region->regiontype == RGN_TYPE_WINDOW) {
Scene *scene = static_cast<Scene *>(wmn->reference);
region->v2d.tot.xmin = (float)(scene->r.sfra - 4);
region->v2d.tot.xmax = (float)(scene->r.efra + 4);
region->v2d.tot.xmin = float(scene->r.sfra - 4);
region->v2d.tot.xmax = float(scene->r.efra + 4);
break;
}
}

View File

@ -251,8 +251,8 @@ static int adjacent_edge(const Span<int> corner_verts,
const int vertex)
{
const int adjacent_loop_i = (corner_verts[loop_i] == vertex) ?
bke::mesh_topology::poly_loop_prev(poly, loop_i) :
bke::mesh_topology::poly_loop_next(poly, loop_i);
bke::mesh::poly_corner_prev(poly, loop_i) :
bke::mesh::poly_corner_next(poly, loop_i);
return corner_edges[adjacent_loop_i];
}

View File

@ -1476,7 +1476,6 @@ struct EdgeFeatData {
Object *ob_eval; /* For evaluated materials. */
const int *material_indices;
blender::Span<MEdge> edges;
blender::Span<MPoly> polys;
blender::Span<int> corner_verts;
blender::Span<int> corner_edges;
blender::Span<MLoopTri> looptris;
@ -2106,7 +2105,6 @@ static void lineart_geometry_object_load(LineartObjectInfo *ob_info,
edge_feat_data.ob_eval = ob_info->original_ob_eval;
edge_feat_data.material_indices = material_indices;
edge_feat_data.edges = me->edges();
edge_feat_data.polys = me->polys();
edge_feat_data.corner_verts = me->corner_verts();
edge_feat_data.corner_edges = me->corner_edges();
edge_feat_data.looptris = looptris;

View File

@ -1,3 +1,5 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include "gpu_testing.hh"
#include "MEM_guardedalloc.h"

View File

@ -398,10 +398,6 @@ enum {
/** \name Utility Macros
* \{ */
#define ME_POLY_LOOP_PREV(poly, i) \
((poly)->loopstart + (((i) + (poly)->totloop - 1) % (poly)->totloop))
#define ME_POLY_LOOP_NEXT(poly, i) ((poly)->loopstart + (((i) + 1) % (poly)->totloop))
/** Number of tri's that make up this polygon once tessellated. */
#define ME_POLY_TRI_TOT(poly) ((poly)->totloop - 2)
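
Judging by the call sites updated elsewhere in this commit (bke::mesh::poly_corner_prev/next), the replacement functions presumably keep the same wrap-around arithmetic as these removed macros. A hedged sketch, treating `i` as an offset from `loopstart` exactly as the macro bodies read:

/* Sketch of the removed macros' wrap-around (assumption: `i` counts from loopstart). */
static int poly_corner_prev_sketch(const int loopstart, const int totloop, const int i)
{
  return loopstart + ((i + totloop - 1) % totloop);
}
static int poly_corner_next_sketch(const int loopstart, const int totloop, const int i)
{
  return loopstart + ((i + 1) % totloop);
}
/* For a quad (totloop == 4): prev of corner 0 wraps to 3, next of corner 3 wraps to 0. */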

View File

@ -108,6 +108,12 @@
Mesh *me = rna_mesh(ptr); \
CustomData *data = rna_mesh_##customdata_type(ptr); \
if (data) { \
if (UNLIKELY(value < 0)) { \
value = 0; \
} \
else if (value > 0) { \
value = min_ii(value, CustomData_number_of_layers(data, layer_type) - 1); \
} \
CustomData_set_layer_##active_type(data, layer_type, value); \
BKE_mesh_tessface_clear(me); \
} \
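
The added guard clamps the requested active/render/clone/mask layer index into the valid range before handing it to CustomData. As a standalone sketch of the same clamp (a hypothetical helper, not part of the RNA macro):

/* Hypothetical helper mirroring the macro's clamp. */
static int clamp_layer_index(int value, const int layers_num)
{
  if (value < 0) {
    value = 0;
  }
  else if (value > 0) {
    /* min_ii(value, layers_num - 1) in Blender terms. */
    value = (value < layers_num - 1) ? value : layers_num - 1;
  }
  return value;
}

Note that the `value > 0` branch (rather than a plain upper clamp) leaves a request of 0 untouched, so when there are no layers at all the index stays 0 instead of being clamped to -1.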

View File

@ -2242,8 +2242,7 @@ bool rna_GPencil_object_poll(PointerRNA *UNUSED(ptr), PointerRNA value)
bool rna_Object_use_dynamic_topology_sculpting_get(PointerRNA *ptr)
{
SculptSession *ss = ((Object *)ptr->owner_id)->sculpt;
return (ss && ss->bm);
return BKE_object_sculpt_use_dyntopo((Object *)ptr->owner_id);
}
static void rna_object_lineart_update(Main *UNUSED(bmain), Scene *UNUSED(scene), PointerRNA *ptr)

View File

@ -385,8 +385,8 @@ static void rna_Sculpt_update(bContext *C, PointerRNA *UNUSED(ptr))
WM_main_add_notifier(NC_OBJECT | ND_MODIFIER, ob);
if (ob->sculpt) {
ob->sculpt->bm_smooth_shading = ((scene->toolsettings->sculpt->flags &
SCULPT_DYNTOPO_SMOOTH_SHADING) != 0);
BKE_object_sculpt_dyntopo_smooth_shading_set(
ob, ((scene->toolsettings->sculpt->flags & SCULPT_DYNTOPO_SMOOTH_SHADING) != 0));
}
}
}

View File

@ -245,8 +245,8 @@ static Mesh *modifyMesh(ModifierData *md, const ModifierEvalContext *ctx, Mesh *
sculpt_session->totvert = mesh->totvert;
sculpt_session->totpoly = mesh->totpoly;
sculpt_session->vert_positions = nullptr;
sculpt_session->polys = nullptr;
sculpt_session->corner_verts = nullptr;
sculpt_session->polys = {};
sculpt_session->corner_verts = {};
}
// BKE_subdiv_stats_print(&subdiv->stats);
}

View File

@ -464,8 +464,8 @@ static Frame **collect_hull_frames(int v,
hull_frames = MEM_cnew_array<Frame *>(*tothullframe, __func__);
hull_frames_num = 0;
for (i = 0; i < emap[v].count; i++) {
const MEdge *edge = &edges[emap[v].indices[i]];
f = &frames[BKE_mesh_edge_other_vert(edge, v)];
const MEdge &edge = edges[emap[v].indices[i]];
f = &frames[blender::bke::mesh::edge_other_vert(edge, v)];
/* Can't have adjacent branch nodes yet */
if (f->totframe) {
hull_frames[hull_frames_num++] = &f->frames[0];
@ -745,7 +745,7 @@ static void build_emats_stack(BLI_Stack *stack,
parent_is_branch = ((emap[parent_v].count > 2) || (vs[parent_v].flag & MVERT_SKIN_ROOT));
v = BKE_mesh_edge_other_vert(&edges[e], parent_v);
v = blender::bke::mesh::edge_other_vert(edges[e], parent_v);
emat[e].origin = parent_v;
/* If parent is a branch node, start a new edge chain */
@ -796,9 +796,10 @@ static EMat *build_edge_mats(const MVertSkin *vs,
for (v = 0; v < verts_num; v++) {
if (vs[v].flag & MVERT_SKIN_ROOT) {
if (emap[v].count >= 1) {
const MEdge *edge = &edges[emap[v].indices[0]];
calc_edge_mat(
stack_elem.mat, vert_positions[v], vert_positions[BKE_mesh_edge_other_vert(edge, v)]);
const MEdge &edge = edges[emap[v].indices[0]];
calc_edge_mat(stack_elem.mat,
vert_positions[v],
vert_positions[blender::bke::mesh::edge_other_vert(edge, v)]);
stack_elem.parent_v = v;
/* Add adjacent edges to stack */

View File

@ -82,7 +82,7 @@ class CornerPreviousEdgeFieldInput final : public bke::MeshFieldInput {
mesh.totloop,
[polys, corner_edges, loop_to_poly_map = std::move(loop_to_poly_map)](const int corner_i) {
const MPoly &poly = polys[loop_to_poly_map[corner_i]];
return corner_edges[bke::mesh_topology::poly_loop_prev(poly, corner_i)];
return corner_edges[bke::mesh::poly_corner_prev(poly, corner_i)];
});
}

View File

@ -40,10 +40,10 @@ static CLG_LogRef LOG = {"bgl"};
static void report_deprecated_call(const char *function_name)
{
/* Only report first 100 deprecated calls. BGL is typically used inside an handler that is
/* Only report first 10 deprecated calls. BGL is typically used inside a handler that is
* triggered at refresh. */
static int times = 0;
while (times >= 100) {
while (times >= 10) {
return;
}
char message[256];
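
The hunk ends before the reporting itself; presumably the function goes on to format `message` and log it. A self-contained sketch of the rate-limit pattern (note the `while` above behaves as an `if`, since its body always returns):

/* Sketch of the call-count limiter (assumed shape, not the real function body). */
#include <stdio.h>

static void report_deprecated_call_sketch(const char *function_name)
{
  static int times = 0;
  if (times >= 10) {
    return; /* Stay silent after the first 10 reports. */
  }
  times++;
  /* Hypothetical message; the real code formats into `message` instead. */
  fprintf(stderr, "'%s' is deprecated and will be removed\n", function_name);
}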