Compare commits

...

6 Commits

Author SHA1 Message Date
21e446cc9d * PBVH node join code is now enabled for dyntopo bmesh
* Fixed bug in dyntopo bmesh time limiting code
2020-10-24 02:16:49 -07:00
6fcbabeb5e remove accidental debug return 2020-10-23 17:43:55 -07:00
8ca4337bdf * Fixed undo with bmesh dyntopo 2020-10-23 17:42:22 -07:00
6c9c7f14ec * Migrated optimizations from trimesh to bmesh dyntopo:
- Original coordinate/normals are now stored in customdata layers,
  instead of being looked up in the BMLog
- Vertex "indices" are now actually pointers to BMVerts.
- Dyntopo split/collapse is now time-limited
- Increased pbvh->leaf_limit to 2000
- Nodes are split after topology updates, not just after the user lets up
  the mouse.

* Also, renamed TMElemSet to TableGSet

TODO:
- Migrate PBVHNode->bm_[unique_verts/other_verts/faces] to TableGSet.
  Currently GHash is completely inlined in this branch which does almost
  the same thing performance-wise; inlining GHash seems beyond the scope of this
  project however.
2020-10-23 17:19:47 -07:00
8f6326d494 BMesh dyntopo works again, if you disable WITH_TRIMESH in BKE_pbvh.h 2020-10-23 15:53:39 -07:00
4faa2b4bc9 Rebase branch 2020-10-23 14:49:14 -07:00
80 changed files with 4282 additions and 724 deletions

View File

@@ -262,6 +262,10 @@ ForEachMacros:
- SET_SLOT_PROBING_BEGIN
- MAP_SLOT_PROBING_BEGIN
- VECTOR_SET_SLOT_PROBING_BEGIN
- TMS_ITER
- TM_ITER_VERT_TRIS
- TM_ITER_VERT_TRIEDGES
- TM_ITER_MESH
# Use once we bump the minimum version to version 8.
# # Without this string literals that in-line 'STRINGIFY' behave strangely (a bug?).

View File

@@ -105,6 +105,7 @@ add_subdirectory(editors)
add_subdirectory(windowmanager)
add_subdirectory(blenkernel)
add_subdirectory(blenlib)
add_subdirectory(trimesh)
add_subdirectory(bmesh)
add_subdirectory(draw)
add_subdirectory(render)

View File

@@ -311,6 +311,15 @@ void CustomData_bmesh_interp(struct CustomData *data,
int count,
void *dst_block);
struct TM_TriMesh;
void CustomData_trimesh_init_pool(struct TM_TriMesh *tm, struct CustomData *data, int totelem, const char htype);
bool CustomData_trimesh_merge(const struct CustomData *source,
struct CustomData *dest,
CustomDataMask mask,
eCDAllocType alloctype,
struct TM_TriMesh *bm,
const char htype);
/* swaps the data in the element corners, to new corners with indices as
* specified in corner_indices. for edges this is an array of length 2, for
* faces an array of length 4 */

View File

@@ -95,6 +95,8 @@ struct Mesh *BKE_multires_create_mesh(struct Depsgraph *depsgraph,
struct Object *object,
struct MultiresModifierData *mmd);
void BKE_multires_bmesh_space_set(struct Object *ob, struct BMesh *bm, int mode);
/* Get coordinates of a deformed base mesh which is an input to the given multires modifier.
* NOTE: The modifiers will be re-evaluated. */
float (*BKE_multires_create_deformed_base_mesh_vert_coords(struct Depsgraph *depsgraph,

View File

@@ -23,6 +23,8 @@
* \ingroup bke
*/
#include "BKE_pbvh.h"
#include "BLI_bitmap.h"
#include "BLI_utildefines.h"
#include "DNA_object_enums.h"
@@ -286,10 +288,10 @@ typedef struct SculptClothLengthConstraint {
* point, position for a previous state). In that case, elem_index_a and elem_index_b should be
* the same to avoid affecting two different vertices when solving the constraints.
* *elem_position points to the position which is owned by the element. */
int elem_index_a;
SculptIdx elem_index_a;
float *elem_position_a;
int elem_index_b;
SculptIdx elem_index_b;
float *elem_position_b;
float length;
@@ -342,7 +344,7 @@ typedef struct SculptPersistentBase {
typedef struct SculptVertexInfo {
/* Indexed by vertex, stores and ID of its topologically connected component. */
int *connected_component;
SculptIdx *connected_component;
/* Indexed by base mesh vertex index, stores if that vertex is a boundary. */
BLI_bitmap *boundary;
@@ -350,7 +352,7 @@ typedef struct SculptVertexInfo {
typedef struct SculptBoundaryEditInfo {
/* Vertex index from where the topology propagation reached this vertex. */
int original_vertex;
SculptIdx original_vertex;
/* How many steps were needed to reach this vertex from the boundary. */
int num_propagation_steps;
@@ -361,13 +363,13 @@ typedef struct SculptBoundaryEditInfo {
/* Edge for drawing the boundary preview in the cursor. */
typedef struct SculptBoundaryPreviewEdge {
int v1;
int v2;
SculptIdx v1;
SculptIdx v2;
} SculptBoundaryPreviewEdge;
typedef struct SculptBoundary {
/* Vertex indices of the active boundary. */
int *vertices;
SculptIdx *vertices;
int vertices_capacity;
int num_vertices;
@@ -385,12 +387,12 @@ typedef struct SculptBoundary {
bool forms_loop;
/* Initial vertex in the boundary which is closest to the current sculpt active vertex. */
int initial_vertex;
SculptIdx initial_vertex;
/* Vertex that at max_propagation_steps from the boundary and closest to the original active
* vertex that was used to initialize the boundary. This is used as a reference to check how much
* the deformation will go into the mesh and to calculate the strength of the brushes. */
int pivot_vertex;
SculptIdx pivot_vertex;
/* Stores the initial positions of the pivot and boundary initial vertex as they may be deformed
* during the brush action. This allows to use them as a reference positions and vectors for some
@@ -430,8 +432,7 @@ typedef struct SculptFakeNeighbors {
float current_max_distance;
/* Indexed by vertex, stores the vertex index of its fake neighbor if available. */
int *fake_neighbor_index;
SculptIdx *fake_neighbor_index;
} SculptFakeNeighbors;
/* Session data (mode-specific) */
@@ -470,10 +471,15 @@ typedef struct SculptSession {
* Face Set ID. Positive IDs are visible, negative IDs are hidden. */
int *face_sets;
struct TM_TriMesh *tm;
struct TriMeshLog *tm_log;
/* BMesh for dynamic topology sculpting */
struct BMesh *bm;
int cd_vert_node_offset;
int cd_face_node_offset;
int cd_origco_offset;
int cd_origno_offset;
bool bm_smooth_shading;
/* Undo/redo log for dynamic topology sculpting */
struct BMLog *bm_log;
@@ -500,7 +506,7 @@ typedef struct SculptSession {
struct FilterCache *filter_cache;
/* Cursor data and active vertex for tools */
int active_vertex_index;
SculptIdx active_vertex_index;
int active_face_index;
int active_grid_index;
@@ -522,7 +528,7 @@ typedef struct SculptSession {
struct Scene *scene;
/* Dynamic mesh preview */
int *preview_vert_index_list;
SculptIdx *preview_vert_index_list;
int preview_vert_index_count;
/* Pose Brush Preview */
@@ -579,7 +585,6 @@ typedef struct SculptSession {
* Set #Main.is_memfile_undo_flush_needed when enabling.
*/
char needs_flush_to_id;
} SculptSession;
void BKE_sculptsession_free(struct Object *ob);
@@ -588,6 +593,10 @@ void BKE_sculptsession_free_vwpaint_data(struct SculptSession *ss);
void BKE_sculptsession_bm_to_me(struct Object *ob, bool reorder);
void BKE_sculptsession_bm_to_me_for_render(struct Object *object);
void BKE_sculptsession_tm_to_me(struct Object *ob, bool reorder);
void BKE_sculptsession_tm_to_me_for_render(struct Object *object);
/* Create new color layer on object if it doesn't have one and if experimental feature set has
* sculpt vertex color enabled. Returns truth if new layer has been added, false otherwise. */
void BKE_sculpt_color_layer_create_if_needed(struct Object *object);

View File

@@ -23,6 +23,11 @@
#include "BLI_bitmap.h"
#include "BLI_ghash.h"
#include "BLI_threadsafe_mempool.h"
#include "stdint.h"
typedef intptr_t SculptIdx;
/* For embedding CCGKey in iterator. */
#include "BKE_ccg.h"
@@ -53,6 +58,79 @@ struct TaskParallelTLS;
typedef struct PBVH PBVH;
typedef struct PBVHNode PBVHNode;
//#define WITH_TRIMESH
//#define PROXY_ADVANCED
// experimental performance test of "data-based programming" approach
#ifdef PROXY_ADVANCED
typedef struct ProxyKey {
int node;
int pindex;
} ProxyKey;
# define MAX_PROXY_NEIGHBORS 12
typedef struct ProxyVertArray {
float **ownerco;
short **ownerno;
float (*co)[3];
float (*fno)[3];
short (*no)[3];
float *mask, **ownermask;
SculptIdx *index;
float **ownercolor, (*color)[4];
ProxyKey (*neighbors)[MAX_PROXY_NEIGHBORS];
int size;
int datamask;
GHash *indexmap;
} ProxyVertArray;
typedef enum {
PV_OWNERCO = 1,
PV_OWNERNO = 2,
PV_CO = 4,
PV_NO = 8,
PV_MASK = 16,
PV_OWNERMASK = 32,
PV_INDEX = 64,
PV_OWNERCOLOR = 128,
PV_COLOR = 256,
PV_NEIGHBORS = 512
} ProxyVertField;
typedef struct ProxyVertUpdateRec {
float *co, *no, *mask, *color;
int index, newindex;
} ProxyVertUpdateRec;
# define PBVH_PROXY_DEFAULT CO | INDEX | MASK
struct SculptSession;
void BKE_pbvh_ensure_proxyarrays(struct SculptSession *ss, PBVH *pbvh, int mask);
void BKE_pbvh_load_proxyarrays(PBVH *pbvh, PBVHNode **nodes, int totnode, int mask);
void BKE_pbvh_ensure_proxyarray(
struct SculptSession *ss,
struct PBVH *pbvh,
struct PBVHNode *node,
int mask,
struct GHash
*vert_node_map, // vert_node_map maps vertex SculptIdxs to PBVHNode indices; optional
bool check_indexmap,
bool force_update);
void BKE_pbvh_gather_proxyarray(PBVH *pbvh, PBVHNode **nodes, int totnode);
void BKE_pbvh_free_proxyarray(struct PBVH *pbvh, struct PBVHNode *node);
void BKE_pbvh_update_proxyvert(struct PBVH *pbvh, struct PBVHNode *node, ProxyVertUpdateRec *rec);
ProxyVertArray *BKE_pbvh_get_proxyarrays(struct PBVH *pbvh, struct PBVHNode *node);
#endif
typedef struct {
float (*co)[3];
} PBVHProxyNode;
@@ -79,6 +157,7 @@ typedef enum {
PBVH_UpdateTopology = 1 << 13,
PBVH_UpdateColor = 1 << 14,
PBVH_Delete = 1 << 15,
} PBVHNodeFlags;
typedef struct PBVHFrustumPlanes {
@@ -86,6 +165,32 @@ typedef struct PBVHFrustumPlanes {
int num_planes;
} PBVHFrustumPlanes;
/* TableGSet: a pointer set backed by a flat element table plus a hash map.
 * Elements live in `elems`; removed slots are left as NULL holes, which the
 * TMS_ITER macro skips. `ptr_to_idx` presumably maps an element pointer to
 * its slot index in `elems` -- TODO confirm against the implementation. */
typedef struct TableGSet {
  struct GHash *ptr_to_idx;
  void **elems; /* element table; may contain NULL holes */
  int size, length;
  int cur; /* one past the last used slot in `elems` (iteration bound) */
} TableGSet;

/* Fixed: empty parentheses declare an unprototyped function in C;
 * use (void) to declare a function taking no arguments. */
TableGSet *BLI_table_gset_new(void);
void BLI_table_gset_free(TableGSet *ts);
void BLI_table_gset_insert(TableGSet *ts, void *elem);
bool BLI_table_gset_add(TableGSet *ts, void *elem);
void BLI_table_gset_remove(TableGSet *ts, void *elem, bool ignoreExist);
bool BLI_table_gset_has(TableGSet *ts, void *elem);

/* Iterate live elements of a TableGSet: assigns each non-NULL entry of
 * ts->elems to `v` in turn, skipping NULL holes. Must be closed with
 * TMS_ITER_END. Macro arguments are parenthesized (CERT PRE01-C) so that
 * expressions such as `&set` may be passed for `ts`. */
#define TMS_ITER(v, ts) \
  { \
    int _i1; \
    for (_i1 = 0; _i1 < (ts)->cur; _i1++) { \
      if (!(ts)->elems[_i1]) { \
        continue; \
      } \
      v = (ts)->elems[_i1];

#define TMS_ITER_END \
  } \
  }
void BKE_pbvh_set_frustum_planes(PBVH *pbvh, PBVHFrustumPlanes *planes);
void BKE_pbvh_get_frustum_planes(PBVH *pbvh, PBVHFrustumPlanes *planes);
@@ -101,6 +206,8 @@ typedef void (*BKE_pbvh_SearchNearestCallback)(PBVHNode *node, void *data, float
/* Building */
struct TM_TriMesh;
PBVH *BKE_pbvh_new(void);
void BKE_pbvh_build_mesh(PBVH *pbvh,
const struct Mesh *mesh,
@@ -125,8 +232,21 @@ void BKE_pbvh_build_bmesh(PBVH *pbvh,
bool smooth_shading,
struct BMLog *log,
const int cd_vert_node_offset,
const int cd_face_node_offset);
void BKE_pbvh_free(PBVH *pbvh);
const int cd_face_node_offset,
const int cd_origco_offset,
const int cd_origno_offset);
void BKE_pbvh_build_trimesh(PBVH *bvh,
struct TM_TriMesh *bm,
bool smooth_shading,
struct TriMeshLog *log,
const int cd_vert_node_offset,
const int cd_face_node_offset);
struct BMVert;
void BKE_pbvh_bmesh_update_origvert(PBVH *bvh, struct BMVert *v);
void BKE_pbvh_free(PBVH *bvh);
// void BKE_pbvh_free_layer_disp(PBVH *bvh);
/* Hierarchical Search in the BVH, two methods:
* - for each hit calling a callback
@@ -161,7 +281,7 @@ bool BKE_pbvh_node_raycast(PBVH *pbvh,
const float ray_normal[3],
struct IsectRayPrecalc *isect_precalc,
float *depth,
int *active_vertex_index,
SculptIdx *active_vertex_index,
int *active_face_grid_index,
float *face_normal);
@@ -170,6 +290,11 @@ bool BKE_pbvh_bmesh_node_raycast_detail(PBVHNode *node,
struct IsectRayPrecalc *isect_precalc,
float *depth,
float *r_edge_length);
bool BKE_pbvh_trimesh_node_raycast_detail(PBVHNode *node,
const float ray_start[3],
struct IsectRayPrecalc *isect_precalc,
float *depth,
float *r_edge_length);
/* for orthographic cameras, project the far away ray segment points to the root node so
* we can have better precision. */
@@ -207,11 +332,7 @@ void BKE_pbvh_draw_debug_cb(
void *user_data);
/* PBVH Access */
typedef enum {
PBVH_FACES,
PBVH_GRIDS,
PBVH_BMESH,
} PBVHType;
typedef enum { PBVH_FACES, PBVH_GRIDS, PBVH_BMESH, PBVH_TRIMESH } PBVHType;
PBVHType BKE_pbvh_type(const PBVH *pbvh);
bool BKE_pbvh_has_faces(const PBVH *pbvh);
@@ -238,6 +359,8 @@ int BKE_pbvh_get_grid_num_vertices(const PBVH *pbvh);
/* Only valid for type == PBVH_BMESH */
struct BMesh *BKE_pbvh_get_bmesh(PBVH *pbvh);
struct TM_TriMesh *BKE_pbvh_get_trimesh(PBVH *pbvh);
void BKE_pbvh_topology_detail_size_set(PBVH *pbvh, float detail_size);
void BKE_pbvh_bmesh_detail_size_set(PBVH *pbvh, float detail_size);
typedef enum {
@@ -250,8 +373,17 @@ bool BKE_pbvh_bmesh_update_topology(PBVH *pbvh,
const float view_normal[3],
float radius,
const bool use_frontface,
const bool use_projected);
const bool use_projected,
int sym_axis);
bool BKE_pbvh_trimesh_update_topology(PBVH *bvh,
PBVHTopologyUpdateMode mode,
const float center[3],
const float view_normal[3],
float radius,
const bool use_frontface,
const bool use_projected,
int sym_axis);
/* Node Access */
void BKE_pbvh_node_mark_update(PBVHNode *node);
@@ -298,6 +430,12 @@ struct GSet *BKE_pbvh_bmesh_node_faces(PBVHNode *node);
void BKE_pbvh_bmesh_node_save_orig(struct BMesh *bm, PBVHNode *node);
void BKE_pbvh_bmesh_after_stroke(PBVH *pbvh);
struct TableGSet *BKE_pbvh_trimesh_node_unique_verts(PBVHNode *node);
struct TableGSet *BKE_pbvh_trimesh_node_other_verts(PBVHNode *node);
struct GSet *BKE_pbvh_trimesh_node_faces(PBVHNode *node);
void BKE_pbvh_trimesh_node_save_orig(struct TM_TriMesh *tm, PBVHNode *node);
void BKE_pbvh_trimesh_after_stroke(PBVH *bvh);
/* Update Bounding Box/Redraw and clear flags */
void BKE_pbvh_update_bounds(PBVH *pbvh, int flags);
@@ -334,6 +472,8 @@ bool BKE_pbvh_is_deformed(struct PBVH *pbvh);
#define PBVH_ITER_ALL 0
#define PBVH_ITER_UNIQUE 1
struct TMVert;
typedef struct PBVHVertexIter {
/* iteration */
int g;
@@ -342,7 +482,7 @@ typedef struct PBVHVertexIter {
int gx;
int gy;
int i;
int index;
SculptIdx index;
bool respect_hide;
/* grid */
@@ -365,12 +505,22 @@ typedef struct PBVHVertexIter {
struct GSetIterator bm_unique_verts;
struct GSetIterator bm_other_verts;
struct CustomData *bm_vdata;
int ti;
struct TableGSet *tm_cur_set;
struct TableGSet *tm_unique_verts;
struct TableGSet *tm_other_verts;
struct CustomData *tm_vdata;
int cd_vert_mask_offset;
int cd_origco_offset;
int cd_origno_offset;
/* result: these are all computed in the macro, but we assume
* that compiler optimization's will skip the ones we don't use */
struct MVert *mvert;
struct BMVert *bm_vert;
struct TMVert *tm_vert;
float *co;
short *no;
float *fno;
@@ -381,78 +531,193 @@ typedef struct PBVHVertexIter {
void pbvh_vertex_iter_init(PBVH *pbvh, PBVHNode *node, PBVHVertexIter *vi, int mode);
#define BKE_pbvh_vertex_iter_begin(pbvh, node, vi, mode) \
pbvh_vertex_iter_init(pbvh, node, &vi, mode); \
#ifdef WITH_TRIMESH
# define BKE_pbvh_vertex_iter_begin(pbvh, node, vi, mode) \
pbvh_vertex_iter_init(pbvh, node, &vi, mode); \
\
for (vi.i = 0, vi.g = 0; vi.g < vi.totgrid; vi.g++) { \
if (vi.grids) { \
vi.width = vi.gridsize; \
vi.height = vi.gridsize; \
vi.index = vi.grid_indices[vi.g] * vi.key.grid_area - 1; \
vi.grid = vi.grids[vi.grid_indices[vi.g]]; \
if (mode == PBVH_ITER_UNIQUE) { \
vi.gh = vi.grid_hidden[vi.grid_indices[vi.g]]; \
for (vi.i = 0, vi.g = 0; vi.g < vi.totgrid; vi.g++) { \
if (vi.grids) { \
vi.width = vi.gridsize; \
vi.height = vi.gridsize; \
vi.index = vi.grid_indices[vi.g] * vi.key.grid_area - 1; \
vi.grid = vi.grids[vi.grid_indices[vi.g]]; \
if (mode == PBVH_ITER_UNIQUE) { \
vi.gh = vi.grid_hidden[vi.grid_indices[vi.g]]; \
} \
} \
else { \
vi.width = vi.totvert; \
vi.height = 1; \
} \
} \
else { \
vi.width = vi.totvert; \
vi.height = 1; \
} \
\
for (vi.gy = 0; vi.gy < vi.height; vi.gy++) { \
for (vi.gx = 0; vi.gx < vi.width; vi.gx++, vi.i++) { \
if (vi.grid) { \
vi.co = CCG_elem_co(&vi.key, vi.grid); \
vi.fno = CCG_elem_no(&vi.key, vi.grid); \
vi.mask = vi.key.has_mask ? CCG_elem_mask(&vi.key, vi.grid) : NULL; \
vi.grid = CCG_elem_next(&vi.key, vi.grid); \
vi.index++; \
vi.visible = true; \
if (vi.gh) { \
if (BLI_BITMAP_TEST(vi.gh, vi.gy * vi.gridsize + vi.gx)) { \
continue; \
for (vi.gy = 0; vi.gy < vi.height; vi.gy++) { \
for (vi.gx = 0; vi.gx < vi.width; vi.gx++, vi.i++) { \
if (vi.grid) { \
vi.co = CCG_elem_co(&vi.key, vi.grid); \
vi.fno = CCG_elem_no(&vi.key, vi.grid); \
vi.mask = vi.key.has_mask ? CCG_elem_mask(&vi.key, vi.grid) : NULL; \
vi.grid = CCG_elem_next(&vi.key, vi.grid); \
vi.index++; \
vi.visible = true; \
if (vi.gh) { \
if (BLI_BITMAP_TEST(vi.gh, vi.gy * vi.gridsize + vi.gx)) { \
continue; \
} \
} \
} \
} \
else if (vi.mverts) { \
vi.mvert = &vi.mverts[vi.vert_indices[vi.gx]]; \
if (vi.respect_hide) { \
vi.visible = !(vi.mvert->flag & ME_HIDE); \
else if (vi.mverts) { \
vi.mvert = &vi.mverts[vi.vert_indices[vi.gx]]; \
if (vi.respect_hide) { \
vi.visible = !(vi.mvert->flag & ME_HIDE); \
if (mode == PBVH_ITER_UNIQUE && !vi.visible) { \
continue; \
} \
} \
else { \
BLI_assert(vi.visible); \
} \
vi.co = vi.mvert->co; \
vi.no = vi.mvert->no; \
vi.index = vi.vert_indices[vi.i]; \
if (vi.vmask) { \
vi.mask = &vi.vmask[vi.index]; \
} \
if (vi.vcol) { \
vi.col = vi.vcol[vi.index].color; \
} \
} \
else if (vi.tm_vdata) { \
TMVert *tv = NULL; \
while (!tv) { \
if (!vi.tm_cur_set->elems || vi.ti >= vi.tm_cur_set->cur) { \
if (vi.tm_cur_set != vi.tm_other_verts) { \
vi.tm_cur_set = vi.tm_other_verts; \
vi.ti = 0; \
if (!vi.tm_cur_set->elems || vi.ti >= vi.tm_other_verts->cur) { \
break; \
} \
} \
else { \
break; \
} \
} \
else { \
tv = vi.tm_cur_set->elems[vi.ti++]; \
if (tv && BLI_safepool_elem_is_dead(tv)) { \
printf("dead vert: %p\n", tv); \
tv = NULL; \
} \
} \
} \
if (!tv) { \
continue; \
} \
vi.tm_vert = tv; \
vi.visible = !TM_elem_flag_test_bool(vi.tm_vert, TM_ELEM_HIDDEN); \
if (mode == PBVH_ITER_UNIQUE && !vi.visible) { \
continue; \
} \
vi.co = vi.tm_vert->co; \
vi.fno = vi.tm_vert->no; \
vi.index = (SculptIdx)vi.tm_vert; \
vi.mask = TM_ELEM_CD_GET_VOID_P(vi.tm_vert, vi.cd_vert_mask_offset); \
} \
else { \
BLI_assert(vi.visible); \
} \
vi.co = vi.mvert->co; \
vi.no = vi.mvert->no; \
vi.index = vi.vert_indices[vi.i]; \
if (vi.vmask) { \
vi.mask = &vi.vmask[vi.index]; \
} \
if (vi.vcol) { \
vi.col = vi.vcol[vi.index].color; \
} \
if (!BLI_gsetIterator_done(&vi.bm_unique_verts)) { \
vi.bm_vert = BLI_gsetIterator_getKey(&vi.bm_unique_verts); \
BLI_gsetIterator_step(&vi.bm_unique_verts); \
} \
else { \
vi.bm_vert = BLI_gsetIterator_getKey(&vi.bm_other_verts); \
BLI_gsetIterator_step(&vi.bm_other_verts); \
} \
vi.visible = !BM_elem_flag_test_bool(vi.bm_vert, BM_ELEM_HIDDEN); \
if (mode == PBVH_ITER_UNIQUE && !vi.visible) { \
continue; \
} \
vi.co = vi.bm_vert->co; \
vi.fno = vi.bm_vert->no; \
vi.index = BM_elem_index_get(vi.bm_vert); \
vi.mask = BM_ELEM_CD_GET_VOID_P(vi.bm_vert, vi.cd_vert_mask_offset); \
}
#else
# define BKE_pbvh_vertex_iter_begin(pbvh, node, vi, mode) \
pbvh_vertex_iter_init(pbvh, node, &vi, mode); \
\
for (vi.i = 0, vi.g = 0; vi.g < vi.totgrid; vi.g++) { \
if (vi.grids) { \
vi.width = vi.gridsize; \
vi.height = vi.gridsize; \
vi.index = vi.grid_indices[vi.g] * vi.key.grid_area - 1; \
vi.grid = vi.grids[vi.grid_indices[vi.g]]; \
if (mode == PBVH_ITER_UNIQUE) { \
vi.gh = vi.grid_hidden[vi.grid_indices[vi.g]]; \
} \
else { \
if (!BLI_gsetIterator_done(&vi.bm_unique_verts)) { \
vi.bm_vert = BLI_gsetIterator_getKey(&vi.bm_unique_verts); \
BLI_gsetIterator_step(&vi.bm_unique_verts); \
} \
else { \
vi.width = vi.totvert; \
vi.height = 1; \
} \
\
for (vi.gy = 0; vi.gy < vi.height; vi.gy++) { \
for (vi.gx = 0; vi.gx < vi.width; vi.gx++, vi.i++) { \
if (vi.grid) { \
vi.co = CCG_elem_co(&vi.key, vi.grid); \
vi.fno = CCG_elem_no(&vi.key, vi.grid); \
vi.mask = vi.key.has_mask ? CCG_elem_mask(&vi.key, vi.grid) : NULL; \
vi.grid = CCG_elem_next(&vi.key, vi.grid); \
vi.index++; \
vi.visible = true; \
if (vi.gh) { \
if (BLI_BITMAP_TEST(vi.gh, vi.gy * vi.gridsize + vi.gx)) { \
continue; \
} \
} \
} \
else if (vi.mverts) { \
vi.mvert = &vi.mverts[vi.vert_indices[vi.gx]]; \
if (vi.respect_hide) { \
vi.visible = !(vi.mvert->flag & ME_HIDE); \
if (mode == PBVH_ITER_UNIQUE && !vi.visible) { \
continue; \
} \
} \
else { \
BLI_assert(vi.visible); \
} \
vi.co = vi.mvert->co; \
vi.no = vi.mvert->no; \
vi.index = vi.vert_indices[vi.i]; \
if (vi.vmask) { \
vi.mask = &vi.vmask[vi.index]; \
} \
if (vi.vcol) { \
vi.col = vi.vcol[vi.index].color; \
} \
} \
else { \
vi.bm_vert = BLI_gsetIterator_getKey(&vi.bm_other_verts); \
BLI_gsetIterator_step(&vi.bm_other_verts); \
} \
vi.visible = !BM_elem_flag_test_bool(vi.bm_vert, BM_ELEM_HIDDEN); \
if (mode == PBVH_ITER_UNIQUE && !vi.visible) { \
continue; \
} \
vi.co = vi.bm_vert->co; \
vi.fno = vi.bm_vert->no; \
vi.index = BM_elem_index_get(vi.bm_vert); \
vi.mask = BM_ELEM_CD_GET_VOID_P(vi.bm_vert, vi.cd_vert_mask_offset); \
}
if (!BLI_gsetIterator_done(&vi.bm_unique_verts)) { \
vi.bm_vert = BLI_gsetIterator_getKey(&vi.bm_unique_verts); \
BLI_gsetIterator_step(&vi.bm_unique_verts); \
} \
else { \
vi.bm_vert = BLI_gsetIterator_getKey(&vi.bm_other_verts); \
BLI_gsetIterator_step(&vi.bm_other_verts); \
} \
vi.visible = !BM_elem_flag_test_bool(vi.bm_vert, BM_ELEM_HIDDEN); \
if (mode == PBVH_ITER_UNIQUE && !vi.visible) { \
continue; \
} \
vi.co = vi.bm_vert->co; \
vi.fno = vi.bm_vert->no; \
vi.index = (SculptIdx)vi.bm_vert; \
vi.mask = BM_ELEM_CD_GET_VOID_P(vi.bm_vert, vi.cd_vert_mask_offset); \
}
#endif
#define BKE_pbvh_vertex_iter_end \
} \

View File

@@ -25,6 +25,7 @@ set(INC
../blenloader
../blentranslation
../bmesh
../trimesh
../depsgraph
../draw
../functions
@@ -207,6 +208,7 @@ set(SRC
intern/particle_distribute.c
intern/particle_system.c
intern/pbvh.c
intern/pbvh_trimesh.c
intern/pbvh_bmesh.c
intern/pointcache.c
intern/pointcloud.c
@@ -422,6 +424,7 @@ set(LIB
bf_blenloader
bf_blentranslation
bf_bmesh
bf_trimesh
bf_depsgraph
bf_draw
bf_functions

View File

@@ -1039,6 +1039,7 @@ CCGError ccgSubSurf_updateToFaces(CCGSubSurf *ss, int lvl, CCGFace **effectedF,
* and vertices, for multires displacements */
CCGError ccgSubSurf_stitchFaces(CCGSubSurf *ss, int lvl, CCGFace **effectedF, int numEffectedF)
{
return eCCGError_None;
CCGVert **effectedV;
CCGEdge **effectedE;
int numEffectedV, numEffectedE, freeF;

View File

@@ -86,7 +86,7 @@
# define ASSERT_IS_VALID_MESH(mesh)
#endif
static ThreadRWMutex loops_cache_lock = PTHREAD_RWLOCK_INITIALIZER;
static ThreadRWMutex loops_cache_lock = BLI_RWLOCK_INITIALIZER;
static void mesh_init_origspace(Mesh *mesh);
static void editbmesh_calc_modifier_final_normals(Mesh *mesh_final,
@@ -912,7 +912,7 @@ static void mesh_calc_modifiers(struct Depsgraph *depsgraph,
const bool has_multires = BKE_sculpt_multires_active(scene, ob) != NULL;
bool multires_applied = false;
const bool sculpt_mode = ob->mode & OB_MODE_SCULPT && ob->sculpt && !use_render;
const bool sculpt_dyntopo = (sculpt_mode && ob->sculpt->bm) && !use_render;
const bool sculpt_dyntopo = (sculpt_mode && (ob->sculpt->bm || ob->sculpt->tm)) && !use_render;
/* Modifier evaluation contexts for different types of modifiers. */
ModifierApplyFlag apply_render = use_render ? MOD_APPLY_RENDER : 0;

View File

@@ -422,7 +422,7 @@ void BKE_animdata_copy_id_action(Main *bmain, ID *id)
void BKE_animdata_duplicate_id_action(struct Main *bmain,
struct ID *id,
const eDupli_ID_Flags duplicate_flags)
const uint duplicate_flags)
{
if (duplicate_flags & USER_DUP_ACT) {
animdata_copy_id_action(bmain, id, true, (duplicate_flags & USER_DUP_LINKED_ID) != 0);

View File

@@ -479,8 +479,8 @@ static Collection *collection_duplicate_recursive(Main *bmain,
Collection *BKE_collection_duplicate(Main *bmain,
Collection *parent,
Collection *collection,
eDupli_ID_Flags duplicate_flags,
eLibIDDuplicateFlags duplicate_options)
uint duplicate_flags,
uint duplicate_options)
{
const bool is_subprocess = (duplicate_options & LIB_ID_DUPLICATE_IS_SUBPROCESS) != 0;

View File

@@ -39,6 +39,7 @@
#include "BLI_math.h"
#include "BLI_math_color_blend.h"
#include "BLI_mempool.h"
#include "BLI_threadsafe_mempool.h"
#include "BLI_path_util.h"
#include "BLI_string.h"
#include "BLI_string_utils.h"
@@ -58,6 +59,7 @@
#include "BLO_read_write.h"
#include "bmesh.h"
#include "trimesh.h"
#include "CLG_log.h"
@@ -3454,25 +3456,25 @@ void CustomData_bmesh_do_versions_update_active_layers(CustomData *fdata, Custom
}
}
void CustomData_bmesh_init_pool(CustomData *data, int totelem, const char htype)
void CustomData_trimesh_init_pool(TM_TriMesh *tm, CustomData *data, int totelem, const char htype)
{
int chunksize;
/* Dispose old pools before calling here to avoid leaks */
BLI_assert(data->pool == NULL);
BLI_assert(data->tpool == NULL);
switch (htype) {
case BM_VERT:
chunksize = bm_mesh_chunksize_default.totvert;
case TM_VERTEX:
chunksize = 512;
break;
case BM_EDGE:
chunksize = bm_mesh_chunksize_default.totedge;
case TM_EDGE:
chunksize = 1024;
break;
case BM_LOOP:
chunksize = bm_mesh_chunksize_default.totloop;
case TM_LOOP:
chunksize = 2048;
break;
case BM_FACE:
chunksize = bm_mesh_chunksize_default.totface;
case TM_TRI:
chunksize = 512;
break;
default:
BLI_assert(0);
@@ -3482,10 +3484,137 @@ void CustomData_bmesh_init_pool(CustomData *data, int totelem, const char htype)
/* If there are no layers, no pool is needed just yet */
if (data->totlayer) {
data->pool = BLI_mempool_create(data->totsize, totelem, chunksize, BLI_MEMPOOL_NOP);
data->tpool = BLI_safepool_create(data->totsize, chunksize, tm->maxthread);
}
}
//XXX original code got axed in. . .copy/paste error? anyway, restore it from GIT later
/* Allocate the thread-safe element pool for a BMesh CustomData block.
 * Any previous pool must have been disposed of by the caller (asserted
 * below) to avoid leaks. NOTE: `totelem` is currently unused by this
 * implementation -- kept for API compatibility; verify whether it should
 * feed into the pool sizing. */
void CustomData_bmesh_init_pool(CustomData *data, int totelem, const char htype)
{
  /* Dispose old pools before calling here to avoid leaks. */
  BLI_assert(data->tpool == NULL);

  /* Chunk size chosen per element type; BM_VERT and BM_FACE share the
   * default of 512. */
  int chunksize = 512;

  if (htype == BM_EDGE) {
    chunksize = 1024;
  }
  else if (htype == BM_LOOP) {
    chunksize = 2048;
  }
  else if (htype != BM_VERT && htype != BM_FACE) {
    /* Invalid element type: assert in debug builds, fall back to 512. */
    BLI_assert(0);
  }

  /* If there are no layers, no pool is needed just yet. */
  if (data->totlayer) {
    data->tpool = BLI_safepool_create(data->totsize, chunksize, 1);
  }
}
/* Merge the layers of `source` (filtered by `mask`) into `dest` for a
 * TriMesh, rebuilding dest's element pool and re-laying-out the customdata
 * block of every existing element so it matches the new layer layout.
 * Returns false if nothing matched the mask or CustomData_merge failed;
 * returns true on success.
 * NOTE(review): the parameter is named `bm` but is a TM_TriMesh, and the
 * switch below mixes TM_* and BM_* element-type constants -- looks like a
 * copy/paste from CustomData_bmesh_merge; confirm BM_LOOP/BM_FACE have the
 * intended values here. */
bool CustomData_trimesh_merge(const CustomData *source,
CustomData *dest,
CustomDataMask mask,
eCDAllocType alloctype,
TM_TriMesh *bm,
const char htype)
{
TMElement *h;
TM_TriMeshIter iter;
CustomData destold;
void *tmp;
int iter_type;
int totelem;
/* Nothing to merge: no layer in `source` matches `mask`. */
if (CustomData_number_of_layers_typemask(source, mask) == 0) {
return false;
}
/* copy old layer description so that old data can be copied into
 * the new allocation */
destold = *dest;
if (destold.layers) {
destold.layers = MEM_dupallocN(destold.layers);
}
if (CustomData_merge(source, dest, mask, alloctype, 0) == false) {
/* Merge failed: free the duplicated layer description and bail. */
if (destold.layers) {
MEM_freeN(destold.layers);
}
return false;
}
/* Pick the mesh iterator and element count for the requested type.
 * Loops have no direct iterator (iter_type == -1): they are reached
 * through the triangles, three per tri, in the else-branch below. */
switch (htype) {
case TM_VERTEX:
iter_type = TM_VERTS_OF_MESH;
totelem = bm->totvert;
break;
case TM_EDGE:
iter_type = TM_EDGES_OF_MESH;
totelem = bm->totedge;
break;
case BM_LOOP:
iter_type = -1;
totelem = bm->tottri*3;
break;
case BM_FACE:
iter_type = TM_TRIS_OF_MESH;
totelem = bm->tottri;
break;
default: /* should never happen */
BLI_assert(!"invalid type given");
iter_type = TM_VERTS_OF_MESH;
totelem = bm->totvert;
break;
}
/* Replace dest's pool with one sized for the new layer layout. The old
 * pool pointer still lives in `destold` and is destroyed at the end,
 * after the per-element data has been migrated out of it. */
dest->tpool = NULL;
CustomData_trimesh_init_pool(bm, dest, totelem, htype);
if (iter_type != -1) {
/*ensure all current elements follow new customdata layout*/
TM_ITER_MESH (h, &iter, bm, iter_type) {
tmp = NULL;
CustomData_bmesh_copy_data(&destold, dest, h->customdata, &tmp);
CustomData_bmesh_free_block(&destold, &h->customdata);
h->customdata = tmp;
}
}
else {
TMFace *f;
/*ensure all current elements follow new customdata layout*/
/* NOTE(review): BM_FACES_OF_MESH is a BMesh constant; presumably this
 * should be TM_TRIS_OF_MESH as in the switch above -- confirm. */
TM_ITER_MESH (f, &iter, bm, BM_FACES_OF_MESH) {
for (int i=0; i<3; i++) {
TMLoopData *l = TM_GET_TRI_LOOP(f, i);
tmp = NULL;
CustomData_bmesh_copy_data(&destold, dest, l->customdata, &tmp);
CustomData_bmesh_free_block(&destold, &l->customdata);
l->customdata = tmp;
}
}
}
/* All element blocks migrated: the old pool and the duplicated layer
 * description can now be released. */
if (destold.tpool) {
BLI_safepool_destroy(destold.tpool);
}
if (destold.layers) {
MEM_freeN(destold.layers);
}
return true;
}
bool CustomData_bmesh_merge(const CustomData *source,
CustomData *dest,
CustomDataMask mask,
@@ -3538,7 +3667,7 @@ bool CustomData_bmesh_merge(const CustomData *source,
break;
}
dest->pool = NULL;
dest->tpool = NULL;
CustomData_bmesh_init_pool(dest, totelem, htype);
if (iter_type != BM_LOOPS_OF_FACE) {
@@ -3569,8 +3698,8 @@ bool CustomData_bmesh_merge(const CustomData *source,
}
}
if (destold.pool) {
BLI_mempool_destroy(destold.pool);
if (destold.tpool) {
BLI_safepool_destroy(destold.tpool);
}
if (destold.layers) {
MEM_freeN(destold.layers);
@@ -3596,7 +3725,7 @@ void CustomData_bmesh_free_block(CustomData *data, void **block)
}
if (data->totsize) {
BLI_mempool_free(data->pool, *block);
BLI_safepool_free(data->tpool, *block);
}
*block = NULL;
@@ -3631,7 +3760,7 @@ static void CustomData_bmesh_alloc_block(CustomData *data, void **block)
}
if (data->totsize > 0) {
*block = BLI_mempool_alloc(data->pool);
*block = BLI_safepool_alloc(data->tpool);
}
else {
*block = NULL;

View File

@@ -623,7 +623,7 @@ ID *BKE_id_copy(Main *bmain, const ID *id)
* Invokes the appropriate copy method for the block and returns the result in
* newid, unless test. Returns true if the block can be copied.
*/
ID *BKE_id_copy_for_duplicate(Main *bmain, ID *id, const eDupli_ID_Flags duplicate_flags)
ID *BKE_id_copy_for_duplicate(Main *bmain, ID *id, const uint duplicate_flags)
{
if (id == NULL) {
return id;

View File

@@ -18,6 +18,8 @@
* \ingroup bke
*/
#define NOMINMAX
#include "DNA_mesh_types.h"
#include "DNA_meshdata_types.h"
#include "DNA_object_types.h"
@@ -87,25 +89,50 @@ static void reserve_hash_maps(const Mesh *mesh,
MutableSpan<EdgeMap> edge_maps)
{
const int totedge_guess = std::max(keep_existing_edges ? mesh->totedge : 0, mesh->totpoly * 2);
parallel_for_each(
edge_maps, [&](EdgeMap &edge_map) { edge_map.reserve(totedge_guess / edge_maps.size()); });
#ifndef __clang__
parallel_for_each(edge_maps,
[&](EdgeMap &edge_map) {
#else
int ilen = edge_maps.size();
for (int i = 0; i < ilen; i++) {
EdgeMap &edge_map = edge_maps[i];
#endif
edge_map.reserve(totedge_guess / edge_maps.size());
}
#ifndef __clang__
);
#endif
}
static void add_existing_edges_to_hash_maps(Mesh *mesh,
MutableSpan<EdgeMap> edge_maps,
uint32_t parallel_mask)
{
#ifndef __clang__
/* Assume existing edges are valid. */
parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
const int task_index = &edge_map - &edge_maps[0];
for (const MEdge &edge : Span(mesh->medge, mesh->totedge)) {
OrderedEdge ordered_edge{edge.v1, edge.v2};
/* Only add the edge when it belongs into this map. */
if (task_index == (parallel_mask & ordered_edge.hash2())) {
edge_map.add_new(ordered_edge, {&edge});
}
}
});
parallel_for_each(edge_maps,
[&](EdgeMap &edge_map) {
#else
int ilen = edge_maps.size();
for (int i = 0; i < ilen; i++) {
EdgeMap &edge_map = edge_maps[i];
#endif
const int task_index = &edge_map - &edge_maps[0];
for (const MEdge &edge : Span(mesh->medge, mesh->totedge)) {
OrderedEdge ordered_edge{edge.v1, edge.v2};
/* Only add the edge when it belongs into this map. */
if (task_index == (parallel_mask & ordered_edge.hash2())) {
edge_map.add_new(ordered_edge, {&edge});
}
}
}
#ifndef __clang__
);
#endif
}
static void add_polygon_edges_to_hash_maps(Mesh *mesh,
@@ -113,24 +140,36 @@ static void add_polygon_edges_to_hash_maps(Mesh *mesh,
uint32_t parallel_mask)
{
const Span<MLoop> loops{mesh->mloop, mesh->totloop};
parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
const int task_index = &edge_map - &edge_maps[0];
for (const MPoly &poly : Span(mesh->mpoly, mesh->totpoly)) {
Span<MLoop> poly_loops = loops.slice(poly.loopstart, poly.totloop);
const MLoop *prev_loop = &poly_loops.last();
for (const MLoop &next_loop : poly_loops) {
/* Can only be the same when the mesh data is invalid. */
if (prev_loop->v != next_loop.v) {
OrderedEdge ordered_edge{prev_loop->v, next_loop.v};
/* Only add the edge when it belongs into this map. */
if (task_index == (parallel_mask & ordered_edge.hash2())) {
edge_map.lookup_or_add(ordered_edge, {nullptr});
}
}
prev_loop = &next_loop;
}
}
});
#ifndef __clang__
parallel_for_each(edge_maps,
[&](EdgeMap &edge_map) {
#else
int ilen = edge_maps.size();
for (int i = 0; i < ilen; i++) {
EdgeMap &edge_map = edge_maps[i];
#endif
const int task_index = &edge_map - &edge_maps[0];
for (const MPoly &poly : Span(mesh->mpoly, mesh->totpoly)) {
Span<MLoop> poly_loops = loops.slice(poly.loopstart, poly.totloop);
const MLoop *prev_loop = &poly_loops.last();
for (const MLoop &next_loop : poly_loops) {
/* Can only be the same when the mesh data is invalid. */
if (prev_loop->v != next_loop.v) {
OrderedEdge ordered_edge{prev_loop->v, next_loop.v};
/* Only add the edge when it belongs into this map. */
if (task_index == (parallel_mask & ordered_edge.hash2())) {
edge_map.lookup_or_add(ordered_edge, {nullptr});
}
}
prev_loop = &next_loop;
}
}
}
#ifndef __clang__
);
#endif
}
static void serialize_and_initialize_deduplicated_edges(MutableSpan<EdgeMap> edge_maps,
@@ -146,27 +185,38 @@ static void serialize_and_initialize_deduplicated_edges(MutableSpan<EdgeMap> edg
edge_index_offsets[i + 1] = edge_index_offsets[i] + edge_maps[i].size();
}
parallel_for_each(edge_maps, [&](EdgeMap &edge_map) {
const int task_index = &edge_map - &edge_maps[0];
#ifndef __clang__
parallel_for_each(edge_maps,
[&](EdgeMap &edge_map) {
#else
int ilen = edge_maps.size();
int new_edge_index = edge_index_offsets[task_index];
for (EdgeMap::MutableItem item : edge_map.items()) {
MEdge &new_edge = new_edges[new_edge_index];
const MEdge *orig_edge = item.value.original_edge;
if (orig_edge != nullptr) {
/* Copy values from original edge. */
new_edge = *orig_edge;
}
else {
/* Initialize new edge. */
new_edge.v1 = item.key.v_low;
new_edge.v2 = item.key.v_high;
new_edge.flag = new_edge_flag;
}
item.value.index = new_edge_index;
new_edge_index++;
}
});
for (int i = 0; i < ilen; i++) {
EdgeMap &edge_map = edge_maps[i];
#endif
const int task_index = &edge_map - &edge_maps[0];
int new_edge_index = edge_index_offsets[task_index];
for (EdgeMap::MutableItem item : edge_map.items()) {
MEdge &new_edge = new_edges[new_edge_index];
const MEdge *orig_edge = item.value.original_edge;
if (orig_edge != nullptr) {
/* Copy values from original edge. */
new_edge = *orig_edge;
}
else {
/* Initialize new edge. */
new_edge.v1 = item.key.v_low;
new_edge.v2 = item.key.v_high;
new_edge.flag = new_edge_flag;
}
item.value.index = new_edge_index;
new_edge_index++;
}
}
#ifndef __clang__
);
#endif
}
static void update_edge_indices_in_poly_loops(Mesh *mesh,
@@ -174,31 +224,46 @@ static void update_edge_indices_in_poly_loops(Mesh *mesh,
uint32_t parallel_mask)
{
const MutableSpan<MLoop> loops{mesh->mloop, mesh->totloop};
parallel_for(IndexRange(mesh->totpoly), 100, [&](IndexRange range) {
for (const int poly_index : range) {
MPoly &poly = mesh->mpoly[poly_index];
MutableSpan<MLoop> poly_loops = loops.slice(poly.loopstart, poly.totloop);
#ifndef __clang__
parallel_for(IndexRange(mesh->totpoly),
100,
[&](IndexRange range) {
#else
int ilen = edge_maps.size();
MLoop *prev_loop = &poly_loops.last();
for (MLoop &next_loop : poly_loops) {
int edge_index;
if (prev_loop->v != next_loop.v) {
OrderedEdge ordered_edge{prev_loop->v, next_loop.v};
/* Double lookup: First find the map that contains the edge, then lookup the edge. */
const EdgeMap &edge_map = edge_maps[parallel_mask & ordered_edge.hash2()];
edge_index = edge_map.lookup(ordered_edge).index;
}
else {
/* This is an invalid edge; normally this does not happen in Blender,
* but it can be part of an imported mesh with invalid geometry. See
* T76514. */
edge_index = 0;
}
prev_loop->e = edge_index;
prev_loop = &next_loop;
}
}
});
for (int i = 0; i < ilen; i++) {
const EdgeMap &edge_map = edge_maps[i];
IndexRange range(i);
#endif
for (const int poly_index : range) {
MPoly &poly = mesh->mpoly[poly_index];
MutableSpan<MLoop> poly_loops = loops.slice(poly.loopstart, poly.totloop);
MLoop *prev_loop = &poly_loops.last();
for (MLoop &next_loop : poly_loops) {
int edge_index;
if (prev_loop->v != next_loop.v) {
OrderedEdge ordered_edge{prev_loop->v, next_loop.v};
/* Double lookup: First find the map that contains the edge, then lookup the
* edge. */
const EdgeMap &edge_map = edge_maps[parallel_mask & ordered_edge.hash2()];
edge_index = edge_map.lookup(ordered_edge).index;
}
else {
/* This is an invalid edge; normally this does not happen in Blender,
* but it can be part of an imported mesh with invalid geometry. See
* T76514. */
edge_index = 0;
}
prev_loop->e = edge_index;
prev_loop = &next_loop;
}
}
}
#ifndef __clang__
);
#endif
}
static int get_parallel_maps_count(const Mesh *mesh)
@@ -215,7 +280,20 @@ static int get_parallel_maps_count(const Mesh *mesh)
static void clear_hash_tables(MutableSpan<EdgeMap> edge_maps)
{
parallel_for_each(edge_maps, [](EdgeMap &edge_map) { edge_map.clear(); });
#ifndef __clang__
parallel_for_each(edge_maps,
[](EdgeMap &edge_map) {
#else
int ilen = edge_maps.size();
for (int i = 0; i < ilen; i++) {
EdgeMap &edge_map = edge_maps[i];
#endif
edge_map.clear();
}
#ifndef __clang__
);
#endif
}
} // namespace blender::bke::calc_edges

View File

@@ -1799,8 +1799,8 @@ void BKE_object_transform_copy(Object *ob_tar, const Object *ob_src)
*/
Object *BKE_object_duplicate(Main *bmain,
Object *ob,
eDupli_ID_Flags dupflag,
const eLibIDDuplicateFlags duplicate_options)
uint dupflag,
const uint duplicate_options)
{
const bool is_subprocess = (duplicate_options & LIB_ID_DUPLICATE_IS_SUBPROCESS) != 0;

View File

@@ -76,6 +76,7 @@
#include "BLO_read_write.h"
#include "bmesh.h"
#include "trimesh.h"
static void palette_init_data(ID *id)
{
@@ -1083,7 +1084,9 @@ bool BKE_paint_ensure(ToolSettings *ts, struct Paint **r_paint)
paint = &ts->imapaint.paint;
}
paint->flags |= PAINT_SHOW_BRUSH;
if (paint) {
paint->flags |= PAINT_SHOW_BRUSH;
}
*r_paint = paint;
@@ -1199,6 +1202,17 @@ bool paint_is_bmesh_face_hidden(BMFace *f)
return false;
}
/* Return true if the triangle should be treated as hidden for painting,
 * i.e. if any of its three vertices carries TM_ELEM_HIDDEN. */
bool paint_is_trimesh_face_hidden(TMFace *f)
{
  /* A single hidden corner hides the whole face. */
  bool ret = f->v1->flag & TM_ELEM_HIDDEN;

  ret = ret || (f->v2->flag & TM_ELEM_HIDDEN);
  ret = ret || (f->v3->flag & TM_ELEM_HIDDEN);

  return ret;
}
float paint_grid_paint_mask(const GridPaintMask *gpm, uint level, uint x, uint y)
{
int factor = BKE_ccg_factor(level, gpm->level);
@@ -1298,6 +1312,34 @@ void BKE_sculptsession_free_vwpaint_data(struct SculptSession *ss)
MEM_SAFE_FREE(gmap->poly_map_mem);
}
/* Write out the sculpt dynamic-topology TriMesh to the Mesh (data only;
 * no depsgraph tagging - see BKE_sculptsession_tm_to_me for that). */
static void sculptsession_tm_to_me_update_data_only(Object *ob, bool reorder)
{
  SculptSession *ss = ob->sculpt;

  if (ss->tm) {
    if (ob->data) {
      TM_TriMeshIter iter;
      TMFace *f;

      /* Propagate the session's smooth-shading setting to every triangle
       * before converting back to a Mesh. */
      TM_tri_iternew(ss->tm, &iter);
      f = TM_iterstep(&iter);
      for (; f; f = TM_iterstep(&iter)) {
        TM_elem_flag_set(f, TRIMESH_SMOOTH, ss->bm_smooth_shading);
      }

      /* NOTE(review): `reorder` is currently unused - the log-based element
       * reorder below is disabled. Confirm whether it should be re-enabled
       * once the trimesh log supports it. */
      //if (reorder) {
      //  BM_log_mesh_elems_reorder(ss->tm, ss->tm_log);
      //}

      TM_mesh_bm_to_me(NULL,
                       ss->tm,
                       ob->data,
                       (&(struct TMeshToMeshParams){
                           .calc_object_remap = false,
                       }));
    }
  }
}
/**
* Write out the sculpt dynamic-topology #BMesh to the #Mesh.
*/
@@ -1336,6 +1378,17 @@ void BKE_sculptsession_bm_to_me(Object *ob, bool reorder)
}
}
/* Flush the sculpt dynamic-topology TriMesh back into the Mesh datablock
 * and tag the object so its evaluated copies are rebuilt. */
void BKE_sculptsession_tm_to_me(Object *ob, bool reorder)
{
  if (!ob || !ob->sculpt) {
    return;
  }

  sculptsession_tm_to_me_update_data_only(ob, reorder);

  /* Ensure the object's evaluated mesh doesn't hold onto arrays
   * now realloc'd in the mesh T34473. */
  DEG_id_tag_update(&ob->id, ID_RECALC_GEOMETRY);
}
static void sculptsession_free_pbvh(Object *object)
{
SculptSession *ss = object->sculpt;
@@ -1387,14 +1440,43 @@ void BKE_sculptsession_bm_to_me_for_render(Object *object)
}
}
/* Flush the sculpt dynamic-topology TriMesh back into the Mesh for
 * rendering, releasing derived caches in place instead of tagging
 * the depsgraph. */
void BKE_sculptsession_tm_to_me_for_render(Object *object)
{
  if (object && object->sculpt) {
    if (object->sculpt->tm) {
      /* Ensure no pointers to the old arrays are stored in the DM.
       *
       * Apparently, we could not use DEG_id_tag_update here because
       * this would lead to the whole object surface disappearing,
       * so we release the DM in place instead.
       */
      BKE_object_free_derived_caches(object);

      sculptsession_tm_to_me_update_data_only(object, false);

      /* In contrast with sculptsession_bm_to_me there is no need for a
       * DAG tag update here - the derived mesh was freed and the old
       * pointers are not stored anywhere.
       */
    }
  }
}
void BKE_sculptsession_free(Object *ob)
{
if (ob && ob->sculpt) {
SculptSession *ss = ob->sculpt;
if (ss->bm) {
if (ss->tm || ss->bm) {
#ifdef WITH_TRIMESH
BKE_sculptsession_tm_to_me(ob, true);
TMesh_free(ss->tm);
ss->tm = NULL;
#else
BKE_sculptsession_bm_to_me(ob, true);
BM_mesh_free(ss->bm);
ss->bm = NULL;
#endif
}
sculptsession_free_pbvh(ob);
@@ -1439,7 +1521,7 @@ MultiresModifierData *BKE_sculpt_multires_active(Scene *scene, Object *ob)
ModifierData *md;
VirtualModifierData virtualModifierData;
if (ob->sculpt && ob->sculpt->bm) {
if (ob->sculpt && (ob->sculpt->bm || ob->sculpt->tm)) {
/* can't combine multires and dynamic topology */
return NULL;
}
@@ -1481,7 +1563,7 @@ static bool sculpt_modifiers_active(Scene *scene, Sculpt *sd, Object *ob)
Mesh *me = (Mesh *)ob->data;
VirtualModifierData virtualModifierData;
if (ob->sculpt->bm || BKE_sculpt_multires_active(scene, ob)) {
if (BKE_sculpt_multires_active(scene, ob) || ob->sculpt->bm || ob->sculpt->tm) {
return false;
}
@@ -1969,12 +2051,24 @@ void BKE_sculpt_sync_face_set_visibility(struct Mesh *mesh, struct SubdivCCG *su
static PBVH *build_pbvh_for_dynamic_topology(Object *ob)
{
PBVH *pbvh = BKE_pbvh_new();
BKE_pbvh_build_bmesh(pbvh,
ob->sculpt->bm,
#ifdef WITH_TRIMESH
BKE_pbvh_build_trimesh(pbvh,
ob->sculpt->tm,
ob->sculpt->bm_smooth_shading,
ob->sculpt->bm_log,
ob->sculpt->tm_log,
ob->sculpt->cd_vert_node_offset,
ob->sculpt->cd_face_node_offset);
#else
BKE_pbvh_build_bmesh(pbvh,
ob->sculpt->bm,
ob->sculpt->bm_smooth_shading,
ob->sculpt->bm_log,
ob->sculpt->cd_vert_node_offset,
ob->sculpt->cd_face_node_offset,
ob->sculpt->cd_origco_offset,
ob->sculpt->cd_origno_offset);
#endif
pbvh_show_mask_set(pbvh, ob->sculpt->show_mask);
pbvh_show_face_sets_set(pbvh, false);
return pbvh;
@@ -2069,8 +2163,8 @@ PBVH *BKE_sculpt_object_pbvh_ensure(Depsgraph *depsgraph, Object *ob)
return pbvh;
}
if (ob->sculpt->bm != NULL) {
/* Sculpting on a BMesh (dynamic-topology) gets a special PBVH. */
if (ob->sculpt->tm != NULL || ob->sculpt->bm != NULL) {
/* Sculpting on a TriMesh (dynamic-topology) gets a special PBVH. */
pbvh = build_pbvh_for_dynamic_topology(ob);
}
else {

View File

@@ -42,6 +42,7 @@
#include "GPU_buffers.h"
#include "bmesh.h"
#include "trimesh.h"
#include "atomic_ops.h"
@@ -49,7 +50,7 @@
#include <limits.h>
#define LEAF_LIMIT 10000
#define LEAF_LIMIT 600
//#define PERFCNTRS
@@ -595,6 +596,16 @@ void BKE_pbvh_build_mesh(PBVH *pbvh,
BB_reset(&cb);
for (int i = 0; i < totvert; i++) {
MVert *mv = verts + i;
for (int j = 0; j < 3; j++) {
if (isnan(mv->co[j])) {
mv->co[j] = 0.0f;
}
}
}
/* For each face, store the AABB and the AABB centroid */
prim_bbc = MEM_mallocN(sizeof(BBC) * looptri_num, "prim_bbc");
@@ -692,6 +703,27 @@ void BKE_pbvh_free(PBVH *pbvh)
if (node->face_vert_indices) {
MEM_freeN((void *)node->face_vert_indices);
}
if (node->tm_orco) {
MEM_freeN(node->tm_orco);
}
if (node->tm_ortri) {
MEM_freeN(node->tm_ortri);
}
if (node->tm_unique_verts) {
BLI_table_gset_free(node->tm_unique_verts);
}
if (node->tm_other_verts) {
BLI_table_gset_free(node->tm_other_verts);
}
if (node->tm_faces) {
BLI_gset_free(node->tm_faces, NULL);
}
if (node->bm_faces) {
BLI_gset_free(node->bm_faces, NULL);
}
@@ -1292,6 +1324,10 @@ static void pbvh_update_draw_buffer_cb(void *__restrict userdata,
node->draw_buffers = GPU_pbvh_bmesh_buffers_build(pbvh->flags &
PBVH_DYNTOPO_SMOOTH_SHADING);
break;
case PBVH_TRIMESH:
node->draw_buffers = GPU_pbvh_trimesh_buffers_build(pbvh->flags &
PBVH_DYNTOPO_SMOOTH_SHADING);
break;
}
}
@@ -1328,7 +1364,17 @@ static void pbvh_update_draw_buffer_cb(void *__restrict userdata,
node->bm_faces,
node->bm_unique_verts,
node->bm_other_verts,
update_flags);
update_flags,
pbvh->cd_vert_node_offset);
break;
case PBVH_TRIMESH:
GPU_pbvh_trimesh_buffers_update(node->draw_buffers,
pbvh->tm,
node->tm_faces,
node->tm_unique_verts,
node->tm_other_verts,
update_flags,
pbvh->cd_vert_node_offset);
break;
}
}
@@ -1336,10 +1382,17 @@ static void pbvh_update_draw_buffer_cb(void *__restrict userdata,
static void pbvh_update_draw_buffers(PBVH *pbvh, PBVHNode **nodes, int totnode, int update_flag)
{
if ((update_flag & PBVH_RebuildDrawBuffers) || ELEM(pbvh->type, PBVH_GRIDS, PBVH_BMESH)) {
if ((update_flag & PBVH_RebuildDrawBuffers) ||
ELEM(pbvh->type, PBVH_GRIDS, PBVH_TRIMESH, PBVH_BMESH)) {
/* Free buffers uses OpenGL, so not in parallel. */
for (int n = 0; n < totnode; n++) {
PBVHNode *node = nodes[n];
if (node->flag & PBVH_Delete) {
printf("corrupted node! %p %d\n", node, node->flag);
return;
}
if (node->flag & PBVH_RebuildDrawBuffers) {
GPU_pbvh_buffers_free(node->draw_buffers);
node->draw_buffers = NULL;
@@ -1352,6 +1405,9 @@ static void pbvh_update_draw_buffers(PBVH *pbvh, PBVHNode **nodes, int totnode,
else if (pbvh->type == PBVH_BMESH) {
GPU_pbvh_bmesh_buffers_update_free(node->draw_buffers);
}
else if (pbvh->type == PBVH_TRIMESH) {
GPU_pbvh_trimesh_buffers_update_free(node->draw_buffers);
}
}
}
}
@@ -1527,6 +1583,33 @@ static void pbvh_bmesh_node_visibility_update(PBVHNode *node)
BKE_pbvh_node_fully_hidden_set(node, true);
}
/* Recompute the fully-hidden state of a trimesh PBVH node: the node is
 * fully hidden only when every unique and shared vertex carries
 * TM_ELEM_HIDDEN. */
static void pbvh_trimesh_node_visibility_update(PBVHNode *node)
{
  TableGSet *unique, *other;
  TMVert *v;

  unique = BKE_pbvh_trimesh_node_unique_verts(node);
  other = BKE_pbvh_trimesh_node_other_verts(node);

  /* Any visible vertex makes the whole node visible: early out. */
  TMS_ITER (v, unique) {
    if (!TM_elem_flag_test(v, TM_ELEM_HIDDEN)) {
      BKE_pbvh_node_fully_hidden_set(node, false);
      return;
    }
  }
  TMS_ITER_END

  TMS_ITER (v, other) {
    if (!TM_elem_flag_test(v, TM_ELEM_HIDDEN)) {
      BKE_pbvh_node_fully_hidden_set(node, false);
      return;
    }
  }
  TMS_ITER_END;

  /* No visible vertex found in either set. */
  BKE_pbvh_node_fully_hidden_set(node, true);
}
static void pbvh_update_visibility_task_cb(void *__restrict userdata,
const int n,
const TaskParallelTLS *__restrict UNUSED(tls))
@@ -1546,6 +1629,9 @@ static void pbvh_update_visibility_task_cb(void *__restrict userdata,
case PBVH_BMESH:
pbvh_bmesh_node_visibility_update(node);
break;
case PBVH_TRIMESH:
pbvh_trimesh_node_visibility_update(node);
break;
}
node->flag &= ~PBVH_UpdateVisibility;
}
@@ -1660,6 +1746,9 @@ bool BKE_pbvh_has_faces(const PBVH *pbvh)
if (pbvh->type == PBVH_BMESH) {
return (pbvh->bm->totface != 0);
}
else if (pbvh->type == PBVH_TRIMESH) {
return pbvh->tm->tottri > 0;
}
return (pbvh->totprim != 0);
}
@@ -1713,6 +1802,12 @@ BMesh *BKE_pbvh_get_bmesh(PBVH *pbvh)
return pbvh->bm;
}
/* Return the dynamic-topology TriMesh owned by this PBVH.
 * Only valid for trees of type PBVH_TRIMESH. */
TM_TriMesh *BKE_pbvh_get_trimesh(PBVH *bvh)
{
  BLI_assert(bvh->type == PBVH_TRIMESH);
  return bvh->tm;
}
/***************************** Node Access ***********************************/
void BKE_pbvh_node_mark_update(PBVHNode *node)
@@ -1848,6 +1943,15 @@ void BKE_pbvh_node_num_verts(PBVH *pbvh, PBVHNode *node, int *r_uniquevert, int
*r_uniquevert = tot;
}
break;
case PBVH_TRIMESH:
tot = node->tm_unique_verts->length;
if (r_totvert) {
*r_totvert = tot + node->tm_other_verts->length;
}
if (r_uniquevert) {
*r_uniquevert = tot;
}
break;
}
}
@@ -1895,6 +1999,23 @@ void BKE_pbvh_node_get_grids(PBVH *pbvh,
*r_griddata = NULL;
}
break;
case PBVH_TRIMESH:
if (r_grid_indices) {
*r_grid_indices = NULL;
}
if (r_totgrid) {
*r_totgrid = 0;
}
if (r_maxgrid) {
*r_maxgrid = 0;
}
if (r_gridsize) {
*r_gridsize = 0;
}
if (r_griddata) {
*r_griddata = NULL;
}
break;
}
}
@@ -2122,7 +2243,7 @@ static bool pbvh_faces_node_raycast(PBVH *pbvh,
const float ray_normal[3],
struct IsectRayPrecalc *isect_precalc,
float *depth,
int *r_active_vertex_index,
SculptIdx *r_active_vertex_index,
int *r_active_face_index,
float *r_face_normal)
{
@@ -2190,7 +2311,7 @@ static bool pbvh_grids_node_raycast(PBVH *pbvh,
const float ray_normal[3],
struct IsectRayPrecalc *isect_precalc,
float *depth,
int *r_active_vertex_index,
SculptIdx *r_active_vertex_index,
int *r_active_grid_index,
float *r_face_normal)
{
@@ -2285,7 +2406,7 @@ bool BKE_pbvh_node_raycast(PBVH *pbvh,
const float ray_normal[3],
struct IsectRayPrecalc *isect_precalc,
float *depth,
int *active_vertex_index,
SculptIdx *active_vertex_index,
int *active_face_grid_index,
float *face_normal)
{
@@ -2321,7 +2442,7 @@ bool BKE_pbvh_node_raycast(PBVH *pbvh,
face_normal);
break;
case PBVH_BMESH:
BM_mesh_elem_index_ensure(pbvh->bm, BM_VERT);
// BM_mesh_elem_index_ensure(pbvh->bm, BM_VERT);
hit = pbvh_bmesh_node_raycast(node,
ray_start,
ray_normal,
@@ -2331,6 +2452,17 @@ bool BKE_pbvh_node_raycast(PBVH *pbvh,
active_vertex_index,
face_normal);
break;
case PBVH_TRIMESH:
// TM_mesh_elem_index_ensure(pbvh->tm, TM_VERTEX);
hit = pbvh_trimesh_node_raycast(node,
ray_start,
ray_normal,
isect_precalc,
depth,
use_origco,
active_vertex_index,
face_normal);
break;
}
return hit;
@@ -2565,6 +2697,10 @@ bool BKE_pbvh_node_find_nearest_to_ray(PBVH *pbvh,
hit = pbvh_bmesh_node_nearest_to_ray(
node, ray_start, ray_normal, depth, dist_sq, use_origco);
break;
case PBVH_TRIMESH:
hit = pbvh_trimesh_node_nearest_to_ray(
node, ray_start, ray_normal, depth, dist_sq, use_origco);
break;
}
return hit;
@@ -2646,6 +2782,9 @@ void BKE_pbvh_update_normals(PBVH *pbvh, struct SubdivCCG *subdiv_ccg)
if (pbvh->type == PBVH_BMESH) {
pbvh_bmesh_normals_update(nodes, totnode);
}
else if (pbvh->type == PBVH_TRIMESH) {
pbvh_trimesh_normals_update(nodes, totnode);
}
else if (pbvh->type == PBVH_FACES) {
pbvh_faces_update_normals(pbvh, nodes, totnode);
}
@@ -2726,7 +2865,7 @@ void BKE_pbvh_draw_cb(PBVH *pbvh,
for (int a = 0; a < totnode; a++) {
PBVHNode *node = nodes[a];
if (node->flag & PBVH_UpdateDrawBuffers) {
if (node->flag & PBVH_UpdateDrawBuffers && node->draw_buffers) {
/* Flush buffers uses OpenGL, so not in parallel. */
GPU_pbvh_buffers_update_flush(node->draw_buffers);
}
@@ -2873,9 +3012,11 @@ void BKE_pbvh_node_free_proxies(PBVHNode *node)
node->proxies[p].co = NULL;
}
MEM_freeN(node->proxies);
node->proxies = NULL;
if (node->proxies) {
MEM_freeN(node->proxies);
}
node->proxies = NULL;
node->proxy_count = 0;
}
@@ -2970,9 +3111,21 @@ void pbvh_vertex_iter_init(PBVH *pbvh, PBVHNode *node, PBVHVertexIter *vi, int m
BLI_gsetIterator_init(&vi->bm_unique_verts, node->bm_unique_verts);
BLI_gsetIterator_init(&vi->bm_other_verts, node->bm_other_verts);
vi->bm_vdata = &pbvh->bm->vdata;
vi->cd_origco_offset = pbvh->cd_origco_offset;
vi->cd_origno_offset = pbvh->cd_origno_offset;
vi->cd_vert_mask_offset = CustomData_get_offset(vi->bm_vdata, CD_PAINT_MASK);
}
if (pbvh->type == PBVH_TRIMESH) {
vi->ti = 0;
vi->tm_unique_verts = node->tm_unique_verts;
vi->tm_other_verts = node->tm_other_verts;
vi->tm_cur_set = vi->tm_unique_verts;
vi->tm_vdata = &pbvh->tm->vdata;
vi->cd_vert_mask_offset = CustomData_get_offset(vi->tm_vdata, CD_PAINT_MASK);
}
vi->gh = NULL;
if (vi->grids && mode == PBVH_ITER_UNIQUE) {
vi->grid_hidden = pbvh->grid_hidden;
@@ -2994,6 +3147,8 @@ bool pbvh_has_mask(PBVH *pbvh)
return (pbvh->vdata && CustomData_get_layer(pbvh->vdata, CD_PAINT_MASK));
case PBVH_BMESH:
return (pbvh->bm && (CustomData_get_offset(&pbvh->bm->vdata, CD_PAINT_MASK) != -1));
case PBVH_TRIMESH:
return (pbvh->tm && (CustomData_get_offset(&pbvh->tm->vdata, CD_PAINT_MASK) != -1));
}
return false;
@@ -3006,6 +3161,8 @@ bool pbvh_has_face_sets(PBVH *pbvh)
return (pbvh->pdata && CustomData_get_layer(pbvh->pdata, CD_SCULPT_FACE_SETS));
case PBVH_FACES:
return (pbvh->pdata && CustomData_get_layer(pbvh->pdata, CD_SCULPT_FACE_SETS));
case PBVH_TRIMESH:
return false;
case PBVH_BMESH:
return false;
}
@@ -3067,3 +3224,389 @@ void BKE_pbvh_respect_hide_set(PBVH *pbvh, bool respect_hide)
{
pbvh->respect_hide = respect_hide;
}
#ifdef PROXY_ADVANCED
// TODO: if this really works, make sure to pull the neighbor iterator out of sculpt.c and put it
// here
/* clang-format off */
# include "BKE_context.h"
# include "DNA_object_types.h"
# include "DNA_scene_types.h"
# include "../../editors/sculpt_paint/sculpt_intern.h"
/* clang-format on */
/**
 * (Re)allocate one channel array of a ProxyVertArray.
 *
 * \param data: pointer to the channel pointer; updated in place.
 * \param esize: size of one element in bytes.
 * \param oldsize: previous element count of the array.
 * \param newsize: required element count.
 * \param emask: the PV_* bit identifying this channel.
 * \param umask: the channel mask the caller requested.
 * \return `emask` if the channel was (re)allocated and must be refilled,
 *         0 if nothing changed.
 */
int checkalloc(void **data, int esize, int oldsize, int newsize, int emask, int umask)
{
  /* First allocation: only when the caller actually requested this channel. */
  if (!*data && (emask & umask)) {
    *data = MEM_callocN(newsize * esize, "pbvh proxy vert arrays");
    return emask;
  }
  /* Update the channel if it was already allocated once, or is requested by umask. */
  else if (newsize != oldsize && (*data || (emask & umask))) {
    /* Fix: must realloc the buffer `*data`, not the address of the pointer
     * variable itself (the old code passed `data`, corrupting the heap). */
    *data = MEM_reallocN(*data, newsize * esize);
    return emask;
  }

  return 0;
}
/* Rebuild the node's vertex-index -> local-array-position map
 * (ProxyVertArray.indexmap) when the vertex count changed or the map does
 * not exist yet.
 * NOTE(review): `vert_node_map` is unused here; presumably kept for
 * signature symmetry with the other proxy functions - confirm. */
void BKE_pbvh_ensure_proxyarray_indexmap(PBVH *pbvh, PBVHNode *node, GHash *vert_node_map)
{
  ProxyVertArray *p = &node->proxyverts;

  int totvert = 0;
  BKE_pbvh_node_num_verts(pbvh, node, &totvert, NULL);

  bool update = !p->indexmap || p->size != totvert;

  if (!update) {
    return;
  }

  /* Throw away the stale map and build a fresh one. */
  if (p->indexmap) {
    BLI_ghash_free(p->indexmap, NULL, NULL);
  }

  GHash *gs = p->indexmap = BLI_ghash_ptr_new("BKE_pbvh_ensure_proxyarray_indexmap");

  PBVHVertexIter vd;
  int i = 0;

  /* Map each vertex index (stored as a pointer-sized key) to its position
   * in this node's proxy arrays. */
  BKE_pbvh_vertex_iter_begin(pbvh, node, vd, PBVH_ITER_UNIQUE)
  {
    BLI_ghash_insert(gs, (void *)vd.index, (void *)i);
    i++;
  }
  BKE_pbvh_vertex_iter_end;
}
/* Check whether a node's proxy arrays are stale for the requested
 * channel mask. An empty node never needs an update. */
bool pbvh_proxyarray_needs_update(PBVH *pbvh, PBVHNode *node, int mask)
{
  ProxyVertArray *p = &node->proxyverts;

  int vert_count = 0;
  BKE_pbvh_node_num_verts(pbvh, node, &vert_count, NULL);

  if (vert_count == 0) {
    return false;
  }

  /* Stale when the size changed, neighbors were never built, or a
   * requested data channel is missing. */
  return p->size != vert_count || !p->neighbors || (p->datamask & mask) != mask;
}
/* Build a global map from vertex index to the index of the PBVH node that
 * uniquely owns it. Caller owns the returned GHash.
 * NOTE(review): `mask` is currently unused - confirm whether it was meant
 * to filter which nodes are visited. */
GHash *pbvh_build_vert_node_map(PBVH *pbvh, int mask)
{
  GHash *vert_node_map = BLI_ghash_ptr_new("BKE_pbvh_ensure_proxyarrays");

  for (int i = 0; i < pbvh->totnode; i++) {
    PBVHVertexIter vd;
    PBVHNode *node = pbvh->nodes + i;

    /* PBVH_ITER_UNIQUE: each vertex is visited from exactly one node, so
     * the map has a single owner entry per vertex. */
    BKE_pbvh_vertex_iter_begin(pbvh, node, vd, PBVH_ITER_UNIQUE)
    {
      BLI_ghash_insert(vert_node_map, (void *)vd.index, (void *)i);
    }
    BKE_pbvh_vertex_iter_end;
  }

  return vert_node_map;
}
/* Ensure every node's proxy arrays exist and cover the channels in `mask`.
 * Index maps are rebuilt for all nodes first so that cross-node neighbor
 * lookups in BKE_pbvh_ensure_proxyarray can resolve. */
void BKE_pbvh_ensure_proxyarrays(SculptSession *ss, PBVH *pbvh, int mask)
{
  bool update = false;

  /* Cheap pre-pass: skip all work when no node is stale. */
  for (int i = 0; i < pbvh->totnode; i++) {
    if (pbvh_proxyarray_needs_update(pbvh, pbvh->nodes + i, mask)) {
      update = true;
      break;
    }
  }

  if (!update) {
    return;
  }

  GHash *vert_node_map = pbvh_build_vert_node_map(pbvh, mask);

  /* Two passes: all index maps must exist before any node resolves
   * neighbors that live in other nodes. */
  for (int i = 0; i < pbvh->totnode; i++) {
    BKE_pbvh_ensure_proxyarray_indexmap(pbvh, pbvh->nodes + i, vert_node_map);
  }

  for (int i = 0; i < pbvh->totnode; i++) {
    BKE_pbvh_ensure_proxyarray(ss, pbvh, pbvh->nodes + i, mask, vert_node_map, false, false);
  }

  if (vert_node_map) {
    BLI_ghash_free(vert_node_map, NULL, NULL);
  }
}
/* Allocate (or grow) the proxy vertex arrays of one node for the channels
 * in `mask`, then fill any channel that was newly allocated, resized, or
 * forced to refresh.
 *
 * \param ss: sculpt session; needed only for neighbor iteration (PV_NEIGHBORS).
 * \param vert_node_map: optional global vertex -> node-index map, used to
 *        resolve neighbors owned by other nodes; may be NULL.
 * \param check_indexmap: rebuild this node's index map first.
 * \param force_update: refill all requested channels even if allocation
 *        did not change. */
void BKE_pbvh_ensure_proxyarray(SculptSession *ss,
                                PBVH *pbvh,
                                PBVHNode *node,
                                int mask,
                                GHash *vert_node_map,
                                bool check_indexmap,
                                bool force_update)
{
  ProxyVertArray *p = &node->proxyverts;

  if (check_indexmap) {
    BKE_pbvh_ensure_proxyarray_indexmap(pbvh, node, vert_node_map);
  }

  GHash *gs = p->indexmap;

  int totvert = 0;
  BKE_pbvh_node_num_verts(pbvh, node, &totvert, NULL);

  if (!totvert) {
    return;
  }

  /* Bits of the channels that must be (re)filled below. */
  int updatemask = 0;

  /* Allocate/resize one channel and record whether it needs refilling. */
#  define UPDATETEST(name, emask, esize) \
    if (mask & emask) { \
      updatemask |= checkalloc((void **)&p->name, esize, p->size, totvert, emask, mask); \
    }

  UPDATETEST(ownerco, PV_OWNERCO, sizeof(void *))
  UPDATETEST(ownerno, PV_OWNERNO, sizeof(void *))
  UPDATETEST(ownermask, PV_OWNERMASK, sizeof(void *))
  UPDATETEST(ownercolor, PV_OWNERCOLOR, sizeof(void *))
  UPDATETEST(co, PV_CO, sizeof(float) * 3)
  UPDATETEST(no, PV_NO, sizeof(short) * 3)
  /* NOTE(review): fno shares the PV_NO bit with no - both are kept in sync
   * in the fill loop below. */
  UPDATETEST(fno, PV_NO, sizeof(float) * 3)
  UPDATETEST(mask, PV_MASK, sizeof(float))
  UPDATETEST(color, PV_COLOR, sizeof(float) * 4)
  UPDATETEST(index, PV_INDEX, sizeof(int))
  UPDATETEST(neighbors, PV_NEIGHBORS, sizeof(ProxyKey) * MAX_PROXY_NEIGHBORS)

  p->size = totvert;

  if (force_update) {
    updatemask |= mask;
  }

  if (!updatemask) {
    return;
  }

  p->datamask |= mask;

  PBVHVertexIter vd;

  /* First pass: refresh the vertex-index -> local-position map. */
  int i = 0;
  BKE_pbvh_vertex_iter_begin(pbvh, node, vd, PBVH_ITER_UNIQUE)
  {
    BLI_ghash_insert(gs, (void *)vd.index, (void *)i);
    i++;
  }
  BKE_pbvh_vertex_iter_end;

  /* Second pass: fill all channels flagged in updatemask. */
  i = 0;
  BKE_pbvh_vertex_iter_begin(pbvh, node, vd, PBVH_ITER_UNIQUE)
  {
    if (updatemask & PV_OWNERCO) {
      p->ownerco[i] = vd.co;
    }
    if (updatemask & PV_INDEX) {
      p->index[i] = vd.index;
    }
    if (updatemask & PV_OWNERNO) {
      p->ownerno[i] = vd.no;
    }
    if (updatemask & PV_NO) {
      /* Keep the short and float normals consistent regardless of which
       * representation the iterator provides; fall back to +Z. */
      if (vd.no) {
        copy_v3_v3_short(p->no[i], vd.no);
        normal_short_to_float_v3(p->fno[i], vd.no);
      }
      else if (vd.fno) {
        copy_v3_v3(p->fno[i], vd.fno);
        normal_float_to_short_v3(p->no[i], p->fno[i]);
      }
      else {
        zero_v3(p->fno[i]);
        p->fno[i][2] = 1.0f;
        normal_float_to_short_v3(p->no[i], p->fno[i]);
      }
    }
    if (updatemask & PV_CO) {
      copy_v3_v3(p->co[i], vd.co);
    }
    if (updatemask & PV_OWNERMASK) {
      p->ownermask[i] = vd.mask;
    }
    if (updatemask & PV_MASK) {
      p->mask[i] = vd.mask ? *vd.mask : 0.0f;
    }
    if (updatemask & PV_COLOR) {
      if (vd.vcol) {
        copy_v4_v4(p->color[i], vd.vcol->color);
      }
    }

    if (updatemask & PV_NEIGHBORS) {
      int j = 0;
      SculptVertexNeighborIter ni;

      SCULPT_VERTEX_NEIGHBORS_ITER_BEGIN (ss, vd.index, ni) {
        /* Reserve one slot for the -1 terminator below. */
        if (j >= MAX_PROXY_NEIGHBORS - 1) {
          break;
        }

        ProxyKey key;

        int *pindex = (int *)BLI_ghash_lookup_p(gs, (void *)ni.index);

        if (!pindex) {
          /* Neighbor lives in another node: resolve through the global map. */
          if (vert_node_map) {
            /* NOTE(review): BLI_ghash_lookup_p returns void**, assigned here
             * without a cast (implicit void** -> int* conversion) - confirm
             * this compiles warning-free on all toolchains. */
            int *nindex = BLI_ghash_lookup_p(vert_node_map, (void *)ni.index);

            if (!nindex) {
              continue;
            }

            PBVHNode *node2 = pbvh->nodes + *nindex;
            if (node2->proxyverts.indexmap) {
              pindex = (int *)BLI_ghash_lookup_p(node2->proxyverts.indexmap, (void *)ni.index);
            }
            if (!pindex) {
              continue;
            }

            key.node = (int)(node2 - pbvh->nodes);
            key.pindex = *pindex;
          }
          else {
            continue;
          }
        }
        else {
          /* Neighbor is local to this node. */
          key.node = (int)(node - pbvh->nodes);
          key.pindex = *pindex;
        }

        p->neighbors[i][j++] = key;
      }
      SCULPT_VERTEX_NEIGHBORS_ITER_END(ni);

      /* Terminate the per-vertex neighbor list. */
      p->neighbors[i][j].node = -1;
    }

    i++;
  }
  BKE_pbvh_vertex_iter_end;
}
/* Shared state for the proxy-array gather/load task callbacks. */
typedef struct GatherProxyThread {
  PBVHNode **nodes; /* Nodes being processed; indexed by task number. */
  PBVH *pbvh;
  int mask; /* ProxyVertArray channel mask (PV_* bits); used by the load path. */
} GatherProxyThread;
/* Task callback: (re)load the proxy array data of one PBVH node.
 * Runs from BLI_task_parallel_range; each task touches only its own node. */
static void pbvh_load_proxyarray_exec(void *__restrict userdata,
                                      const int n,
                                      const TaskParallelTLS *__restrict tls)
{
  GatherProxyThread *data = (GatherProxyThread *)userdata;
  PBVHNode *node = data->nodes[n];

  /* force_update=true: refresh existing arrays, not just allocate missing
   * ones. No vert_node_map is passed, so cross-node neighbor resolution is
   * skipped here (not thread-safe). */
  BKE_pbvh_ensure_proxyarray(NULL, data->pbvh, node, data->mask, NULL, false, true);
}
/* Refresh the proxy vertex arrays for a set of nodes in parallel.
 *
 * PV_NEIGHBORS is stripped from the mask BEFORE the task data is built:
 * neighbor resolution touches other nodes' index maps and is not safe from
 * threaded code. (Previously the bit was cleared only after `mask` had
 * already been copied into the task struct, so the strip had no effect.) */
void BKE_pbvh_load_proxyarrays(PBVH *pbvh, PBVHNode **nodes, int totnode, int mask)
{
  mask &= ~PV_NEIGHBORS; /* don't update neighbors in threaded code */

  GatherProxyThread data = {.nodes = nodes, .pbvh = pbvh, .mask = mask};

  TaskParallelSettings settings;
  BKE_pbvh_parallel_range_settings(&settings, true, totnode);

  BLI_task_parallel_range(0, totnode, &data, pbvh_load_proxyarray_exec, &settings);
}
/* Task callback: write one node's proxy array data (coordinates and paint
 * mask) back to the real PBVH vertices. */
static void pbvh_gather_proxyarray_exec(void *__restrict userdata,
                                        const int n,
                                        const TaskParallelTLS *__restrict tls)
{
  GatherProxyThread *data = (GatherProxyThread *)userdata;
  PBVHNode *node = data->nodes[n];

  PBVHVertexIter vd;
  ProxyVertArray *p = &node->proxyverts;
  int i = 0;

  /* Only copy back channels that were actually loaded into the proxy. */
  int mask = p->datamask;

  BKE_pbvh_vertex_iter_begin(data->pbvh, node, vd, PBVH_ITER_UNIQUE)
  {
    if (mask & PV_CO) {
      copy_v3_v3(vd.co, p->co[i]);
    }
    if (vd.mask && (mask & PV_MASK)) {
      *vd.mask = p->mask[i];
    }
    i++;
  }
  BKE_pbvh_vertex_iter_end;
}
/* Write the proxy vertex arrays of the given nodes back into the PBVH,
 * one task per node. */
void BKE_pbvh_gather_proxyarray(PBVH *pbvh, PBVHNode **nodes, int totnode)
{
  TaskParallelSettings task_settings;
  GatherProxyThread thread_data = {.nodes = nodes, .pbvh = pbvh};

  BKE_pbvh_parallel_range_settings(&task_settings, true, totnode);
  BLI_task_parallel_range(0, totnode, &thread_data, pbvh_gather_proxyarray_exec, &task_settings);
}
/* Free all channel arrays of a node's ProxyVertArray and reset it to an
 * empty state. `pbvh` is unused but kept for API symmetry. */
void BKE_pbvh_free_proxyarray(PBVH *pbvh, PBVHNode *node)
{
  ProxyVertArray *p = &node->proxyverts;

  /* Fix: the index map was previously only zeroed by the memset below,
   * leaking the GHash. */
  if (p->indexmap) {
    BLI_ghash_free(p->indexmap, NULL, NULL);
  }

  if (p->co) {
    MEM_freeN(p->co);
  }
  if (p->no) {
    MEM_freeN(p->no);
  }
  /* Fix: fno is allocated by UPDATETEST(fno, PV_NO, ...) in
   * BKE_pbvh_ensure_proxyarray but was never freed here. */
  if (p->fno) {
    MEM_freeN(p->fno);
  }
  if (p->index) {
    MEM_freeN(p->index);
  }
  if (p->mask) {
    MEM_freeN(p->mask);
  }
  if (p->ownerco) {
    MEM_freeN(p->ownerco);
  }
  if (p->ownerno) {
    MEM_freeN(p->ownerno);
  }
  if (p->ownermask) {
    MEM_freeN(p->ownermask);
  }
  if (p->ownercolor) {
    MEM_freeN(p->ownercolor);
  }
  if (p->color) {
    MEM_freeN(p->color);
  }
  if (p->neighbors) {
    MEM_freeN(p->neighbors);
  }

  memset(p, 0, sizeof(*p));
}
/* Apply a single proxy-vertex update record to the node.
 * NOTE(review): intentionally a stub - not implemented yet. */
void BKE_pbvh_update_proxyvert(PBVH *pbvh, PBVHNode *node, ProxyVertUpdateRec *rec)
{
}
/* Direct (read/write) access to a node's proxy vertex arrays.
 * `pbvh` is unused; kept for API symmetry. */
ProxyVertArray *BKE_pbvh_get_proxyarrays(PBVH *pbvh, PBVHNode *node)
{
  return &node->proxyverts;
}
#endif

View File

@@ -20,12 +20,15 @@
#include "MEM_guardedalloc.h"
#include "BLI_array.h"
#include "BLI_buffer.h"
#include "BLI_ghash.h"
#include "BLI_heap_simple.h"
#include "BLI_math.h"
#include "BLI_memarena.h"
#include "BLI_rand.h"
#include "BLI_utildefines.h"
#include "PIL_time.h"
#include "BKE_DerivedMesh.h"
#include "BKE_ccg.h"
@@ -36,17 +39,39 @@
#include "bmesh.h"
#include "pbvh_intern.h"
#define DYNTOPO_TIME_LIMIT 0.015
#define DYNTOPO_RUN_INTERVAL 0.01
#define DYNTOPO_USE_HEAP
#ifndef DYNTOPO_USE_HEAP
/* don't add edges into the queue multiple times */
# define USE_EDGEQUEUE_TAG
#endif
/* Avoid skinny faces */
#define USE_EDGEQUEUE_EVEN_SUBDIV
#ifdef USE_EDGEQUEUE_EVEN_SUBDIV
# include "BKE_global.h"
#endif
#ifdef WIN32
# include "crtdbg.h"
#endif
/* Debug helper: validate the CRT heap on Windows and break into the
 * debugger when corruption is detected. No-op on other platforms. */
static void check_heap(void)
{
#ifdef WIN32
  if (!_CrtCheckMemory()) {
    /* Terminate the line and flush so the message is visible in the
     * console before the debug break stops execution. */
    printf("Memory corruption!\n");
    fflush(stdout);
    _CrtDbgBreak();
  }
#endif
}
/* Support for only operating on front-faces */
#define USE_EDGEQUEUE_FRONTFACE
/* don't add edges into the queue multiple times */
#define USE_EDGEQUEUE_TAG
/**
* Ensure we don't have dirty tags for the edge queue, and that they are left cleared.
* (slow, even for debug mode, so leave disabled for now).
@@ -200,7 +225,8 @@ static BMVert *bm_vert_hash_lookup_chain(GHash *deleted_verts, BMVert *v)
static void pbvh_bmesh_node_finalize(PBVH *pbvh,
const int node_index,
const int cd_vert_node_offset,
const int cd_face_node_offset)
const int cd_face_node_offset,
bool add_orco)
{
GSetIterator gs_iter;
PBVHNode *n = &pbvh->nodes[node_index];
@@ -251,19 +277,24 @@ static void pbvh_bmesh_node_finalize(PBVH *pbvh,
BKE_pbvh_node_mark_rebuild_draw(n);
BKE_pbvh_node_fully_hidden_set(n, !has_visible);
n->flag |= PBVH_UpdateNormals;
n->flag |= PBVH_UpdateNormals | PBVH_UpdateTopology;
if (add_orco) {
BKE_pbvh_bmesh_node_save_orig(pbvh->bm, n);
}
}
/* Recursively split the node if it exceeds the leaf_limit */
static void pbvh_bmesh_node_split(PBVH *pbvh, const BBC *bbc_array, int node_index)
static void pbvh_bmesh_node_split(
PBVH *pbvh, const BBC *bbc_array, int node_index, bool add_orco, int depth)
{
const int cd_vert_node_offset = pbvh->cd_vert_node_offset;
const int cd_face_node_offset = pbvh->cd_face_node_offset;
PBVHNode *n = &pbvh->nodes[node_index];
if (BLI_gset_len(n->bm_faces) <= pbvh->leaf_limit) {
if (depth > 6 || BLI_gset_len(n->bm_faces) <= pbvh->leaf_limit) {
/* Node limit not exceeded */
pbvh_bmesh_node_finalize(pbvh, node_index, cd_vert_node_offset, cd_face_node_offset);
pbvh_bmesh_node_finalize(pbvh, node_index, cd_vert_node_offset, cd_face_node_offset, add_orco);
return;
}
@@ -282,6 +313,10 @@ static void pbvh_bmesh_node_split(PBVH *pbvh, const BBC *bbc_array, int node_ind
const int axis = BB_widest_axis(&cb);
const float mid = (cb.bmax[axis] + cb.bmin[axis]) * 0.5f;
if (isnan(mid)) {
printf("NAN ERROR! %s\n", __func__);
}
/* Add two new child nodes */
const int children = pbvh->totnode;
n->children_offset = children;
@@ -309,7 +344,7 @@ static void pbvh_bmesh_node_split(PBVH *pbvh, const BBC *bbc_array, int node_ind
BLI_gset_insert(c2->bm_faces, f);
}
}
#if 0
/* Enforce at least one primitive in each node */
GSet *empty = NULL, *other;
if (BLI_gset_len(c1->bm_faces) == 0) {
@@ -320,6 +355,7 @@ static void pbvh_bmesh_node_split(PBVH *pbvh, const BBC *bbc_array, int node_ind
empty = c2->bm_faces;
other = c1->bm_faces;
}
if (empty) {
GSET_ITER (gs_iter, other) {
void *key = BLI_gsetIterator_getKey(&gs_iter);
@@ -328,7 +364,7 @@ static void pbvh_bmesh_node_split(PBVH *pbvh, const BBC *bbc_array, int node_ind
break;
}
}
#endif
/* Clear this node */
/* Mark this node's unique verts as unclaimed */
@@ -367,8 +403,8 @@ static void pbvh_bmesh_node_split(PBVH *pbvh, const BBC *bbc_array, int node_ind
n->flag &= ~PBVH_Leaf;
/* Recurse */
pbvh_bmesh_node_split(pbvh, bbc_array, children);
pbvh_bmesh_node_split(pbvh, bbc_array, children + 1);
pbvh_bmesh_node_split(pbvh, bbc_array, children, add_orco, depth + 1);
pbvh_bmesh_node_split(pbvh, bbc_array, children + 1, add_orco, depth + 1);
/* Array maybe reallocated, update current node pointer */
n = &pbvh->nodes[node_index];
@@ -385,6 +421,7 @@ static bool pbvh_bmesh_node_limit_ensure(PBVH *pbvh, int node_index)
{
GSet *bm_faces = pbvh->nodes[node_index].bm_faces;
const int bm_faces_size = BLI_gset_len(bm_faces);
if (bm_faces_size <= pbvh->leaf_limit) {
/* Node limit not exceeded */
return false;
@@ -413,7 +450,7 @@ static bool pbvh_bmesh_node_limit_ensure(PBVH *pbvh, int node_index)
/* Likely this is already dirty. */
pbvh->bm->elem_index_dirty |= BM_FACE;
pbvh_bmesh_node_split(pbvh, bbc_array, node_index);
pbvh_bmesh_node_split(pbvh, bbc_array, node_index, pbvh->nodes[node_index].bm_ortri != NULL, 0);
MEM_freeN(bbc_array);
@@ -476,12 +513,18 @@ BLI_INLINE int pbvh_bmesh_node_index_from_face(PBVH *pbvh, const BMFace *key)
BLI_INLINE PBVHNode *pbvh_bmesh_node_from_vert(PBVH *pbvh, const BMVert *key)
{
return &pbvh->nodes[pbvh_bmesh_node_index_from_vert(pbvh, key)];
int ni = pbvh_bmesh_node_index_from_vert(pbvh, key);
return ni >= 0 ? pbvh->nodes + ni : NULL;
// return &pbvh->nodes[pbvh_bmesh_node_index_from_vert(pbvh, key)];
}
BLI_INLINE PBVHNode *pbvh_bmesh_node_from_face(PBVH *pbvh, const BMFace *key)
{
return &pbvh->nodes[pbvh_bmesh_node_index_from_face(pbvh, key)];
int ni = pbvh_bmesh_node_index_from_face(pbvh, key);
return ni >= 0 ? pbvh->nodes + ni : NULL;
// return &pbvh->nodes[pbvh_bmesh_node_index_from_face(pbvh, key)];
}
static BMVert *pbvh_bmesh_vert_create(PBVH *pbvh,
@@ -636,6 +679,10 @@ static void pbvh_bmesh_vert_remove(PBVH *pbvh, BMVert *v)
BM_FACES_OF_VERT_ITER_BEGIN (f, v) {
const int f_node_index = pbvh_bmesh_node_index_from_face(pbvh, f);
if (f_node_index == DYNTOPO_NODE_NONE) {
continue;
}
/* faces often share the same node,
* quick check to avoid redundant #BLI_gset_remove calls */
if (f_node_index_prev != f_node_index) {
@@ -658,6 +705,11 @@ static void pbvh_bmesh_face_remove(PBVH *pbvh, BMFace *f)
{
PBVHNode *f_node = pbvh_bmesh_node_from_face(pbvh, f);
if (!f_node) {
printf("pbvh corruption\n");
fflush(stdout);
return;
}
/* Check if any of this face's vertices need to be removed
* from the node */
BMLoop *l_first = BM_FACE_FIRST_LOOP(f);
@@ -729,6 +781,10 @@ struct EdgeQueue;
typedef struct EdgeQueue {
HeapSimple *heap;
void **elems;
int totelems;
const float *center;
float center_proj[3]; /* for when we use projected coords. */
float radius_squared;
@@ -738,6 +794,7 @@ typedef struct EdgeQueue {
#endif
bool (*edge_queue_tri_in_range)(const struct EdgeQueue *q, BMFace *f);
bool (*edge_queue_vert_in_range)(const struct EdgeQueue *q, BMVert *v);
const float *view_normal;
#ifdef USE_EDGEQUEUE_FRONTFACE
@@ -754,6 +811,17 @@ typedef struct {
int cd_face_node_offset;
} EdgeQueueContext;
/* Edge length squared, scaled up by the average valence of its two vertices,
 * so that edges around high-valence vertices sort as "longer" in the queue. */
static float calc_weighted_edge(EdgeQueueContext *eq_ctx, BMEdge *e)
{
  const float len_sq = BM_edge_calc_length_squared(e);
  float valence = (float)(BM_vert_edge_count(e->v1) + BM_vert_edge_count(e->v2)) * 0.5f;

  /* Only penalize valences above ~5; clamp so the weight never shrinks the edge. */
  valence = MAX2(valence - 5.0f, 1.0f);

  return len_sq * powf(valence, 5.0f);
}
/* only tag'd edges are in the queue */
#ifdef USE_EDGEQUEUE_TAG
# define EDGE_QUEUE_TEST(e) (BM_elem_flag_test((CHECK_TYPE_INLINE(e, BMEdge *), e), BM_ELEM_TAG))
@@ -793,6 +861,12 @@ static void pbvh_bmesh_edge_tag_verify(PBVH *pbvh)
}
#endif
static bool edge_queue_vert_in_sphere(const EdgeQueue *q, BMVert *v)
{
  /* True when the vertex lies inside the brush sphere. */
  const float dist_sq = len_squared_v3v3(q->center, v->co);
  return dist_sq <= q->radius_squared;
}
static bool edge_queue_tri_in_sphere(const EdgeQueue *q, BMFace *f)
{
BMVert *v_tri[3];
@@ -826,6 +900,15 @@ static bool edge_queue_tri_in_circle(const EdgeQueue *q, BMFace *f)
return len_squared_v3v3(q->center_proj, c) <= q->radius_squared;
}
static bool edge_queue_vert_in_circle(const EdgeQueue *q, BMVert *v)
{
  /* Project the vertex onto the view plane, then test against the
   * projected brush circle. */
  float proj[3];

  project_plane_normalized_v3_v3v3(proj, v->co, q->view_normal);

  return len_squared_v3v3(q->center_proj, proj) <= q->radius_squared;
}
/* Return true if the vertex mask is less than 1.0, false otherwise */
static bool check_mask(EdgeQueueContext *eq_ctx, BMVert *v)
{
@@ -834,6 +917,10 @@ static bool check_mask(EdgeQueueContext *eq_ctx, BMVert *v)
static void edge_queue_insert(EdgeQueueContext *eq_ctx, BMEdge *e, float priority)
{
void **elems = eq_ctx->q->elems;
BLI_array_declare(elems);
BLI_array_len_set(elems, eq_ctx->q->totelems);
/* Don't let topology update affect fully masked vertices. This used to
* have a 50% mask cutoff, with the reasoning that you can't do a 50%
* topology update. But this gives an ugly border in the mesh. The mask
@@ -847,7 +934,14 @@ static void edge_queue_insert(EdgeQueueContext *eq_ctx, BMEdge *e, float priorit
BMVert **pair = BLI_mempool_alloc(eq_ctx->pool);
pair[0] = e->v1;
pair[1] = e->v2;
#ifdef DYNTOPO_USE_HEAP
BLI_heapsimple_insert(eq_ctx->q->heap, priority, pair);
#endif
BLI_array_append(elems, pair);
eq_ctx->q->elems = elems;
eq_ctx->q->totelems = BLI_array_len(elems);
#ifdef USE_EDGEQUEUE_TAG
BLI_assert(EDGE_QUEUE_TEST(e) == false);
EDGE_QUEUE_ENABLE(e);
@@ -932,7 +1026,7 @@ static void short_edge_queue_edge_add(EdgeQueueContext *eq_ctx, BMEdge *e)
if (EDGE_QUEUE_TEST(e) == false)
#endif
{
const float len_sq = BM_edge_calc_length_squared(e);
const float len_sq = calc_weighted_edge(eq_ctx, e);
if (len_sq < eq_ctx->q->limit_len_squared) {
edge_queue_insert(eq_ctx, e, len_sq);
}
@@ -1007,6 +1101,8 @@ static void long_edge_queue_create(EdgeQueueContext *eq_ctx,
const bool use_projected)
{
eq_ctx->q->heap = BLI_heapsimple_new();
eq_ctx->q->elems = NULL;
eq_ctx->q->totelems = 0;
eq_ctx->q->center = center;
eq_ctx->q->radius_squared = radius * radius;
eq_ctx->q->limit_len_squared = pbvh->bm_max_edge_len * pbvh->bm_max_edge_len;
@@ -1024,10 +1120,12 @@ static void long_edge_queue_create(EdgeQueueContext *eq_ctx,
if (use_projected) {
eq_ctx->q->edge_queue_tri_in_range = edge_queue_tri_in_circle;
eq_ctx->q->edge_queue_vert_in_range = edge_queue_vert_in_circle;
project_plane_normalized_v3_v3v3(eq_ctx->q->center_proj, center, view_normal);
}
else {
eq_ctx->q->edge_queue_tri_in_range = edge_queue_tri_in_sphere;
eq_ctx->q->edge_queue_vert_in_range = edge_queue_vert_in_sphere;
}
#ifdef USE_EDGEQUEUE_TAG_VERIFY
@@ -1070,6 +1168,8 @@ static void short_edge_queue_create(EdgeQueueContext *eq_ctx,
const bool use_projected)
{
eq_ctx->q->heap = BLI_heapsimple_new();
eq_ctx->q->elems = NULL;
eq_ctx->q->totelems = 0;
eq_ctx->q->center = center;
eq_ctx->q->radius_squared = radius * radius;
eq_ctx->q->limit_len_squared = pbvh->bm_min_edge_len * pbvh->bm_min_edge_len;
@@ -1087,10 +1187,12 @@ static void short_edge_queue_create(EdgeQueueContext *eq_ctx,
if (use_projected) {
eq_ctx->q->edge_queue_tri_in_range = edge_queue_tri_in_circle;
eq_ctx->q->edge_queue_vert_in_range = edge_queue_vert_in_circle;
project_plane_normalized_v3_v3v3(eq_ctx->q->center_proj, center, view_normal);
}
else {
eq_ctx->q->edge_queue_tri_in_range = edge_queue_tri_in_sphere;
eq_ctx->q->edge_queue_vert_in_range = edge_queue_vert_in_sphere;
}
for (int n = 0; n < pbvh->totnode; n++) {
@@ -1217,15 +1319,23 @@ static void pbvh_bmesh_split_edge(EdgeQueueContext *eq_ctx,
if (!BLI_gset_haskey(pbvh->nodes[ni].bm_unique_verts, v_new)) {
BLI_gset_add(pbvh->nodes[ni].bm_other_verts, v_new);
}
//*
if (BM_vert_edge_count_is_over(v_opp, 8)) {
BMIter bm_iter;
BMEdge *e2;
BM_ITER_ELEM (e2, &bm_iter, v_opp, BM_EDGES_OF_VERT) {
long_edge_queue_edge_add(eq_ctx, e2);
BMVert *v2 = BM_edge_other_vert(e2, v_opp);
bool add = eq_ctx->q->edge_queue_vert_in_range(eq_ctx->q, v2);
add = add && BM_edge_calc_length_squared(e2) > eq_ctx->q->limit_len_squared;
if (add) {
long_edge_queue_edge_add(eq_ctx, e2);
}
}
}
//*/
}
BM_edge_kill(pbvh->bm, e);
@@ -1236,9 +1346,28 @@ static bool pbvh_bmesh_subdivide_long_edges(EdgeQueueContext *eq_ctx,
BLI_Buffer *edge_loops)
{
bool any_subdivided = false;
double time = PIL_check_seconds_timer();
RNG *rng = BLI_rng_new((int)(time * 1000.0f));
while (!BLI_heapsimple_is_empty(eq_ctx->q->heap)) {
if (PIL_check_seconds_timer() - time > DYNTOPO_TIME_LIMIT) {
break;
}
#ifndef DYNTOPO_USE_HEAP
if (eq_ctx->q->totelems == 0) {
break;
}
int ri = BLI_rng_get_int(rng) % eq_ctx->q->totelems;
BMVert **pair = eq_ctx->q->elems[ri];
eq_ctx->q->elems[ri] = eq_ctx->q->elems[eq_ctx->q->totelems - 1];
eq_ctx->q->totelems--;
#else
BMVert **pair = BLI_heapsimple_pop_min(eq_ctx->q->heap);
#endif
BMVert *v1 = pair[0], *v2 = pair[1];
BMEdge *e;
@@ -1277,10 +1406,25 @@ static bool pbvh_bmesh_subdivide_long_edges(EdgeQueueContext *eq_ctx,
pbvh_bmesh_split_edge(eq_ctx, pbvh, e, edge_loops);
}
#if !defined(DYNTOPO_USE_HEAP) && defined(USE_EDGEQUEUE_TAG)
for (int i = 0; i < eq_ctx->q->totelems; i++) {
BMVert **pair = eq_ctx->q->elems[i];
BMVert *v1 = pair[0], *v2 = pair[1];
BMEdge *e = BM_edge_exists(v1, v2);
if (e) {
EDGE_QUEUE_DISABLE(e);
}
}
#endif
#ifdef USE_EDGEQUEUE_TAG_VERIFY
pbvh_bmesh_edge_tag_verify(pbvh);
#endif
BLI_rng_free(rng);
return any_subdivided;
}
@@ -1451,6 +1595,17 @@ static void pbvh_bmesh_collapse_edge(PBVH *pbvh,
BM_vert_kill(pbvh->bm, v_del);
}
/* Record the current state of vertex 'v' as its "original" state:
 * log it in the BMLog (for undo), then mirror its coordinate and normal
 * into the original-coordinate/normal customdata layers whose offsets
 * are cached on the PBVH. */
void BKE_pbvh_bmesh_update_origvert(PBVH *pbvh, BMVert *v)
{
  BM_log_vert_before_modified(pbvh->bm_log, v, pbvh->cd_vert_mask_offset);
  /* Pointers into the per-vertex customdata blocks. */
  float *co = BM_ELEM_CD_GET_VOID_P(v, pbvh->cd_origco_offset);
  float *no = BM_ELEM_CD_GET_VOID_P(v, pbvh->cd_origno_offset);
  copy_v3_v3(co, v->co);
  copy_v3_v3(no, v->no);
}
static bool pbvh_bmesh_collapse_short_edges(EdgeQueueContext *eq_ctx,
PBVH *pbvh,
BLI_Buffer *deleted_faces)
@@ -1460,8 +1615,26 @@ static bool pbvh_bmesh_collapse_short_edges(EdgeQueueContext *eq_ctx,
/* deleted verts point to vertices they were merged into, or NULL when removed. */
GHash *deleted_verts = BLI_ghash_ptr_new("deleted_verts");
double time = PIL_check_seconds_timer();
RNG *rng = BLI_rng_new(time * 1000.0);
while (!BLI_heapsimple_is_empty(eq_ctx->q->heap)) {
if (PIL_check_seconds_timer() - time > DYNTOPO_TIME_LIMIT) {
break;
}
#ifndef DYNTOPO_USE_HEAP
if (eq_ctx->q->totelems == 0) {
break;
}
int ri = BLI_rng_get_int(rng) % eq_ctx->q->totelems;
BMVert **pair = eq_ctx->q->elems[ri];
eq_ctx->q->elems[ri] = eq_ctx->q->elems[eq_ctx->q->totelems - 1];
eq_ctx->q->totelems--;
#else
BMVert **pair = BLI_heapsimple_pop_min(eq_ctx->q->heap);
#endif
BMVert *v1 = pair[0], *v2 = pair[1];
BLI_mempool_free(eq_ctx->pool, pair);
pair = NULL;
@@ -1499,6 +1672,24 @@ static bool pbvh_bmesh_collapse_short_edges(EdgeQueueContext *eq_ctx,
pbvh_bmesh_collapse_edge(pbvh, e, v1, v2, deleted_verts, deleted_faces, eq_ctx);
}
#if !defined(DYNTOPO_USE_HEAP) && defined(USE_EDGEQUEUE_TAG)
for (int i = 0; i < eq_ctx->q->totelems; i++) {
BMVert **pair = eq_ctx->q->elems[i];
BMVert *v1 = pair[0], *v2 = pair[1];
/* Check the verts still exist */
if (!(v1 = bm_vert_hash_lookup_chain(deleted_verts, v1)) ||
!(v2 = bm_vert_hash_lookup_chain(deleted_verts, v2)) || (v1 == v2)) {
continue;
}
BMEdge *e = BM_edge_exists(v1, v2);
if (e) {
EDGE_QUEUE_DISABLE(e);
}
}
#endif
BLI_rng_free(rng);
BLI_ghash_free(deleted_verts, NULL, NULL);
return any_collapsed;
@@ -1512,7 +1703,7 @@ bool pbvh_bmesh_node_raycast(PBVHNode *node,
struct IsectRayPrecalc *isect_precalc,
float *depth,
bool use_original,
int *r_active_vertex_index,
SculptIdx *r_active_vertex_index,
float *r_face_normal)
{
bool hit = false;
@@ -1556,7 +1747,7 @@ bool pbvh_bmesh_node_raycast(PBVHNode *node,
if (len_squared_v3v3(location, v_tri[j]->co) <
len_squared_v3v3(location, nearest_vertex_co)) {
copy_v3_v3(nearest_vertex_co, v_tri[j]->co);
*r_active_vertex_index = BM_elem_index_get(v_tri[j]);
*r_active_vertex_index = (SculptIdx)v_tri[j]; // BM_elem_index_get(v_tri[j]);
}
}
}
@@ -1880,10 +2071,16 @@ void BKE_pbvh_build_bmesh(PBVH *pbvh,
bool smooth_shading,
BMLog *log,
const int cd_vert_node_offset,
const int cd_face_node_offset)
const int cd_face_node_offset,
const int cd_origco_offset,
const int cd_origno_offset)
{
pbvh->cd_vert_node_offset = cd_vert_node_offset;
pbvh->cd_face_node_offset = cd_face_node_offset;
pbvh->cd_origco_offset = cd_origco_offset;
pbvh->cd_origno_offset = cd_origno_offset;
pbvh->cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);
pbvh->bm = bm;
BKE_pbvh_bmesh_detail_size_set(pbvh, 0.75);
@@ -1892,7 +2089,7 @@ void BKE_pbvh_build_bmesh(PBVH *pbvh,
pbvh->bm_log = log;
/* TODO: choose leaf limit better */
pbvh->leaf_limit = 100;
pbvh->leaf_limit = 3000;
if (smooth_shading) {
pbvh->flags |= PBVH_DYNTOPO_SMOOTH_SHADING;
@@ -1952,6 +2149,10 @@ void BKE_pbvh_build_bmesh(PBVH *pbvh,
MEM_freeN(nodeinfo);
}
static double last_update_time[128] = {
0,
};
/* Collapse short edges, subdivide long edges */
bool BKE_pbvh_bmesh_update_topology(PBVH *pbvh,
PBVHTopologyUpdateMode mode,
@@ -1959,8 +2160,18 @@ bool BKE_pbvh_bmesh_update_topology(PBVH *pbvh,
const float view_normal[3],
float radius,
const bool use_frontface,
const bool use_projected)
const bool use_projected,
int sym_axis)
{
if (sym_axis >= 0 &&
PIL_check_seconds_timer() - last_update_time[sym_axis] < DYNTOPO_RUN_INTERVAL) {
return false;
}
if (sym_axis >= 0) {
last_update_time[sym_axis] = PIL_check_seconds_timer();
}
/* 2 is enough for edge faces - manifold edge */
BLI_buffer_declare_static(BMLoop *, edge_loops, BLI_BUFFER_NOP, 2);
BLI_buffer_declare_static(BMFace *, deleted_faces, BLI_BUFFER_NOP, 32);
@@ -1990,6 +2201,9 @@ bool BKE_pbvh_bmesh_update_topology(PBVH *pbvh,
&eq_ctx, pbvh, center, view_normal, radius, use_frontface, use_projected);
modified |= pbvh_bmesh_collapse_short_edges(&eq_ctx, pbvh, &deleted_faces);
BLI_heapsimple_free(q.heap, NULL);
if (q.elems) {
MEM_freeN(q.elems);
}
BLI_mempool_destroy(queue_pool);
}
@@ -2008,18 +2222,37 @@ bool BKE_pbvh_bmesh_update_topology(PBVH *pbvh,
long_edge_queue_create(
&eq_ctx, pbvh, center, view_normal, radius, use_frontface, use_projected);
modified |= pbvh_bmesh_subdivide_long_edges(&eq_ctx, pbvh, &edge_loops);
if (q.elems) {
MEM_freeN(q.elems);
}
BLI_heapsimple_free(q.heap, NULL);
BLI_mempool_destroy(queue_pool);
}
/* Unmark nodes */
for (int n = 0; n < pbvh->totnode; n++) {
PBVHNode *node = &pbvh->nodes[n];
if (modified) {
for (int i = 0; i < pbvh->totnode; i++) {
PBVHNode *node = pbvh->nodes + i;
if (node->flag & PBVH_Leaf && node->flag & PBVH_UpdateTopology) {
node->flag &= ~PBVH_UpdateTopology;
if ((node->flag & PBVH_Leaf) && (node->flag & PBVH_UpdateTopology) &&
!(node->flag & PBVH_FullyHidden)) {
node->flag &= ~PBVH_UpdateTopology;
/* Recursively split nodes that have gotten too many
* elements */
pbvh_bmesh_node_limit_ensure(pbvh, i);
}
}
}
else { // still unmark nodes
for (int i = 0; i < pbvh->totnode; i++) {
PBVHNode *node = pbvh->nodes + i;
if ((node->flag & PBVH_Leaf) && (node->flag & PBVH_UpdateTopology)) {
node->flag &= ~PBVH_UpdateTopology;
}
}
}
BLI_buffer_free(&edge_loops);
BLI_buffer_free(&deleted_faces);
@@ -2091,10 +2324,362 @@ void BKE_pbvh_bmesh_node_save_orig(BMesh *bm, PBVHNode *node)
node->bm_tot_ortri = i;
}
/* Recursively compute, cache (in tm_subtree_tottri) and return the number
 * of faces contained in the subtree rooted at 'n'.  Leaves count their own
 * face set directly. */
static int pbvh_count_subtree_verts(PBVH *pbvh, PBVHNode *n)
{
  int total;

  if (n->flag & PBVH_Leaf) {
    total = BLI_gset_len(n->bm_faces);
  }
  else {
    const int child = n->children_offset;
    total = pbvh_count_subtree_verts(pbvh, &pbvh->nodes[child]) +
            pbvh_count_subtree_verts(pbvh, &pbvh->nodes[child + 1]);
  }

  n->tm_subtree_tottri = total;
  return total;
}
/* Merge every vertex and face of the subtree rooted at 'node' into
 * 'parent's own element sets, marking all visited nodes (except 'parent'
 * itself) for deletion.  Element node references are reset to
 * DYNTOPO_NODE_NONE so ownership can be reassigned afterwards.
 * Fix: removed an unused 'BMVert *v' declaration that was shadowed by
 * the iterator-scoped locals below. */
static void pbvh_bmesh_join_subnodes(PBVH *pbvh, PBVHNode *node, PBVHNode *parent)
{
  if (!(node->flag & PBVH_Leaf)) {
    int ni = node->children_offset;

    if (ni > 0 && ni < pbvh->totnode - 1) {
      pbvh_bmesh_join_subnodes(pbvh, pbvh->nodes + ni, parent);
      pbvh_bmesh_join_subnodes(pbvh, pbvh->nodes + ni + 1, parent);
    }
    else {
      printf("node corruption: %d\n", ni);
      return;
    }

    if (node != parent) {
      node->flag |= PBVH_Delete; /* mark for deletion */
    }

    return;
  }

  if (node != parent) {
    node->flag |= PBVH_Delete; /* mark for deletion */
  }

  GSetIterator gsiter;

  GSET_ITER (gsiter, node->bm_unique_verts) {
    BMVert *v = BLI_gsetIterator_getKey(&gsiter);

    BLI_gset_add(parent->bm_unique_verts, v);
    BM_ELEM_CD_SET_INT(v, pbvh->cd_vert_node_offset, DYNTOPO_NODE_NONE);
  }

  GSET_ITER (gsiter, node->bm_faces) {
    BMFace *f = BLI_gsetIterator_getKey(&gsiter);

    BLI_gset_add(parent->bm_faces, f);
    BM_ELEM_CD_SET_INT(f, pbvh->cd_face_node_offset, DYNTOPO_NODE_NONE);
  }
}
/* Walk the tree and collapse interior nodes whose subtree holds too few
 * triangles: all verts/faces of the subtree are merged into the node,
 * which becomes a leaf.  The root node is never collapsed.  'parent' is
 * kept for recursion symmetry (currently unused).
 * Fixes: removed an unused 'size_higher' constant, an unused
 * 'BMVert *v'/'GSetIterator' pair, a duplicate 'children_offset = 0'
 * assignment, and a dead loop over bm_other_verts whose body was fully
 * commented out. */
static void BKE_pbvh_bmesh_corect_tree(PBVH *pbvh, PBVHNode *node, PBVHNode *parent)
{
  /* Join when a subtree holds fewer than ~half the leaf limit. */
  const int size_lower = pbvh->leaf_limit - (pbvh->leaf_limit >> 1);

  if (node->flag & PBVH_Leaf) {
    return;
  }

  /* Join nodes if the subtree lacks triangles, unless node is the root. */
  if (node->tm_subtree_tottri < size_lower && node != pbvh->nodes) {
    node->bm_unique_verts = BLI_gset_ptr_new("bm_unique_verts");
    node->bm_other_verts = BLI_gset_ptr_new("bm_other_verts");
    node->bm_faces = BLI_gset_ptr_new("bm_faces");

    pbvh_bmesh_join_subnodes(pbvh, pbvh->nodes + node->children_offset, node);
    pbvh_bmesh_join_subnodes(pbvh, pbvh->nodes + node->children_offset + 1, node);

    node->children_offset = 0;
    node->draw_buffers = NULL;
    node->flag |= PBVH_Leaf | PBVH_UpdateRedraw | PBVH_UpdateBB | PBVH_UpdateDrawBuffers |
                  PBVH_RebuildDrawBuffers | PBVH_UpdateOriginalBB | PBVH_UpdateMask |
                  PBVH_UpdateVisibility | PBVH_UpdateColor | PBVH_UpdateTopology |
                  PBVH_UpdateNormals;

    /* Rebuild the other-verts set from the joined faces so that it never
     * overlaps bm_unique_verts. */
    GSet *other = BLI_gset_ptr_new(__func__);
    GSetIterator gsiter;

    GSET_ITER (gsiter, node->bm_faces) {
      BMFace *f = BLI_gsetIterator_getKey(&gsiter);
      BMLoop *l = f->l_first;

      BM_ELEM_CD_SET_INT(f, pbvh->cd_face_node_offset, DYNTOPO_NODE_NONE);

      do {
        if (!BLI_gset_haskey(node->bm_unique_verts, l->v)) {
          BLI_gset_add(other, l->v);
        }
        l = l->next;
      } while (l != f->l_first);
    }

    BLI_gset_free(node->bm_other_verts, NULL);
    node->bm_other_verts = other;

    /* Recompute the bounding box from the joined vertices. */
    BB_reset(&node->vb);

    GSET_ITER (gsiter, node->bm_unique_verts) {
      BMVert *v = BLI_gsetIterator_getKey(&gsiter);
      BB_expand(&node->vb, v->co);
    }

    GSET_ITER (gsiter, node->bm_other_verts) {
      BMVert *v = BLI_gsetIterator_getKey(&gsiter);
      BB_expand(&node->vb, v->co);
    }

    node->orig_vb = node->vb;

    return;
  }

  int ni = node->children_offset;

  for (int i = 0; i < 2; i++, ni++) {
    PBVHNode *child = pbvh->nodes + ni;
    BKE_pbvh_bmesh_corect_tree(pbvh, child, node);
  }
}
/* Collapse undersized subtrees into single leaves and compact the node
 * array afterwards: deleted nodes are freed, surviving nodes are moved
 * down over them, child offsets are remapped, and per-element node
 * references (customdata ints) are rebuilt to match the new indices. */
static void pbvh_bmesh_join_nodes(PBVH *bvh)
{
  /* Cache subtree triangle counts, then join undersized subtrees. */
  pbvh_count_subtree_verts(bvh, bvh->nodes);
  BKE_pbvh_bmesh_corect_tree(bvh, bvh->nodes, NULL);

  /* Count surviving nodes, repairing inconsistent child pairs along the way. */
  int totnode = 0;
  for (int i = 0; i < bvh->totnode; i++) {
    PBVHNode *n = bvh->nodes + i;
    if (!(n->flag & PBVH_Delete)) {
      if (!(n->flag & PBVH_Leaf)) {
        PBVHNode *n1 = bvh->nodes + n->children_offset;
        PBVHNode *n2 = bvh->nodes + n->children_offset + 1;
        if ((n1->flag & PBVH_Delete) != (n2->flag & PBVH_Delete)) {
          /* One child deleted but not the other: resurrect the deleted one
           * as an empty leaf so the tree stays binary. */
          printf("un-deleting an empty node\n");
          PBVHNode *n3 = n1->flag & PBVH_Delete ? n1 : n2;
          n3->flag = PBVH_Leaf;
          n3->bm_unique_verts = BLI_gset_ptr_new("bm_unique_verts");
          n3->bm_other_verts = BLI_gset_ptr_new("bm_other_verts");
          n3->bm_faces = BLI_gset_ptr_new("bm_faces");
        }
        else if ((n1->flag & PBVH_Delete) && (n2->flag & PBVH_Delete)) {
          /* Both children gone: this interior node becomes a leaf. */
          n->children_offset = 0;
          n->flag |= PBVH_Leaf;
          if (!n->bm_unique_verts) {
            // should not happen
            n->bm_unique_verts = BLI_gset_ptr_new("bm_unique_verts");
            n->bm_other_verts = BLI_gset_ptr_new("bm_other_verts");
            n->bm_faces = BLI_gset_ptr_new("bm_faces");
          }
        }
      }
      totnode++;
    }
  }

  int *map = MEM_callocN(sizeof(int) * bvh->totnode, "bmesh map temp");

  /* Build old-index -> new-index map for remapping child offsets, and free
   * per-node data of the nodes being dropped. */
  int j = 0;
  for (int i = 0; i < bvh->totnode; i++) {
    PBVHNode *n = bvh->nodes + i;
    if (!(n->flag & PBVH_Delete)) {
      map[i] = j++;
    }
    else if (1) {
      if (n->layer_disp) {
        MEM_freeN(n->layer_disp);
        n->layer_disp = NULL;
      }
      if (n->draw_buffers) {
        GPU_pbvh_buffers_free(n->draw_buffers);
        n->draw_buffers = NULL;
      }
      if (n->vert_indices) {
        MEM_freeN((void *)n->vert_indices);
        n->vert_indices = NULL;
      }
      if (n->face_vert_indices) {
        MEM_freeN((void *)n->face_vert_indices);
        n->face_vert_indices = NULL;
      }
      if (n->bm_orco) {
        MEM_freeN(n->bm_orco);
        n->bm_orco = NULL;
      }
      if (n->bm_ortri) {
        MEM_freeN(n->bm_ortri);
        n->bm_ortri = NULL;
      }
      if (n->bm_unique_verts) {
        BLI_gset_free(n->bm_unique_verts, NULL);
        n->bm_unique_verts = NULL;
      }
      if (n->bm_other_verts) {
        BLI_gset_free(n->bm_other_verts, NULL);
        n->bm_other_verts = NULL;
      }
      if (n->bm_faces) {
        BLI_gset_free(n->bm_faces, NULL);
        n->bm_faces = NULL;
      }
    }
  }

  /* Compact the node array in place, remapping children offsets as we go. */
  j = 0;
  for (int i = 0; i < bvh->totnode; i++) {
    if (!(bvh->nodes[i].flag & PBVH_Delete)) {
      int i1 = map[bvh->nodes[i].children_offset];
      int i2 = map[bvh->nodes[i].children_offset + 1];
      if (bvh->nodes[i].children_offset >= bvh->totnode) {
        printf("bad child node reference %d->%d, totnode: %d\n",
               i,
               bvh->nodes[i].children_offset,
               bvh->totnode);
        continue;
      }
      /* Children must stay adjacent after remapping. */
      if (bvh->nodes[i].children_offset && i2 != i1 + 1) {
        printf(" pbvh corruption during node join %d %d\n", i1, i2);
      }
      bvh->nodes[j] = bvh->nodes[i];
      bvh->nodes[j].children_offset = i1;
      j++;
    }
  }
  if (j != totnode) {
    printf("eek!");
  }
  /* Zero the tail of the array so stale pointers can't be reused. */
  if (bvh->totnode != j) {
    memset(bvh->nodes + j, 0, sizeof(*bvh->nodes) * (bvh->totnode - j));
    bvh->node_mem_count = j;
  }
  bvh->totnode = j;
  GSetIterator gsiter;
  BMVert *v;
  /* Rewrite per-element node indices to match the compacted array. */
  for (int i = 0; i < bvh->totnode; i++) {
    PBVHNode *n = bvh->nodes + i;
    if (!(n->flag & PBVH_Leaf)) {
      continue;
    }
    if (!n->bm_unique_verts) {
      printf("ERROR!\n");
      n->bm_unique_verts = BLI_gset_ptr_new("bleh");
      n->bm_other_verts = BLI_gset_ptr_new("bleh");
      n->bm_faces = BLI_gset_ptr_new("bleh");
    }
    GSET_ITER (gsiter, n->bm_unique_verts) {
      BMVert *v = BLI_gsetIterator_getKey(&gsiter);
      BM_ELEM_CD_SET_INT(v, bvh->cd_vert_node_offset, i);
    }
    GSET_ITER (gsiter, n->bm_faces) {
      BMFace *f = BLI_gsetIterator_getKey(&gsiter);
      BM_ELEM_CD_SET_INT(f, bvh->cd_face_node_offset, i);
    }
  }
  /* Claim ownership of "other" verts that lost their owning node:
   * promote them to unique verts of the first leaf that references them. */
  BMVert **scratch = NULL;
  BLI_array_declare(scratch);
  for (int i = 0; i < bvh->totnode; i++) {
    PBVHNode *n = bvh->nodes + i;
    if (!(n->flag & PBVH_Leaf)) {
      continue;
    }
    BLI_array_clear(scratch);
    GSET_ITER (gsiter, n->bm_other_verts) {
      BMVert *v = BLI_gsetIterator_getKey(&gsiter);
      int ni = BM_ELEM_CD_GET_INT(v, bvh->cd_vert_node_offset);
      if (ni == DYNTOPO_NODE_NONE) {
        BLI_array_append(scratch, v);
      }
      // BM_ELEM_CD_SET_INT(v, bvh->cd_vert_node_offset, i);
    }
    int slen = BLI_array_len(scratch);
    for (int j = 0; j < slen; j++) {
      BMVert *v = scratch[j];
      BLI_gset_remove(n->bm_other_verts, v, NULL);
      BLI_gset_add(n->bm_unique_verts, v);
      BM_ELEM_CD_SET_INT(v, bvh->cd_vert_node_offset, i);
    }
  }
  BLI_array_free(scratch);
  MEM_freeN(map);
}
void BKE_pbvh_bmesh_after_stroke(PBVH *pbvh)
{
check_heap();
pbvh_bmesh_join_nodes(pbvh);
check_heap();
for (int i = 0; i < pbvh->totnode; i++) {
PBVHNode *n = &pbvh->nodes[i];
PBVHNode *n = pbvh->nodes + i;
if (n->flag & PBVH_Leaf) {
/* Free orco/ortri data */
pbvh_bmesh_node_drop_orig(n);
@@ -2104,6 +2689,8 @@ void BKE_pbvh_bmesh_after_stroke(PBVH *pbvh)
pbvh_bmesh_node_limit_ensure(pbvh, i);
}
}
BKE_pbvh_update_bounds(pbvh, (PBVH_UpdateBB | PBVH_UpdateOriginalBB | PBVH_UpdateRedraw));
}
void BKE_pbvh_bmesh_detail_size_set(PBVH *pbvh, float detail_size)

View File

@@ -1,3 +1,6 @@
#ifndef _PBVH_INTERN_H
#define _PBVH_INTERN_H
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -30,6 +33,7 @@ typedef struct {
float bmin[3], bmax[3], bcentroid[3];
} BBC;
struct GHash;
/* Note: this structure is getting large, might want to split it into
* union'd structs */
struct PBVHNode {
@@ -86,7 +90,7 @@ struct PBVHNode {
/* Indicates whether this node is a leaf or not; also used for
* marking various updates that need to be applied. */
PBVHNodeFlags flag : 16;
PBVHNodeFlags flag : 32;
/* Used for raycasting: how close bb is to the ray point. */
float tmin;
@@ -105,8 +109,24 @@ struct PBVHNode {
int (*bm_ortri)[3];
int bm_tot_ortri;
/* trimesh */
GSet *tm_faces;
TableGSet *tm_unique_verts;
TableGSet *tm_other_verts;
float (*tm_orco)[3];
int (*tm_ortri)[3];
int tm_tot_ortri;
int tm_tot_orco;
int tm_subtree_tottri;
/* Used to store the brush color during a stroke and composite it over the original color */
PBVHColorBufferNode color_buffer;
#ifdef PROXY_ADVANCED
ProxyVertArray proxyverts;
#endif
};
typedef enum {
@@ -168,13 +188,25 @@ struct PBVH {
BMesh *bm;
float bm_max_edge_len;
float bm_min_edge_len;
int cd_vert_node_offset;
int cd_face_node_offset;
int cd_origco_offset;
int cd_origno_offset;
int cd_vert_mask_offset;
float planes[6][4];
int num_planes;
struct BMLog *bm_log;
/* trimesh data */
struct TM_TriMesh *tm;
float tm_max_edge_len;
float tm_min_edge_len;
struct TriMeshLog *tm_log;
struct SubdivCCG *subdiv_ccg;
};
@@ -224,7 +256,7 @@ bool pbvh_bmesh_node_raycast(PBVHNode *node,
struct IsectRayPrecalc *isect_precalc,
float *dist,
bool use_original,
int *r_active_vertex_index,
SculptIdx *r_active_vertex_index,
float *r_face_normal);
bool pbvh_bmesh_node_nearest_to_ray(PBVHNode *node,
const float ray_start[3],
@@ -234,3 +266,23 @@ bool pbvh_bmesh_node_nearest_to_ray(PBVHNode *node,
bool use_original);
void pbvh_bmesh_normals_update(PBVHNode **nodes, int totnode);
/* pbvh_bmesh.c */
bool pbvh_trimesh_node_raycast(PBVHNode *node,
const float ray_start[3],
const float ray_normal[3],
struct IsectRayPrecalc *isect_precalc,
float *dist,
bool use_original,
SculptIdx *r_active_vertex_index,
float *r_face_normal);
bool pbvh_trimesh_node_nearest_to_ray(PBVHNode *node,
const float ray_start[3],
const float ray_normal[3],
float *depth,
float *dist_sq,
bool use_original);
void pbvh_trimesh_normals_update(PBVHNode **nodes, int totnode);
#endif /* _PBVH_INTERN_H */

View File

@@ -53,6 +53,42 @@ typedef void *(*GHashValCopyFP)(const void *val);
typedef struct GHash GHash;
#define GHASH_USE_MODULO_BUCKETS
/* WARNING! Keep in sync with ugly _gh_Entry in header!!! */
typedef struct Entry {
  struct Entry *next; /* Next entry in this bucket's chain. */
  void *key;
} Entry;

typedef struct GHashEntry {
  Entry e; /* Must be first: entries are passed around as Entry pointers. */
  void *val;
} GHashEntry;

/* GSet entries store only a key (no value member). */
typedef Entry GSetEntry;
#define GHASH_ENTRY_SIZE(_is_gset) ((_is_gset) ? sizeof(GSetEntry) : sizeof(GHashEntry))
/* Chained hash table; buckets hold singly-linked lists of entries
 * allocated from 'entrypool'. */
struct GHash {
  GHashHashFP hashfp; /* Key hash callback. */
  GHashCmpFP cmpfp;   /* Key comparison callback. */
  Entry **buckets;    /* Array of bucket chain heads. */
  struct BLI_mempool *entrypool; /* Pool all entries are allocated from. */
  uint nbuckets;
  uint limit_grow, limit_shrink; /* Entry-count thresholds for resizing. */
#ifdef GHASH_USE_MODULO_BUCKETS
  uint cursize, size_min; /* Indices into the bucket-size table. */
#else
  uint bucket_mask, bucket_bit, bucket_bit_min;
#endif
  uint nentries; /* Number of entries currently stored. */
  uint flag;
};
typedef struct GHashIterator {
GHash *gh;
struct Entry *curEntry;
@@ -131,7 +167,26 @@ GHashIterator *BLI_ghashIterator_new(GHash *gh) ATTR_MALLOC ATTR_WARN_UNUSED_RES
void BLI_ghashIterator_init(GHashIterator *ghi, GHash *gh);
void BLI_ghashIterator_free(GHashIterator *ghi);
void BLI_ghashIterator_step(GHashIterator *ghi);
//void BLI_ghashIterator_step(GHashIterator *ghi);
/**
* Steps the iterator to the next index.
*
* \param ghi: The iterator.
*/
BLI_INLINE void BLI_ghashIterator_step(GHashIterator *ghi)
{
  /* Only step while iteration has not already finished (curEntry non-NULL). */
  if (ghi->curEntry) {
    ghi->curEntry = ghi->curEntry->next;
    /* Current chain exhausted: scan forward for the next non-empty bucket. */
    while (!ghi->curEntry) {
      ghi->curBucket++;
      if (ghi->curBucket == ghi->gh->nbuckets) {
        /* Past the last bucket: curEntry stays NULL, iteration is done. */
        break;
      }
      ghi->curEntry = ghi->gh->buckets[ghi->curBucket];
    }
  }
}
BLI_INLINE void *BLI_ghashIterator_getKey(GHashIterator *ghi) ATTR_WARN_UNUSED_RESULT;
BLI_INLINE void *BLI_ghashIterator_getValue(GHashIterator *ghi) ATTR_WARN_UNUSED_RESULT;

View File

@@ -23,7 +23,14 @@
* \ingroup bli
*/
#define USE_SMALLHASH_REMOVE
#include "BLI_utildefines.h"
#include "BLI_compiler_attrs.h"
#include "BLI_compiler_compat.h"
#include "BLI_compiler_typecheck.h"
#include "BLI_assert.h"
#ifdef __cplusplus
extern "C" {
@@ -48,29 +55,287 @@ typedef struct SmallHash {
typedef struct {
const SmallHash *sh;
unsigned int i;
uintptr_t key;
void *val;
unsigned int i, done;
} SmallHashIter;
#define SMHASH_KEY_UNUSED ((uintptr_t)(UINTPTR_MAX - 0))
#define SMHASH_CELL_FREE ((void *)(UINTPTR_MAX - 1))
#define SMHASH_CELL_UNUSED ((void *)(UINTPTR_MAX - 2))
/* typically this re-assigns 'h' */
#define SMHASH_NEXT(h, hoff) \
(CHECK_TYPE_INLINE(&(h), uintptr_t *), \
CHECK_TYPE_INLINE(&(hoff), uintptr_t *), \
((h) + (((hoff) = ((hoff)*2) + 1), (hoff))))
/* A bucket holds a live entry unless its value is one of the sentinel
 * markers (free, or removed when removal support is compiled in). */
BLI_INLINE int smallhash_val_is_used(const void *val)
{
#ifdef USE_SMALLHASH_REMOVE
  return (val != SMHASH_CELL_FREE) && (val != SMHASH_CELL_UNUSED);
#else
  return (val != SMHASH_CELL_FREE);
#endif
}
/* Key "hash": identity; the raw key value is used directly for probing. */
BLI_INLINE uintptr_t smallhash_key(const uintptr_t key)
{
  return key;
}
/* Find the entry for 'key' by open-addressed probing (SMHASH_NEXT),
 * skipping over removed cells.  Returns NULL when the key is absent. */
BLI_INLINE SmallHashEntry *smallhash_lookup(const SmallHash *sh, const uintptr_t key)
{
  SmallHashEntry *e;
  uintptr_t h = smallhash_key(key);
  uintptr_t hoff = 1;
  BLI_assert(key != SMHASH_KEY_UNUSED);
  /* note: there are always more buckets than entries,
   * so we know there will always be a free bucket if the key isn't found. */
  for (e = &sh->buckets[h % sh->nbuckets]; e->val != SMHASH_CELL_FREE;)
  {
    if (e->key == key) {
      /* should never happen because unused keys are zero'd */
      BLI_assert(e->val != SMHASH_CELL_UNUSED);
      return e;
    }
    h = SMHASH_NEXT(h, hoff);
    e = &sh->buckets[h % sh->nbuckets];
  }
  return NULL;
}
/* Return the value stored for 'key', or NULL when the key is absent. */
BLI_INLINE void *BLI_smallhash_lookup(const SmallHash *sh, uintptr_t key)
{
  SmallHashEntry *e = smallhash_lookup(sh, key);
  if (e == NULL) {
    return NULL;
  }
  return e->val;
}
/* Return a pointer to the value slot for 'key', or NULL when absent. */
BLI_INLINE void **BLI_smallhash_lookup_p(const SmallHash *sh, uintptr_t key)
{
  SmallHashEntry *e = smallhash_lookup(sh, key);
  if (e == NULL) {
    return NULL;
  }
  return &e->val;
}
/* True when an entry for 'key' exists in the hash. */
BLI_INLINE bool BLI_smallhash_haskey(const SmallHash *sh, uintptr_t key)
{
  return smallhash_lookup(sh, key) != NULL;
}
/* Number of entries currently stored in the hash. */
BLI_INLINE int BLI_smallhash_len(const SmallHash *sh)
{
  return (int)sh->nentries;
}
/* Advance the iterator to the next used bucket.  The entry's key/value are
 * cached on the iterator (for BLI_smallhash_iterkey/iterval).  Returns the
 * entry, or NULL — and sets 'done' — when iteration is finished. */
BLI_INLINE SmallHashEntry *smallhash_iternext(SmallHashIter *iter, uintptr_t *key)
{
  while (iter->i < iter->sh->nbuckets) {
    if (smallhash_val_is_used(iter->sh->buckets[iter->i].val)) {
      SmallHashEntry *e = iter->sh->buckets + iter->i;
      /* Cache so the entry's key/value survive further stepping. */
      iter->key = e->key;
      iter->val = e->val;
      if (key) {
        *key = iter->key;
      }
      iter->i++;
      return e;
    }
    iter->i++;
  }
  iter->done = 1;
  return NULL;
}
/* Step the iterator; returns the next value, or NULL when finished. */
BLI_INLINE void *BLI_smallhash_iternext(SmallHashIter *iter, uintptr_t *key)
{
  SmallHashEntry *e = smallhash_iternext(iter, key);
  if (e == NULL) {
    return NULL;
  }
  return e->val;
}
/* Step the iterator; returns a pointer to the next value slot, or NULL. */
BLI_INLINE void **BLI_smallhash_iternext_p(SmallHashIter *iter, uintptr_t *key)
{
  SmallHashEntry *e = smallhash_iternext(iter, key);
  if (e == NULL) {
    return NULL;
  }
  return &e->val;
}
/* Initialize 'iter' over 'sh' and return the first value
 * (NULL when the hash is empty). */
BLI_INLINE void *BLI_smallhash_iternew(const SmallHash *sh, SmallHashIter *iter, uintptr_t *key)
{
  iter->sh = sh;
  iter->done = 0;
  iter->i = 0;
  iter->key = 0;
  iter->val = NULL;
  return BLI_smallhash_iternext(iter, key);
}
void BLI_smallhash_clear(SmallHash *sh);
BLI_INLINE void smallhash_resize_buckets(SmallHash *sh, const uint nbuckets);
extern const uint BLI_ghash_hash_sizes[];
/**
* Check if the number of items in the smallhash is large enough to require more buckets.
*/
BLI_INLINE bool smallhash_test_expand_buckets(const uint nentries, const uint nbuckets)
{
  /* Grow when entries * ~1.25 would exceed the bucket count. */
  const uint scaled = nentries + (nentries >> 1) - (nentries >> 2);
  return scaled > nbuckets;
}
# ifdef __SSE2__
//BLI_INLINE SmallHashEntry *smallhash_lookup_simd(SmallHash *sh, const uintptr_t key) {
//__m128
//}
#endif
/* Probe from the key's home bucket and return the first bucket that does
 * not hold a live entry (free or removed).  Relies on the table always
 * having at least one unused bucket, so the loop terminates. */
BLI_INLINE SmallHashEntry *smallhash_lookup_first_free(SmallHash *sh, const uintptr_t key)
{
  SmallHashEntry *e;
  uintptr_t h = smallhash_key(key);
  uintptr_t hoff = 1;
  for (e = &sh->buckets[h % sh->nbuckets]; smallhash_val_is_used(e->val);
       h = SMHASH_NEXT(h, hoff), e = &sh->buckets[h % sh->nbuckets]) {
    /* pass */
  }
  return e;
}
/* Mark every bucket of the table as free/unused. */
BLI_INLINE void smallhash_init_empty(SmallHash *sh)
{
  for (uint i = 0; i < sh->nbuckets; i++) {
    SmallHashEntry *e = &sh->buckets[i];
    e->key = SMHASH_KEY_UNUSED;
    e->val = SMHASH_CELL_FREE;
  }
}
/* Re-bucket all live entries into a table of 'nbuckets' buckets.  Small
 * tables live in the embedded stack storage; larger ones on the heap.
 * Fix: in the shrink-to-stack path the old heap buffer was leaked and
 * MEM_freeN() was later called on the alloca() copy instead — the heap
 * buffer is now freed right after it has been copied aside. */
BLI_INLINE void smallhash_resize_buckets(SmallHash *sh, const uint nbuckets)
{
  SmallHashEntry *buckets_old = sh->buckets;
  const uint nbuckets_old = sh->nbuckets;
  const bool was_alloc = (buckets_old != sh->buckets_stack);
  bool free_old = was_alloc; /* Free the old buffer at the end? */
  uint i = 0;

  BLI_assert(sh->nbuckets != nbuckets);
  if (nbuckets <= SMSTACKSIZE) {
    /* Copy the old entries aside so the embedded storage can be reused. */
    const size_t size = sizeof(*buckets_old) * nbuckets_old;
    SmallHashEntry *buckets_copy = alloca(size);
    memcpy(buckets_copy, sh->buckets, size);
    if (was_alloc) {
      /* Old heap buffer is fully copied: release it now, and make sure
       * MEM_freeN() isn't called on the alloca() copy below. */
      MEM_freeN(sh->buckets);
      free_old = false;
    }
    buckets_old = buckets_copy;
    sh->buckets = sh->buckets_stack;
  }
  else {
    sh->buckets = MEM_mallocN(sizeof(*sh->buckets) * nbuckets, __func__);
  }

  sh->nbuckets = nbuckets;

  smallhash_init_empty(sh);

  /* Re-insert every live entry into its new home bucket. */
  for (i = 0; i < nbuckets_old; i++) {
    if (smallhash_val_is_used(buckets_old[i].val)) {
      SmallHashEntry *e = smallhash_lookup_first_free(sh, buckets_old[i].key);
      e->key = buckets_old[i].key;
      e->val = buckets_old[i].val;
    }
  }

  if (free_old) {
    MEM_freeN(buckets_old);
  }
}
/* Ensure an entry for 'key' exists: point *item at its value slot and return
 * true when the key was already present, false when a new NULL-valued entry
 * was inserted. Mirrors BLI_ghash_ensure_p().
 *
 * Fixes relative to the original:
 * - 'nentries' was incremented even when the key already existed,
 *   corrupting the element count.
 * - an existing entry's value was clobbered to NULL, defeating the purpose
 *   of ensure_p (callers rely on the stored value surviving).
 * - a redundant smallhash_lookup() call whose result was discarded. */
BLI_INLINE bool BLI_smallhash_ensure_p(SmallHash *sh, uintptr_t key, void ***item)
{
  SmallHashEntry *e = smallhash_lookup(sh, key);

  if (e) {
    /* Key already present: leave the stored value untouched. */
    *item = &e->val;
    return true;
  }

  /* Grow before searching for a free slot, since a rehash would invalidate
   * any bucket pointer found earlier. */
  if (UNLIKELY(smallhash_test_expand_buckets(++sh->nentries, sh->nbuckets))) {
    smallhash_resize_buckets(sh, BLI_ghash_hash_sizes[++sh->cursize]);
  }

  e = smallhash_lookup_first_free(sh, key);
  e->key = key;
  e->val = NULL;

  *item = &e->val;
  return false;
}
/* Key of the entry the iterator currently points at. */
BLI_INLINE uintptr_t BLI_smallhash_iterkey(SmallHashIter *iter)
{
  return iter->key;
}
/* Value of the entry the iterator currently points at. */
BLI_INLINE void *BLI_smallhash_iterval(SmallHashIter *iter)
{
  return (void *)iter->val;
}
/* Reset 'iter' to the start of 'sh' and return a pointer to the first used
 * entry's value slot (NULL for an empty table). */
BLI_INLINE void **BLI_smallhash_iternew_p(const SmallHash *sh, SmallHashIter *iter, uintptr_t *key)
{
  iter->done = 0;
  iter->val = NULL;
  iter->key = 0;
  iter->i = 0;
  iter->sh = sh;

  return BLI_smallhash_iternext_p(iter, key);
}
#define SMALLHASH_ITER(iter, sh)\
for (BLI_smallhash_iternew(sh, &(iter), NULL); !(iter).done; BLI_smallhash_iternext(&(iter), NULL))
void BLI_smallhash_init_ex(SmallHash *sh, const unsigned int nentries_reserve) ATTR_NONNULL(1);
void BLI_smallhash_init(SmallHash *sh) ATTR_NONNULL(1);
void BLI_smallhash_release(SmallHash *sh) ATTR_NONNULL(1);
void BLI_smallhash_insert(SmallHash *sh, uintptr_t key, void *item) ATTR_NONNULL(1);
bool BLI_smallhash_reinsert(SmallHash *sh, uintptr_t key, void *item) ATTR_NONNULL(1);
bool BLI_smallhash_remove(SmallHash *sh, uintptr_t key) ATTR_NONNULL(1);
void *BLI_smallhash_lookup(const SmallHash *sh, uintptr_t key)
ATTR_NONNULL(1) ATTR_WARN_UNUSED_RESULT;
void **BLI_smallhash_lookup_p(const SmallHash *sh, uintptr_t key)
ATTR_NONNULL(1) ATTR_WARN_UNUSED_RESULT;
bool BLI_smallhash_haskey(const SmallHash *sh, uintptr_t key) ATTR_NONNULL(1);
int BLI_smallhash_len(const SmallHash *sh) ATTR_NONNULL(1);
void *BLI_smallhash_iternext(SmallHashIter *iter, uintptr_t *key)
ATTR_NONNULL(1) ATTR_WARN_UNUSED_RESULT;
void **BLI_smallhash_iternext_p(SmallHashIter *iter, uintptr_t *key)
ATTR_NONNULL(1) ATTR_WARN_UNUSED_RESULT;
void *BLI_smallhash_iternew(const SmallHash *sh, SmallHashIter *iter, uintptr_t *key)
ATTR_NONNULL(1) ATTR_WARN_UNUSED_RESULT;
void **BLI_smallhash_iternew_p(const SmallHash *sh, SmallHashIter *iter, uintptr_t *key)
ATTR_NONNULL(1) ATTR_WARN_UNUSED_RESULT;
bool BLI_smallhash_remove(SmallHash *sh, uintptr_t key);
void BLI_smallhash_reserve(SmallHash *sh, uint size);
SmallHash *BLI_smallhash_new();
SmallHash *BLI_smallhash_new_ex(int reserve);
void BLI_smallhash_free(SmallHash *sh);
//void *BLI_smallhash_lookup(const SmallHash *sh, uintptr_t key)
// ATTR_NONNULL(1) ATTR_WARN_UNUSED_RESULT;
//void **BLI_smallhash_lookup_p(const SmallHash *sh, uintptr_t key)
// ATTR_NONNULL(1) ATTR_WARN_UNUSED_RESULT;
//bool BLI_smallhash_haskey(const SmallHash *sh, uintptr_t key) ATTR_NONNULL(1);
//int BLI_smallhash_len(const SmallHash *sh) ATTR_NONNULL(1);
/* void BLI_smallhash_print(SmallHash *sh); */ /* UNUSED */
#ifdef DEBUG

View File

@@ -300,6 +300,70 @@ template<typename T> class Span {
return counter;
}
#ifdef _clang__
/* Minimal index-based forward iterator over the span's elements.
 * NOTE(review): this block is guarded by '#ifdef _clang__' above, which looks
 * like a typo for '__clang__', so it likely never compiles as-is -- confirm. */
class iterator {
private:
int i_;
public:
explicit iterator(int i_ = 0) : i_(i_)
{
}
typedef T value_type;
typedef std::ptrdiff_t difference_type;
typedef T *pointer_type;
typedef T &reference_type;
iterator &operator++()
{
i_++;
return *this;
}
/* NOTE(review): post-increment is declared const yet calls ++(*this), which
 * mutates *this -- this cannot compile as written; drop the const. */
iterator operator++(int) const {
iterator ret = *this;
++(*this);
return ret;
}
bool operator==(iterator other)
{
return i_ == other.i_;
}
bool operator!=(iterator other)
{
return i_ != other.i_;
}
/* NOTE(review): begin()/end() construct iterator(span, ...) but the only
 * constructor takes a single int, and 'span'/'span.size_' are not members of
 * this class -- confirm the intended design (probably belongs on Span). */
iterator begin()
{
return iterator(span, 0);
}
iterator end()
{
return iterator(span, span.size_);
}
/* NOTE(review): 'data_' belongs to the enclosing Span, not to the iterator;
 * as written this only works if the iterator holds a span reference. */
T &operator*() const
{
return data_[i_];
}
};
friend class iterator;
const iterator first() const
{
return iterator(0);
}
const iterator last() const {
return iterator(size_ - 1);
}
#else
/**
* Return a reference to the first element in the array. This invokes undefined behavior when the
* array is empty.
@@ -319,6 +383,7 @@ template<typename T> class Span {
BLI_assert(size_ > 0);
return data_[size_ - 1];
}
#endif
/**
* Returns the element at the given index. If the index is out of range, return the fallback

View File

@@ -122,9 +122,16 @@ void BLI_spin_end(SpinLock *spin);
#define THREAD_LOCK_READ 1
#define THREAD_LOCK_WRITE 2
#define BLI_RWLOCK_INITIALIZER PTHREAD_RWLOCK_INITIALIZER
#ifndef WIN32
typedef pthread_rwlock_t ThreadRWMutex;
#define BLI_RWLOCK_INITIALIZER PTHREAD_RWLOCK_INITIALIZER
#else
typedef struct {
void *data[4]; //stupid, to avoid windows.h here in the header
int have_exclusive;
} ThreadRWMutex;
#define BLI_RWLOCK_INITIALIZER {0} //just do what windows does
#endif
void BLI_rw_mutex_init(ThreadRWMutex *mutex);
void BLI_rw_mutex_end(ThreadRWMutex *mutex);

View File

@@ -27,6 +27,7 @@ set(INC
../../../intern/guardedalloc
../../../intern/numaapi/include
../../../extern/wcwidth
../../../extern
)
set(INC_SYS
@@ -42,6 +43,7 @@ set(SRC
intern/BLI_dial_2d.c
intern/BLI_dynstr.c
intern/BLI_filelist.c
intern/hashmap.cc
intern/BLI_ghash.c
intern/BLI_ghash_utils.c
intern/BLI_heap.c
@@ -52,6 +54,7 @@ set(SRC
intern/BLI_linklist_lockfree.c
intern/BLI_memarena.c
intern/BLI_memblock.c
intern/BLI_threadsafe_mempool.c
intern/BLI_memiter.c
intern/BLI_mempool.c
intern/BLI_timer.c
@@ -133,6 +136,7 @@ set(SRC
intern/task_pool.cc
intern/task_range.cc
intern/task_scheduler.cc
intern/hashmap_gen.h
intern/threads.cc
intern/time.c
intern/timecode.c
@@ -295,6 +299,7 @@ set(SRC
BLI_winstuff.h
PIL_time.h
PIL_time_utildefines.h
BLI_hashmap.h
)
set(LIB
@@ -372,6 +377,15 @@ set_source_files_properties(
PROPERTIES HEADER_FILE_ONLY TRUE
)
#hashmap needs /Ob2 which is only used in Release, not RelWithDebInfo
if(MSVC)
set(_findtemp "")
string(REPLACE "/Ob1" "/Ob3" CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}")
string(REPLACE "/Ob1" "/Ob3" CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO}")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /GS- ")
set(CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_C_FLAGS_RELWITHDEBINFO} /GS- ")
endif()
blender_add_lib(bf_blenlib "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")
if(WITH_GTESTS)
@@ -425,6 +439,7 @@ if(WITH_GTESTS)
tests/BLI_exception_safety_test_utils.hh
)
set(TEST_INC
../imbuf
)

View File

@@ -45,7 +45,7 @@
/** \name Structs & Constants
* \{ */
#define GHASH_USE_MODULO_BUCKETS
/**
* Next prime after `2^n` (skipping 2 & 3).
@@ -79,40 +79,6 @@ BLI_STATIC_ASSERT(ARRAY_SIZE(hashsizes) == GHASH_MAX_SIZE, "Invalid 'hashsizes'
#define GHASH_LIMIT_GROW(_nbkt) (((_nbkt)*3) / 4)
#define GHASH_LIMIT_SHRINK(_nbkt) (((_nbkt)*3) / 16)
/* WARNING! Keep in sync with ugly _gh_Entry in header!!! */
typedef struct Entry {
struct Entry *next;
void *key;
} Entry;
typedef struct GHashEntry {
Entry e;
void *val;
} GHashEntry;
typedef Entry GSetEntry;
#define GHASH_ENTRY_SIZE(_is_gset) ((_is_gset) ? sizeof(GSetEntry) : sizeof(GHashEntry))
struct GHash {
GHashHashFP hashfp;
GHashCmpFP cmpfp;
Entry **buckets;
struct BLI_mempool *entrypool;
uint nbuckets;
uint limit_grow, limit_shrink;
#ifdef GHASH_USE_MODULO_BUCKETS
uint cursize, size_min;
#else
uint bucket_mask, bucket_bit, bucket_bit_min;
#endif
uint nentries;
uint flag;
};
/** \} */
@@ -1083,6 +1049,7 @@ void BLI_ghashIterator_init(GHashIterator *ghi, GHash *gh)
*
* \param ghi: The iterator.
*/
#if 0
void BLI_ghashIterator_step(GHashIterator *ghi)
{
if (ghi->curEntry) {
@@ -1096,6 +1063,7 @@ void BLI_ghashIterator_step(GHashIterator *ghi)
}
}
}
#endif
/**
* Free a GHashIterator.

View File

@@ -45,6 +45,22 @@ int orient2d(const mpq2 &a, const mpq2 &b, const mpq2 &c)
return sgn(det);
}
/* C-callable wrapper around the exact-arithmetic orient2d(): promotes the
 * float coordinates to exact rationals (mpq2) before testing, so the sign of
 * the orientation determinant is never wrong due to round-off. */
extern "C" int orient2d_exact(const float v1[2], const float v2[2], const float v3[2]) {
mpq2 a(v1[0], v1[1]);
mpq2 b(v2[0], v2[1]);
mpq2 c(v3[0], v3[1]);
return orient2d(a, b, c);
}
/* Double-precision variant of orient2d_exact(): same exact-rational
 * evaluation, taking double inputs instead of floats. */
extern "C" int dorient2d_exact(const double v1[2], const double v2[2], const double v3[2]) {
mpq2 a(v1[0], v1[1]);
mpq2 b(v2[0], v2[1]);
mpq2 c(v3[0], v3[1]);
return orient2d(a, b, c);
}
/**
Return +1 if d is in the oriented circle through a, b, and c.
* The oriented circle goes CCW through a, b, and c.

View File

@@ -54,139 +54,24 @@
#include "BLI_utildefines.h"
#include "BLI_smallhash.h"
#include "BLI_strict_flags.h"
#define SMHASH_KEY_UNUSED ((uintptr_t)(UINTPTR_MAX - 0))
#define SMHASH_CELL_FREE ((void *)(UINTPTR_MAX - 1))
#define SMHASH_CELL_UNUSED ((void *)(UINTPTR_MAX - 2))
/* typically this re-assigns 'h' */
#define SMHASH_NEXT(h, hoff) \
(CHECK_TYPE_INLINE(&(h), uint *), \
CHECK_TYPE_INLINE(&(hoff), uint *), \
((h) + (((hoff) = ((hoff)*2) + 1), (hoff))))
/* nothing uses BLI_smallhash_remove yet */
// #define USE_REMOVE
BLI_INLINE bool smallhash_val_is_used(const void *val)
{
#ifdef USE_REMOVE
return !ELEM(val, SMHASH_CELL_FREE, SMHASH_CELL_UNUSED);
#else
return (val != SMHASH_CELL_FREE);
#endif
}
extern const uint BLI_ghash_hash_sizes[];
#define hashsizes BLI_ghash_hash_sizes
BLI_INLINE uint smallhash_key(const uintptr_t key)
{
return (uint)key;
}
/**
* Check if the number of items in the smallhash is large enough to require more buckets.
*/
BLI_INLINE bool smallhash_test_expand_buckets(const uint nentries, const uint nbuckets)
{
/* (approx * 1.5) */
return (nentries + (nentries >> 1)) > nbuckets;
}
BLI_INLINE void smallhash_init_empty(SmallHash *sh)
{
uint i;
for (i = 0; i < sh->nbuckets; i++) {
sh->buckets[i].key = SMHASH_KEY_UNUSED;
sh->buckets[i].val = SMHASH_CELL_FREE;
}
}
/**
* Increase initial bucket size to match a reserved amount.
*/
BLI_INLINE void smallhash_buckets_reserve(SmallHash *sh, const uint nentries_reserve)
{
while (smallhash_test_expand_buckets(nentries_reserve, sh->nbuckets)) {
while (smallhash_test_expand_buckets(nentries_reserve, hashsizes[++sh->cursize])) {
sh->nbuckets = hashsizes[++sh->cursize];
}
}
BLI_INLINE SmallHashEntry *smallhash_lookup(const SmallHash *sh, const uintptr_t key)
{
SmallHashEntry *e;
uint h = smallhash_key(key);
uint hoff = 1;
BLI_assert(key != SMHASH_KEY_UNUSED);
/* note: there are always more buckets than entries,
* so we know there will always be a free bucket if the key isn't found. */
for (e = &sh->buckets[h % sh->nbuckets]; e->val != SMHASH_CELL_FREE;
h = SMHASH_NEXT(h, hoff), e = &sh->buckets[h % sh->nbuckets]) {
if (e->key == key) {
/* should never happen because unused keys are zero'd */
BLI_assert(e->val != SMHASH_CELL_UNUSED);
return e;
}
}
return NULL;
}
BLI_INLINE SmallHashEntry *smallhash_lookup_first_free(SmallHash *sh, const uintptr_t key)
{
SmallHashEntry *e;
uint h = smallhash_key(key);
uint hoff = 1;
for (e = &sh->buckets[h % sh->nbuckets]; smallhash_val_is_used(e->val);
h = SMHASH_NEXT(h, hoff), e = &sh->buckets[h % sh->nbuckets]) {
/* pass */
}
return e;
}
BLI_INLINE void smallhash_resize_buckets(SmallHash *sh, const uint nbuckets)
{
SmallHashEntry *buckets_old = sh->buckets;
const uint nbuckets_old = sh->nbuckets;
const bool was_alloc = (buckets_old != sh->buckets_stack);
uint i = 0;
BLI_assert(sh->nbuckets != nbuckets);
if (nbuckets <= SMSTACKSIZE) {
const size_t size = sizeof(*buckets_old) * nbuckets_old;
buckets_old = alloca(size);
memcpy(buckets_old, sh->buckets, size);
sh->buckets = sh->buckets_stack;
}
else {
sh->buckets = MEM_mallocN(sizeof(*sh->buckets) * nbuckets, __func__);
}
sh->nbuckets = nbuckets;
smallhash_init_empty(sh);
for (i = 0; i < nbuckets_old; i++) {
if (smallhash_val_is_used(buckets_old[i].val)) {
SmallHashEntry *e = smallhash_lookup_first_free(sh, buckets_old[i].key);
e->key = buckets_old[i].key;
e->val = buckets_old[i].val;
}
}
if (was_alloc) {
MEM_freeN(buckets_old);
}
}
void BLI_smallhash_init_ex(SmallHash *sh, const uint nentries_reserve)
{
@@ -222,6 +107,29 @@ void BLI_smallhash_release(SmallHash *sh)
}
}
/* Mark every bucket unused and reset the entry count.
 * Bucket storage is retained, so subsequent inserts reuse it. */
void BLI_smallhash_clear(SmallHash *sh)
{
  SmallHashEntry *e = sh->buckets;
  unsigned int i;

  for (i = 0; i < sh->nbuckets; i++, e++) {
    e->key = SMHASH_KEY_UNUSED;
    e->val = SMHASH_CELL_FREE;
  }

  sh->nentries = 0;
}
/* Grow the bucket array (never shrinks) so that 'size' entries fit without
 * triggering further rehashes.
 *
 * Fix: the original called smallhash_resize_buckets() unconditionally, which
 * trips its BLI_assert(sh->nbuckets != nbuckets) whenever the table is
 * already large enough; only rehash when the bucket count actually changes. */
void BLI_smallhash_reserve(SmallHash *sh, uint size)
{
  int cursize = sh->cursize;

  while (smallhash_test_expand_buckets(size, hashsizes[cursize])) {
    cursize++;
  }

  if (hashsizes[cursize] != sh->nbuckets) {
    sh->cursize = cursize;
    smallhash_resize_buckets(sh, hashsizes[cursize]);
  }
}
void BLI_smallhash_insert(SmallHash *sh, uintptr_t key, void *item)
{
SmallHashEntry *e;
@@ -239,6 +147,7 @@ void BLI_smallhash_insert(SmallHash *sh, uintptr_t key, void *item)
e->val = item;
}
/**
* Inserts a new value to a key that may already be in ghash.
*
@@ -258,7 +167,7 @@ bool BLI_smallhash_reinsert(SmallHash *sh, uintptr_t key, void *item)
return true;
}
#ifdef USE_REMOVE
#ifdef USE_SMALLHASH_REMOVE
bool BLI_smallhash_remove(SmallHash *sh, uintptr_t key)
{
SmallHashEntry *e = smallhash_lookup(sh, key);
@@ -276,78 +185,7 @@ bool BLI_smallhash_remove(SmallHash *sh, uintptr_t key)
}
#endif
void *BLI_smallhash_lookup(const SmallHash *sh, uintptr_t key)
{
SmallHashEntry *e = smallhash_lookup(sh, key);
return e ? e->val : NULL;
}
void **BLI_smallhash_lookup_p(const SmallHash *sh, uintptr_t key)
{
SmallHashEntry *e = smallhash_lookup(sh, key);
return e ? &e->val : NULL;
}
bool BLI_smallhash_haskey(const SmallHash *sh, uintptr_t key)
{
SmallHashEntry *e = smallhash_lookup(sh, key);
return (e != NULL);
}
int BLI_smallhash_len(const SmallHash *sh)
{
return (int)sh->nentries;
}
BLI_INLINE SmallHashEntry *smallhash_iternext(SmallHashIter *iter, uintptr_t *key)
{
while (iter->i < iter->sh->nbuckets) {
if (smallhash_val_is_used(iter->sh->buckets[iter->i].val)) {
if (key) {
*key = iter->sh->buckets[iter->i].key;
}
return &iter->sh->buckets[iter->i++];
}
iter->i++;
}
return NULL;
}
void *BLI_smallhash_iternext(SmallHashIter *iter, uintptr_t *key)
{
SmallHashEntry *e = smallhash_iternext(iter, key);
return e ? e->val : NULL;
}
void **BLI_smallhash_iternext_p(SmallHashIter *iter, uintptr_t *key)
{
SmallHashEntry *e = smallhash_iternext(iter, key);
return e ? &e->val : NULL;
}
void *BLI_smallhash_iternew(const SmallHash *sh, SmallHashIter *iter, uintptr_t *key)
{
iter->sh = sh;
iter->i = 0;
return BLI_smallhash_iternext(iter, key);
}
void **BLI_smallhash_iternew_p(const SmallHash *sh, SmallHashIter *iter, uintptr_t *key)
{
iter->sh = sh;
iter->i = 0;
return BLI_smallhash_iternext_p(iter, key);
}
/** \name Debugging & Introspection
* \{ */
@@ -407,8 +245,8 @@ double BLI_smallhash_calc_quality(SmallHash *sh)
if (sh->buckets[i].key != SMHASH_KEY_UNUSED) {
uint64_t count = 0;
SmallHashEntry *e, *e_final = &sh->buckets[i];
uint h = smallhash_key(e_final->key);
uint hoff = 1;
uintptr_t h = smallhash_key(e_final->key);
uintptr_t hoff = 1;
for (e = &sh->buckets[h % sh->nbuckets]; e != e_final;
h = SMHASH_NEXT(h, hoff), e = &sh->buckets[h % sh->nbuckets]) {
@@ -423,3 +261,19 @@ double BLI_smallhash_calc_quality(SmallHash *sh)
#endif
/** \} */
/* Allocate a SmallHash on the heap and initialize it.
 * Pair with BLI_smallhash_free(). */
SmallHash *BLI_smallhash_new() {
SmallHash *sh = MEM_callocN(sizeof(SmallHash), "SmallHash");
BLI_smallhash_init(sh);
return sh;
}
/* Like BLI_smallhash_new(), but pre-sizes the table for 'reserve' entries.
 * Pair with BLI_smallhash_free(). */
SmallHash *BLI_smallhash_new_ex(int reserve) {
SmallHash *sh = MEM_callocN(sizeof(SmallHash), "SmallHash");
BLI_smallhash_init_ex(sh, reserve);
return sh;
}
/* Release a heap-allocated SmallHash created with BLI_smallhash_new[_ex]():
 * frees its bucket storage, then the struct itself. */
void BLI_smallhash_free(SmallHash *sh) {
BLI_smallhash_release(sh);
MEM_freeN(sh);
}

View File

@@ -90,7 +90,7 @@ class Task {
other.freedata = NULL;
}
#if defined(WITH_TBB) && TBB_INTERFACE_VERSION_MAJOR < 10
#if 1 //defined(WITH_TBB) && TBB_INTERFACE_VERSION_MAJOR < 10
Task(const Task &other)
: pool(other.pool),
run(other.run),

View File

@@ -509,29 +509,54 @@ void BLI_spin_end(SpinLock *spin)
/* Read/Write Mutex Lock */
#define GET_RW_LOCK(mutex) reinterpret_cast<SRWLOCK*>(&mutex->data[0])
/* Initialize a read/write lock: a pthread rwlock on POSIX systems,
 * a slim reader/writer (SRW) lock on Windows. */
void BLI_rw_mutex_init(ThreadRWMutex *mutex)
{
#ifndef WIN32
pthread_rwlock_init(mutex, nullptr);
#else
InitializeSRWLock(GET_RW_LOCK(mutex));
#endif
}
/* Acquire the lock shared (THREAD_LOCK_READ) or exclusive (THREAD_LOCK_WRITE). */
void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode)
{
#ifndef WIN32
if (mode == THREAD_LOCK_READ) {
pthread_rwlock_rdlock(mutex);
}
else {
pthread_rwlock_wrlock(mutex);
}
#else
if (mode == THREAD_LOCK_READ) {
AcquireSRWLockShared(GET_RW_LOCK(mutex));
} else {
AcquireSRWLockExclusive(GET_RW_LOCK(mutex));
/* NOTE(review): 'have_exclusive' is a plain int written only while holding
 * the exclusive lock; this relies on writers excluding all readers so no
 * reader ever observes it set -- confirm, and note the lock is not
 * recursive/reentrant. */
mutex->have_exclusive = 1;
}
#endif
}
/* Release the lock acquired by BLI_rw_mutex_lock(). */
void BLI_rw_mutex_unlock(ThreadRWMutex *mutex)
{
#ifndef WIN32
pthread_rwlock_unlock(mutex);
#else
/* SRW locks need the matching release call for how they were acquired;
 * 'have_exclusive' records whether the current holder is a writer. */
if (!mutex->have_exclusive) {
ReleaseSRWLockShared(GET_RW_LOCK(mutex));
} else {
/* Clear the flag BEFORE releasing, while we still exclude readers. */
mutex->have_exclusive = 0;
ReleaseSRWLockExclusive(GET_RW_LOCK(mutex));
}
#endif
}
/* Destroy a read/write lock initialized with BLI_rw_mutex_init().
 *
 * Fix: the original called pthread_rwlock_destroy() unconditionally, but on
 * Windows ThreadRWMutex is a struct wrapping an SRWLOCK (see the header), so
 * the call would not even type-check there. SRW locks require no explicit
 * destruction, matching the platform split used by init/lock/unlock. */
void BLI_rw_mutex_end(ThreadRWMutex *mutex)
{
#ifndef WIN32
  pthread_rwlock_destroy(mutex);
#else
  (void)mutex; /* SRW locks need no cleanup. */
#endif
}
ThreadRWMutex *BLI_rw_mutex_alloc(void)

View File

@@ -4981,6 +4981,12 @@ void blo_do_versions_280(FileData *fd, Library *UNUSED(lib), Main *bmain)
}
if (!MAIN_VERSION_ATLEAST(bmain, 283, 12)) {
for (Scene* scene = bmain->scenes.first; scene; scene = scene->id.next) {
if (scene->r.hair_cyl_res == 0) {
scene->r.hair_cyl_res = 6;
}
}
/* Activate f-curve drawing in the sequencer. */
for (bScreen *screen = bmain->screens.first; screen; screen = screen->id.next) {
for (ScrArea *area = screen->areabase.first; area; area = area->next) {

View File

@@ -33,6 +33,8 @@
#include "BLI_memarena.h"
#include "BLI_task.h"
#include "BLI_threadsafe_mempool.h"
#include "BKE_customdata.h"
#include "BKE_multires.h"
@@ -816,7 +818,7 @@ void BM_vert_interp_from_face(BMesh *bm, BMVert *v_dst, const BMFace *f_src)
static void update_data_blocks(BMesh *bm, CustomData *olddata, CustomData *data)
{
BMIter iter;
BLI_mempool *oldpool = olddata->pool;
BLI_ThreadSafePool *oldpool = olddata->tpool;
void *block;
if (data == &bm->vdata) {
@@ -880,13 +882,14 @@ static void update_data_blocks(BMesh *bm, CustomData *olddata, CustomData *data)
}
if (oldpool) {
/* this should never happen but can when dissolve fails - T28960. */
BLI_assert(data->pool != oldpool);
/* this should never happen but can when dissolve fails - [#28960] */
BLI_assert(data->tpool != oldpool);
BLI_mempool_destroy(oldpool);
BLI_safepool_destroy(oldpool);
}
}
void BM_data_layer_add(BMesh *bm, CustomData *data, int type)
{
CustomData olddata;
@@ -895,7 +898,7 @@ void BM_data_layer_add(BMesh *bm, CustomData *data, int type)
olddata.layers = (olddata.layers) ? MEM_dupallocN(olddata.layers) : NULL;
/* the pool is now owned by olddata and must not be shared */
data->pool = NULL;
data->tpool = NULL;
CustomData_add_layer(data, type, CD_DEFAULT, NULL, 0);
@@ -913,7 +916,7 @@ void BM_data_layer_add_named(BMesh *bm, CustomData *data, int type, const char *
olddata.layers = (olddata.layers) ? MEM_dupallocN(olddata.layers) : NULL;
/* the pool is now owned by olddata and must not be shared */
data->pool = NULL;
data->tpool = NULL;
CustomData_add_layer_named(data, type, CD_DEFAULT, NULL, 0, name);
@@ -932,7 +935,7 @@ void BM_data_layer_free(BMesh *bm, CustomData *data, int type)
olddata.layers = (olddata.layers) ? MEM_dupallocN(olddata.layers) : NULL;
/* the pool is now owned by olddata and must not be shared */
data->pool = NULL;
data->tpool = NULL;
has_layer = CustomData_free_layer_active(data, type, 0);
/* assert because its expensive to realloc - better not do if layer isnt present */
@@ -954,7 +957,7 @@ void BM_data_layer_free_n(BMesh *bm, CustomData *data, int type, int n)
olddata.layers = (olddata.layers) ? MEM_dupallocN(olddata.layers) : NULL;
/* the pool is now owned by olddata and must not be shared */
data->pool = NULL;
data->tpool = NULL;
has_layer = CustomData_free_layer(data, type, 0, CustomData_get_layer_index_n(data, type, n));
/* assert because its expensive to realloc - better not do if layer isnt present */

View File

@@ -32,6 +32,7 @@
#include "BLI_stack.h"
#include "BLI_task.h"
#include "BLI_utildefines.h"
#include "BLI_threadsafe_mempool.h"
#include "BKE_editmesh.h"
#include "BKE_global.h"
@@ -222,16 +223,16 @@ void BM_mesh_data_free(BMesh *bm)
/* Free custom data pools, This should probably go in CustomData_free? */
if (bm->vdata.totlayer) {
BLI_mempool_destroy(bm->vdata.pool);
BLI_safepool_destroy(bm->vdata.tpool);
}
if (bm->edata.totlayer) {
BLI_mempool_destroy(bm->edata.pool);
BLI_safepool_destroy(bm->edata.tpool);
}
if (bm->ldata.totlayer) {
BLI_mempool_destroy(bm->ldata.pool);
BLI_safepool_destroy(bm->ldata.tpool);
}
if (bm->pdata.totlayer) {
BLI_mempool_destroy(bm->pdata.pool);
BLI_safepool_destroy(bm->pdata.tpool);
}
/* free custom data */

View File

@@ -107,9 +107,9 @@ void BM_mesh_cd_flag_ensure(BMesh *bm, Mesh *mesh, const char cd_flag)
void BM_mesh_cd_flag_apply(BMesh *bm, const char cd_flag)
{
/* CustomData_bmesh_init_pool() must run first */
BLI_assert(bm->vdata.totlayer == 0 || bm->vdata.pool != NULL);
BLI_assert(bm->edata.totlayer == 0 || bm->edata.pool != NULL);
BLI_assert(bm->pdata.totlayer == 0 || bm->pdata.pool != NULL);
BLI_assert(bm->vdata.totlayer == 0 || bm->vdata.tpool != NULL);
BLI_assert(bm->edata.totlayer == 0 || bm->edata.tpool != NULL);
BLI_assert(bm->pdata.totlayer == 0 || bm->pdata.tpool != NULL);
if (cd_flag & ME_CDFLAG_VERT_BWEIGHT) {
if (!CustomData_has_layer(&bm->vdata, CD_BWEIGHT)) {

View File

@@ -28,6 +28,7 @@
#include "BKE_customdata.h"
#include "bmesh.h"
#include "BLI_ghash.h"
#include "intern/bmesh_operators_private.h" /* own include */

View File

@@ -34,6 +34,7 @@
* values would be though */
# include "bmesh.h"
#include "BLI_ghash.h"
# include "intern/bmesh_operators_private.h" /* own include */

View File

@@ -693,7 +693,7 @@ static void bm_edge_collapse_loop_customdata(
{
/* Disable seam check - the seam check would have to be done per layer,
* its not really that important. */
//#define USE_SEAM
#define USE_SEAM
/* these don't need to be updated, since they will get removed when the edge collapses */
BMLoop *l_clear, *l_other;
const bool is_manifold = BM_edge_is_manifold(l->e);

View File

@@ -1031,7 +1031,7 @@ static void particle_batch_cache_ensure_procedural_indices(PTCacheEdit *edit,
int thickness_res,
int subdiv)
{
BLI_assert(thickness_res <= MAX_THICKRES); /* Cylinder strip not currently supported. */
BLI_assert(thickness_res <= MAX_THICKRES);
if (cache->final[subdiv].proc_hairs[thickness_res - 1] != NULL) {
return;

View File

@@ -177,7 +177,19 @@ GPUVertBuf *DRW_hair_pos_buffer_get(Object *object, ParticleSystem *psys, Modifi
Scene *scene = draw_ctx->scene;
int subdiv = scene->r.hair_subdiv;
int thickness_res = (scene->r.hair_type == SCE_HAIR_SHAPE_STRAND) ? 1 : 2;
int thickness_res = 1;
switch (scene->r.hair_type) {
case SCE_HAIR_SHAPE_STRAND:
thickness_res = 1;
break;
case SCE_HAIR_SHAPE_STRIP:
thickness_res = 2;
break;
case SCE_HAIR_SHAPE_CYLINDER:
thickness_res = MAX2(scene->r.hair_cyl_res, 3)*2;
break;
}
ParticleHairCache *cache = drw_hair_particle_cache_get(object, psys, md, subdiv, thickness_res);

View File

@@ -25,7 +25,7 @@
#define MAX_LAYER_NAME_CT 4 /* u0123456789, u, au, a0123456789 */
#define MAX_LAYER_NAME_LEN GPU_MAX_SAFE_ATTR_NAME + 2
#define MAX_THICKRES 2 /* see eHairType */
#define MAX_THICKRES 128 /* see eHairType */
#define MAX_HAIR_SUBDIV 4 /* see hair_subdiv rna */
struct ModifierData;

View File

@@ -12,6 +12,13 @@
*/
uniform int hairStrandsRes = 8;
#ifndef M_PI
#define M_PI 3.14159265358979323846 /* pi */
#endif
#ifndef M_2PI
#define M_2PI 6.28318530717958647692 /* 2*pi */
#endif
/**
* hairThicknessRes : Subdiv around the hair.
* 1 - Wire Hair: Only one pixel thick, independent of view distance.
@@ -170,7 +177,7 @@ void hair_get_pos_tan_binor_time(bool is_persp,
thickness = hair_shaperadius(hairRadShape, hairRadRoot, hairRadTip, time);
if (hairThicknessRes > 1) {
if (hairThicknessRes == 2) {
thick_time = float(gl_VertexID % hairThicknessRes) / float(hairThicknessRes - 1);
thick_time = thickness * (thick_time * 2.0 - 1.0);
@@ -179,6 +186,72 @@ void hair_get_pos_tan_binor_time(bool is_persp,
float scale = 1.0 / length(mat3(invmodel_mat) * wbinor);
wpos += wbinor * thick_time * scale;
} else if (hairThicknessRes > 2) { //cylinder
vec4 data2;
vec4 data3;
vec3 wpos2;
vec3 wtan2;
int id2 = time > 0.0 ? id - 1 : id;
data2 = texelFetch(hairPointBuffer, id2);
int id3 = data2.point_time > 0.0 ? id2 - 1 : id2;
data3 = texelFetch(hairPointBuffer, id3);
wtan = data.point_position - data2.point_position;
wtan2 = data2.point_position - data3.point_position;
wtan = -normalize(mat3(obmat) * wtan);
wtan2 = -normalize(mat3(obmat) * wtan2);
wpos2 = data2.point_position;
wpos2 = (obmat * vec4(wpos2, 1.0)).xyz;
//as a triangle strip, we alternative between current and next ring every other vert
if (gl_VertexID % 2 == 0) {
thickness = hair_shaperadius(hairRadShape, hairRadRoot, hairRadTip, data2.point_time);
wtan = wtan2;
time = data2.point_time;
}
thick_time = float((gl_VertexID/2) % (hairThicknessRes/2)) / float((hairThicknessRes/2) - 1);
thick_time *= M_2PI;
//build reference frame
//find compatible world axis
vec3 axis;
if (abs(wtan[0]) >= abs(wtan[1]) && abs(wtan[0]) >= abs(wtan[2])) {
axis = vec3(0.0, 1.0, 0.0);
} else if (abs(wtan[1]) >= abs(wtan[0]) && abs(wtan[1]) >= abs(wtan[2])) {
axis = vec3(0.0, 0.0, 1.0);
} else {
axis = vec3(1.0, 0.0, 0.0);
}
//make frame
vec3 dx = normalize(cross(axis, wtan));
vec3 dy = normalize(cross(wtan, dx));
float x = sin(thick_time);
float y = cos(thick_time);
wbinor = dx*x + dy*y;
wbinor = normalize(mat3(obmat) * wbinor);
/* Take object scale into account.
* NOTE: This only works fine with uniform scaling. */
float scale = 1.0 / length(mat3(invmodel_mat) * wbinor);
x *= scale * thickness;
y *= scale * thickness;
if (gl_VertexID % 2 == 1) {
wpos += dx*x + dy*y;
} else {
wpos = wpos2 + (dx*x + dy*y);
}
}
}

View File

@@ -33,6 +33,7 @@
#include "DNA_shader_fx_types.h"
#include "DNA_texture_types.h"
#include "BLI_ghash.h"
#include "BLI_alloca.h"
#include "BLI_dynstr.h"
#include "BLI_ghash.h"

View File

@@ -27,6 +27,7 @@
#include "BLI_kdtree.h"
#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_ghash.h"
#include "BKE_context.h"
#include "BKE_editmesh.h"

View File

@@ -113,7 +113,7 @@ static bool object_remesh_poll(bContext *C)
return false;
}
if (ob->mode == OB_MODE_SCULPT && ob->sculpt->bm) {
if (ob->mode == OB_MODE_SCULPT && (ob->sculpt->bm || ob->sculpt->tm)) {
CTX_wm_operator_poll_msg_set(C, "The remesher cannot run with dyntopo activated");
return false;
}

View File

@@ -624,6 +624,7 @@ void ED_region_do_draw(bContext *C, ARegion *region)
SpaceLink *sl = area->spacedata.first;
PointerRNA ptr;
RNA_pointer_create(&screen->id, &RNA_Space, sl, &ptr);
wmMsgSubscribeValue msg_sub_value_region_tag_redraw = {

View File

@@ -22,6 +22,7 @@ set(INC
../../blenlib
../../blentranslation
../../bmesh
../../trimesh
../../depsgraph
../../draw
../../gpu
@@ -80,6 +81,7 @@ set(SRC
set(LIB
bf_blenkernel
bf_blenlib
bf_trimesh
)
if(WITH_INTERNATIONAL)

View File

@@ -1214,7 +1214,7 @@ typedef struct PaintCursorContext {
/* Sculpt related data. */
Sculpt *sd;
SculptSession *ss;
int prev_active_vertex_index;
SculptIdx prev_active_vertex_index;
bool is_stroke_active;
bool is_cursor_over_mesh;
bool is_multires;

View File

@@ -53,6 +53,7 @@
#include "RNA_define.h"
#include "bmesh.h"
#include "trimesh.h"
#include "paint_intern.h"
@@ -286,6 +287,94 @@ static void partialvis_update_bmesh(Object *ob,
}
}
/* Toggle the hidden flag on every vertex in 'verts' that falls inside the
 * clip volume, reporting via the out-parameters whether anything changed and
 * whether any vertex remains visible. */
static void partialvis_update_trimesh_verts(TM_TriMesh *bm,
TableGSet *verts,
PartialVisAction action,
PartialVisArea area,
float planes[4][4],
bool *any_changed,
bool *any_visible)
{
TMVert *v;
TMS_ITER (v, verts) {
/* NOTE(review): 'vmask' is dereferenced without a NULL check -- assumes a
 * CD_PAINT_MASK layer always exists on sculpt meshes; confirm the caller
 * guarantees this. */
float *vmask = CustomData_bmesh_get(&bm->vdata, v->customdata, CD_PAINT_MASK);
/* Hide vertex if in the hide volume. */
if (is_effected(area, planes, v->co, *vmask)) {
if (action == PARTIALVIS_HIDE) {
TM_elem_flag_enable(v, TM_ELEM_HIDDEN);
}
else {
TM_elem_flag_disable(v, TM_ELEM_HIDDEN);
}
(*any_changed) = true;
}
/* Track whether at least one vertex of the node stays visible. */
if (!TM_elem_flag_test(v, TM_ELEM_HIDDEN)) {
(*any_visible) = true;
}
} TMS_ITER_END
}
/* Return true if ANY vertex of the triangle is hidden (which hides the whole
 * face), false when all three vertices are visible.
 * (The previous comment said the opposite of what the code does.) */
static bool paint_is_trimesh_face_hidden(TMFace *f)
{
bool ret = f->v1->flag & TM_ELEM_HIDDEN;
ret = ret || (f->v2->flag & TM_ELEM_HIDDEN);
ret = ret || (f->v3->flag & TM_ELEM_HIDDEN);
return ret;
}
/* Re-derive each face's hidden flag from its vertices: a face becomes hidden
 * as soon as one of its vertices is hidden (see paint_is_trimesh_face_hidden). */
static void partialvis_update_trimesh_faces(GSet *faces)
{
GSetIterator gs_iter;
GSET_ITER (gs_iter, faces) {
TMFace *f = BLI_gsetIterator_getKey(&gs_iter);
if (paint_is_trimesh_face_hidden(f)) {
TM_elem_flag_enable(f, TM_ELEM_HIDDEN);
}
else {
TM_elem_flag_disable(f, TM_ELEM_HIDDEN);
}
}
}
/* Apply a hide/show operation to one trimesh PBVH node: push an undo record,
 * update vertex hidden flags against the clip planes for both vertex sets of
 * the node, re-derive face hidden flags, then tag the node for redraw if
 * anything changed. */
static void partialvis_update_trimesh(Object *ob,
PBVH *pbvh,
PBVHNode *node,
PartialVisAction action,
PartialVisArea area,
float planes[4][4])
{
TM_TriMesh *bm;
TableGSet *unique, *other;
GSet *faces;
bool any_changed = false, any_visible = false;
bm = BKE_pbvh_get_trimesh(pbvh);
unique = BKE_pbvh_trimesh_node_unique_verts(node);
other = BKE_pbvh_trimesh_node_other_verts(node);
faces = BKE_pbvh_trimesh_node_faces(node);
/* Record state before mutating so the operation can be undone. */
SCULPT_undo_push_node(ob, node, SCULPT_UNDO_HIDDEN);
partialvis_update_trimesh_verts(bm, unique, action, area, planes, &any_changed, &any_visible);
partialvis_update_trimesh_verts(bm, other, action, area, planes, &any_changed, &any_visible);
/* Finally loop over node faces and tag the ones that are fully hidden. */
partialvis_update_trimesh_faces(faces);
if (any_changed) {
BKE_pbvh_node_mark_rebuild_draw(node);
BKE_pbvh_node_fully_hidden_set(node, !any_visible);
}
}
static void rect_from_props(rcti *rect, PointerRNA *ptr)
{
rect->xmin = RNA_int_get(ptr, "xmin");
@@ -384,6 +473,9 @@ static int hide_show_exec(bContext *C, wmOperator *op)
case PBVH_BMESH:
partialvis_update_bmesh(ob, pbvh, nodes[i], action, area, clip_planes);
break;
case PBVH_TRIMESH:
partialvis_update_trimesh(ob, pbvh, nodes[i], action, area, clip_planes);
break;
}
}

View File

@@ -62,6 +62,7 @@
#include "bmesh.h"
#include "bmesh_tools.h"
#include "trimesh.h"
#include "tools/bmesh_boolean.h"
#include "paint_intern.h"

View File

@@ -74,6 +74,7 @@
#include "BKE_ccg.h"
#include "bmesh.h"
#include "trimesh.h"
#include "paint_intern.h" /* own include */
#include "sculpt_intern.h"

File diff suppressed because it is too large Load Diff

View File

@@ -127,7 +127,7 @@ static bool SCULPT_automasking_needs_factors_cache(const Sculpt *sd, const Brush
return false;
}
float SCULPT_automasking_factor_get(AutomaskingCache *automasking, SculptSession *ss, int vert)
float SCULPT_automasking_factor_get(AutomaskingCache *automasking, SculptSession *ss, SculptIdx vert)
{
if (!automasking) {
return 1.0f;
@@ -192,7 +192,7 @@ typedef struct AutomaskFloodFillData {
} AutomaskFloodFillData;
static bool automask_floodfill_cb(
SculptSession *ss, int from_v, int to_v, bool UNUSED(is_duplicate), void *userdata)
SculptSession *ss, SculptIdx from_v, SculptIdx to_v, bool UNUSED(is_duplicate), void *userdata)
{
AutomaskFloodFillData *data = userdata;

View File

@@ -54,6 +54,7 @@
#include "GPU_state.h"
#include "bmesh.h"
#include "trimesh.h"
#include <math.h>
#include <stdlib.h>
@@ -62,15 +63,15 @@
#define BOUNDARY_STEPS_NONE -1
typedef struct BoundaryInitialVertexFloodFillData {
int initial_vertex;
SculptIdx initial_vertex;
int boundary_initial_vertex_steps;
int boundary_initial_vertex;
SculptIdx boundary_initial_vertex;
int *floodfill_steps;
float radius_sq;
} BoundaryInitialVertexFloodFillData;
static bool boundary_initial_vertex_floodfill_cb(
SculptSession *ss, int from_v, int to_v, bool is_duplicate, void *userdata)
SculptSession *ss, SculptIdx from_v, SculptIdx to_v, bool is_duplicate, void *userdata)
{
BoundaryInitialVertexFloodFillData *data = userdata;
@@ -99,8 +100,8 @@ static bool boundary_initial_vertex_floodfill_cb(
/* From a vertex index anywhere in the mesh, returns the closest vertex in a mesh boundary inside
* the given radius, if it exists. */
static int sculpt_boundary_get_closest_boundary_vertex(SculptSession *ss,
const int initial_vertex,
static SculptIdx sculpt_boundary_get_closest_boundary_vertex(SculptSession *ss,
const SculptIdx initial_vertex,
const float radius)
{
@@ -120,7 +121,7 @@ static int sculpt_boundary_get_closest_boundary_vertex(SculptSession *ss,
};
fdata.floodfill_steps = MEM_calloc_arrayN(
SCULPT_vertex_count_get(ss), sizeof(int), "floodfill steps");
SCULPT_vertex_count_get(ss), sizeof(SculptIdx), "floodfill steps");
SCULPT_floodfill_execute(ss, &flood, boundary_initial_vertex_floodfill_cb, &fdata);
SCULPT_floodfill_free(&flood);
@@ -135,7 +136,7 @@ static int sculpt_boundary_get_closest_boundary_vertex(SculptSession *ss,
static int BOUNDARY_INDICES_BLOCK_SIZE = 300;
static void sculpt_boundary_index_add(SculptBoundary *boundary,
const int new_index,
const SculptIdx new_index,
const float distance,
GSet *included_vertices)
{
@@ -151,11 +152,11 @@ static void sculpt_boundary_index_add(SculptBoundary *boundary,
if (boundary->num_vertices >= boundary->vertices_capacity) {
boundary->vertices_capacity += BOUNDARY_INDICES_BLOCK_SIZE;
boundary->vertices = MEM_reallocN_id(
boundary->vertices, boundary->vertices_capacity * sizeof(int), "boundary indices");
boundary->vertices, boundary->vertices_capacity * sizeof(SculptIdx), "boundary indices");
}
};
static void sculpt_boundary_preview_edge_add(SculptBoundary *boundary, const int v1, const int v2)
static void sculpt_boundary_preview_edge_add(SculptBoundary *boundary, const SculptIdx v1, const SculptIdx v2)
{
boundary->edges[boundary->num_edges].v1 = v1;
@@ -175,7 +176,7 @@ static void sculpt_boundary_preview_edge_add(SculptBoundary *boundary, const int
* as well as to check if the initial vertex is valid.
*/
static bool sculpt_boundary_is_vertex_in_editable_boundary(SculptSession *ss,
const int initial_vertex)
const SculptIdx initial_vertex)
{
if (!SCULPT_vertex_visible_get(ss, initial_vertex)) {
@@ -223,7 +224,7 @@ typedef struct BoundaryFloodFillData {
} BoundaryFloodFillData;
static bool boundary_floodfill_cb(
SculptSession *ss, int from_v, int to_v, bool is_duplicate, void *userdata)
SculptSession *ss, SculptIdx from_v, SculptIdx to_v, bool is_duplicate, void *userdata)
{
BoundaryFloodFillData *data = userdata;
SculptBoundary *boundary = data->boundary;
@@ -245,12 +246,12 @@ static bool boundary_floodfill_cb(
static void sculpt_boundary_indices_init(SculptSession *ss,
SculptBoundary *boundary,
const bool init_boundary_distances,
const int initial_boundary_index)
const SculptIdx initial_boundary_index)
{
const int totvert = SCULPT_vertex_count_get(ss);
boundary->vertices = MEM_malloc_arrayN(
BOUNDARY_INDICES_BLOCK_SIZE, sizeof(int), "boundary indices");
BOUNDARY_INDICES_BLOCK_SIZE, sizeof(SculptIdx), "boundary indices");
if (init_boundary_distances) {
boundary->distance = MEM_calloc_arrayN(totvert, sizeof(float), "boundary distances");
}
@@ -302,7 +303,7 @@ static void sculpt_boundary_indices_init(SculptSession *ss,
*/
static void sculpt_boundary_edit_data_init(SculptSession *ss,
SculptBoundary *boundary,
const int initial_vertex,
const SculptIdx initial_vertex,
const float radius)
{
const int totvert = SCULPT_vertex_count_get(ss);
@@ -317,8 +318,8 @@ static void sculpt_boundary_edit_data_init(SculptSession *ss,
boundary->edit_info[i].num_propagation_steps = BOUNDARY_STEPS_NONE;
}
GSQueue *current_iteration = BLI_gsqueue_new(sizeof(int));
GSQueue *next_iteration = BLI_gsqueue_new(sizeof(int));
GSQueue *current_iteration = BLI_gsqueue_new(sizeof(SculptIdx));
GSQueue *next_iteration = BLI_gsqueue_new(sizeof(SculptIdx));
/* Initialized the first iteration with the vertices already in the boundary. This is propagation
* step 0. */
@@ -496,7 +497,7 @@ static void sculpt_boundary_falloff_factor_init(SculptSession *ss,
* return NULL if there is no boundary from the given vertex using the given radius. */
SculptBoundary *SCULPT_boundary_data_init(Object *object,
Brush *brush,
const int initial_vertex,
const SculptIdx initial_vertex,
const float radius)
{
SculptSession *ss = object->sculpt;
@@ -508,7 +509,7 @@ SculptBoundary *SCULPT_boundary_data_init(Object *object,
SCULPT_vertex_random_access_ensure(ss);
SCULPT_boundary_info_ensure(object);
const int boundary_initial_vertex = sculpt_boundary_get_closest_boundary_vertex(
const SculptIdx boundary_initial_vertex = sculpt_boundary_get_closest_boundary_vertex(
ss, initial_vertex, radius);
if (boundary_initial_vertex == BOUNDARY_VERTEX_NONE) {
@@ -877,7 +878,7 @@ void SCULPT_do_boundary_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totn
const int symm_area = ss->cache->mirror_symmetry_pass;
if (SCULPT_stroke_is_first_brush_step_of_symmetry_pass(ss->cache)) {
int initial_vertex;
SculptIdx initial_vertex;
if (ss->cache->mirror_symmetry_pass == 0) {
initial_vertex = SCULPT_active_vertex_get(ss);
}

View File

@@ -98,6 +98,7 @@
#include "bmesh.h"
#include "bmesh_tools.h"
#include "trimesh.h"
#include <math.h>
#include <stdlib.h>

View File

@@ -126,7 +126,7 @@ static int sculpt_detail_flood_fill_exec(bContext *C, wmOperator *UNUSED(op))
SCULPT_undo_push_node(ob, NULL, SCULPT_UNDO_COORDS);
while (BKE_pbvh_bmesh_update_topology(
ss->pbvh, PBVH_Collapse | PBVH_Subdivide, center, NULL, size, false, false)) {
ss->pbvh, PBVH_Collapse | PBVH_Subdivide, center, NULL, size, false, false, -1)) {
for (int i = 0; i < totnodes; i++) {
BKE_pbvh_node_mark_topology_update(nodes[i]);
}

View File

@@ -72,6 +72,7 @@
#include "bmesh.h"
#include "bmesh_tools.h"
#include "trimesh.h"
#include <math.h>
#include <stdlib.h>
@@ -110,19 +111,249 @@ void SCULPT_pbvh_clear(Object *ob)
DEG_id_tag_update(&ob->id, ID_RECALC_GEOMETRY);
}
#ifdef WITH_TRIMESH
void SCULPT_dyntopo_node_layers_add(SculptSession *ss)
{
int cd_node_layer_index;
char layer_id[] = "_dyntopo_node_id";
cd_node_layer_index = CustomData_get_named_layer_index(&ss->tm->vdata, CD_PROP_INT32, layer_id);
if (cd_node_layer_index == -1) {
TM_data_layer_add_named(ss->tm, &ss->tm->vdata, CD_PROP_INT32, layer_id);
cd_node_layer_index = CustomData_get_named_layer_index(
&ss->tm->vdata, CD_PROP_INT32, layer_id);
}
ss->cd_vert_node_offset = CustomData_get_n_offset(
&ss->tm->vdata,
CD_PROP_INT32,
cd_node_layer_index - CustomData_get_layer_index(&ss->tm->vdata, CD_PROP_INT32));
ss->tm->vdata.layers[cd_node_layer_index].flag |= CD_FLAG_TEMPORARY;
cd_node_layer_index = CustomData_get_named_layer_index(&ss->tm->tdata, CD_PROP_INT32, layer_id);
if (cd_node_layer_index == -1) {
TM_data_layer_add_named(ss->tm, &ss->tm->tdata, CD_PROP_INT32, layer_id);
cd_node_layer_index = CustomData_get_named_layer_index(
&ss->tm->tdata, CD_PROP_INT32, layer_id);
}
ss->cd_face_node_offset = CustomData_get_n_offset(
&ss->tm->tdata,
CD_PROP_INT32,
cd_node_layer_index - CustomData_get_layer_index(&ss->tm->tdata, CD_PROP_INT32));
ss->tm->tdata.layers[cd_node_layer_index].flag |= CD_FLAG_TEMPORARY;
}
/**
 * Switch the object into dynamic-topology sculpting: convert the Mesh into a
 * triangles-only TriMesh, add the PBVH node-id custom-data layers, start the
 * TriMesh undo log, and tag the depsgraph so the PBVH gets rebuilt.
 *
 * Fix: removed the unused `allocsize` local (`BMALLOC_TEMPLATE_FROM_ME`),
 * a leftover from the BMesh code path that is never read here.
 */
void SCULPT_dynamic_topology_enable_ex(Main *bmain, Depsgraph *depsgraph, Scene *scene, Object *ob)
{
  SculptSession *ss = ob->sculpt;
  Mesh *me = ob->data;

  SCULPT_pbvh_clear(ob);

  ss->bm_smooth_shading = (scene->toolsettings->sculpt->flags & SCULPT_DYNTOPO_SMOOTH_SHADING) !=
                          0;

  /* Dynamic topology doesn't ensure selection state is valid, so remove T36280. */
  BKE_mesh_mselect_clear(me);

  /* Create triangles-only TriMesh. */
  ss->tm = TMesh_new(6); /* XXX set maxthread properly. */
  TM_mesh_tm_from_me(ss->tm,
                     me,
                     (&(struct TriMeshFromMeshParams){
                         .calc_face_normal = true,
                         .use_shapekey = true,
                         .active_shapekey = ob->shapenr,
                     }));

  /* Paint mask plus the per-element node-id layers used by the PBVH. */
  TM_data_layer_add(ss->tm, &ss->tm->vdata, CD_PAINT_MASK);
  SCULPT_dyntopo_node_layers_add(ss);

  /* Make sure the data for existing faces are initialized. */
  if (me->totpoly != ss->tm->tottri) {
    TM_mesh_normals_update(ss->tm);
  }

  /* Enable dynamic topology. */
  me->flag |= ME_SCULPT_DYNAMIC_TOPOLOGY;

  /* Enable logging for undo/redo. */
  ss->tm_log = TM_log_new(ss->tm, CustomData_get_layer_index(&ss->tm->vdata, CD_PAINT_MASK));

  /* Update dependency graph, so modifiers that depend on dyntopo being enabled
   * are re-evaluated and the PBVH is re-created. */
  DEG_id_tag_update(&ob->id, ID_RECALC_GEOMETRY);
  BKE_scene_graph_update_tagged(depsgraph, bmain);
}
/* Free the sculpt dynamic-topology mesh (BMesh or TriMesh) and its undo log.
 *
 * If 'unode' is given, the Mesh custom data is replaced wholesale by the
 * geometry snapshot stored in the unode (taken when dyntopo was entered),
 * so that the pre-dyntopo state can be restored from undo. Otherwise the
 * current dyntopo mesh is flushed back into the Mesh. */
static void SCULPT_dynamic_topology_disable_ex(
    Main *bmain, Depsgraph *depsgraph, Scene *scene, Object *ob, SculptUndoNode *unode)
{
  SculptSession *ss = ob->sculpt;
  Mesh *me = ob->data;

  SCULPT_pbvh_clear(ob);

  if (unode) {
    /* Restore path: drop the Mesh's current layers and copy back the
     * snapshot saved in the undo node. */
    /* Free all existing custom data. */
    CustomData_free(&me->vdata, me->totvert);
    CustomData_free(&me->edata, me->totedge);
    CustomData_free(&me->fdata, me->totface);
    CustomData_free(&me->ldata, me->totloop);
    CustomData_free(&me->pdata, me->totpoly);

    /* Copy over stored custom data. */
    SculptUndoNodeGeometry *geometry = &unode->geometry_bmesh_enter;
    me->totvert = geometry->totvert;
    me->totloop = geometry->totloop;
    me->totpoly = geometry->totpoly;
    me->totedge = geometry->totedge;
    /* Legacy tessface data is not restored. */
    me->totface = 0;
    CustomData_copy(
        &geometry->vdata, &me->vdata, CD_MASK_MESH.vmask, CD_DUPLICATE, geometry->totvert);
    CustomData_copy(
        &geometry->edata, &me->edata, CD_MASK_MESH.emask, CD_DUPLICATE, geometry->totedge);
    CustomData_copy(
        &geometry->ldata, &me->ldata, CD_MASK_MESH.lmask, CD_DUPLICATE, geometry->totloop);
    CustomData_copy(
        &geometry->pdata, &me->pdata, CD_MASK_MESH.pmask, CD_DUPLICATE, geometry->totpoly);

    /* Re-point cached runtime pointers (me->mvert etc.) at the new layers. */
    BKE_mesh_update_customdata_pointers(me, false);
  }
  else {
    /* No undo snapshot: write the TriMesh contents back into the Mesh. */
    BKE_sculptsession_tm_to_me(ob, true);

    /* Reset Face Sets as they are no longer valid. */
    if (!CustomData_has_layer(&me->pdata, CD_SCULPT_FACE_SETS)) {
      CustomData_add_layer(&me->pdata, CD_SCULPT_FACE_SETS, CD_CALLOC, NULL, me->totpoly);
    }
    ss->face_sets = CustomData_get_layer(&me->pdata, CD_SCULPT_FACE_SETS);
    for (int i = 0; i < me->totpoly; i++) {
      ss->face_sets[i] = 1;
    }
    me->face_sets_color_default = 1;

    /* Sync the visibility to vertices manually as the pmap is still not initialized. */
    for (int i = 0; i < me->totvert; i++) {
      me->mvert[i].flag &= ~ME_HIDE;
      me->mvert[i].flag |= ME_VERT_PBVH_UPDATE;
    }
  }

  /* Clear data. */
  me->flag &= ~ME_SCULPT_DYNAMIC_TOPOLOGY;

  /* Typically valid but with global-undo they can be NULL, see: T36234.
   * Both the BMesh and TriMesh variants are freed; only one pair is expected
   * to be non-NULL for a given session. */
  if (ss->bm) {
    BM_mesh_free(ss->bm);
    ss->bm = NULL;
  }
  if (ss->bm_log) {
    BM_log_free(ss->bm_log);
    ss->bm_log = NULL;
  }
  if (ss->tm) {
    TMesh_free(ss->tm);
    ss->tm = NULL;
  }
  if (ss->tm_log) {
    TM_log_free(ss->tm_log);
    ss->tm_log = NULL;
  }

  /* Simulation caches depend on the (now changed) topology. */
  BKE_particlesystem_reset_all(ob);
  BKE_ptcache_object_reset(scene, ob, PTCACHE_RESET_OUTDATED);

  /* Update dependency graph, so modifiers that depend on dyntopo being enabled
   * are re-evaluated and the PBVH is re-created. */
  DEG_id_tag_update(&ob->id, ID_RECALC_GEOMETRY);
  BKE_scene_graph_update_tagged(depsgraph, bmain);
}
/* Context-aware wrapper around SCULPT_dynamic_topology_disable_ex():
 * pulls Main, an evaluated Depsgraph, Scene and the active Object out of
 * the bContext before delegating. */
void SCULPT_dynamic_topology_disable(bContext *C, SculptUndoNode *unode)
{
  /* Order matters: ensure the depsgraph is evaluated before use. */
  Main *main_db = CTX_data_main(C);
  Depsgraph *graph = CTX_data_ensure_evaluated_depsgraph(C);
  Scene *active_scene = CTX_data_scene(C);
  Object *active_ob = CTX_data_active_object(C);

  SCULPT_dynamic_topology_disable_ex(main_db, graph, active_scene, active_ob, unode);
}
#else
/**
 * Snapshot every BMesh vertex's current coordinate and normal into the
 * "original" custom-data layers (ss->cd_origco_offset / ss->cd_origno_offset)
 * so stroke code can read pre-stroke geometry without a BMLog lookup.
 *
 * Fix: the second copy previously wrote the normal into 'co'
 * (`copy_v3_v3(co, v->no)`), clobbering the saved coordinate and leaving the
 * original-normal layer uninitialized; it now writes into 'no'.
 */
void SCULPT_dyntopo_save_origverts(SculptSession *ss)
{
  BMIter iter;
  BMVert *v;

  BM_ITER_MESH (v, &iter, ss->bm, BM_VERTS_OF_MESH) {
    float *co = BM_ELEM_CD_GET_VOID_P(v, ss->cd_origco_offset);
    float *no = BM_ELEM_CD_GET_VOID_P(v, ss->cd_origno_offset);

    copy_v3_v3(co, v->co);
    copy_v3_v3(no, v->no);
  }
}
void SCULPT_dyntopo_node_layers_add(SculptSession *ss)
{
int cd_node_layer_index, cd_face_node_layer_index;
char layer_id[] = "_dyntopo_node_id";
char origco_id[] = "_dyntopop_orig_co";
char origno_id[] = "_dyntopop_orig_no";
int cd_origco_index, cd_origno_index;
cd_origco_index = CustomData_get_named_layer_index(&ss->bm->vdata, CD_PROP_FLOAT3, origco_id);
if (cd_origco_index == -1) {
BM_data_layer_add_named(ss->bm, &ss->bm->vdata, CD_PROP_FLOAT3, origco_id);
}
cd_origno_index = CustomData_get_named_layer_index(&ss->bm->vdata, CD_PROP_FLOAT3, origno_id);
if (cd_origno_index == -1) {
BM_data_layer_add_named(ss->bm, &ss->bm->vdata, CD_PROP_FLOAT3, origno_id);
}
cd_node_layer_index = CustomData_get_named_layer_index(&ss->bm->vdata, CD_PROP_INT32, layer_id);
if (cd_node_layer_index == -1) {
BM_data_layer_add_named(ss->bm, &ss->bm->vdata, CD_PROP_INT32, layer_id);
cd_node_layer_index = CustomData_get_named_layer_index(
&ss->bm->vdata, CD_PROP_INT32, layer_id);
}
cd_face_node_layer_index = CustomData_get_named_layer_index(&ss->bm->pdata, CD_PROP_INT32, layer_id);
if (cd_face_node_layer_index == -1) {
BM_data_layer_add_named(ss->bm, &ss->bm->pdata, CD_PROP_INT32, layer_id);
}
//get indices again, as they might have changed after adding new layers
cd_origco_index = CustomData_get_named_layer_index(&ss->bm->vdata, CD_PROP_FLOAT3, origco_id);
cd_origno_index = CustomData_get_named_layer_index(&ss->bm->vdata, CD_PROP_FLOAT3, origno_id);
cd_node_layer_index = CustomData_get_named_layer_index(&ss->bm->vdata, CD_PROP_INT32, layer_id);
cd_face_node_layer_index = CustomData_get_named_layer_index(&ss->bm->pdata, CD_PROP_INT32, layer_id);
ss->cd_origco_offset = CustomData_get_n_offset(
&ss->bm->vdata,
CD_PROP_FLOAT3,
cd_origco_index - CustomData_get_layer_index(&ss->bm->vdata, CD_PROP_FLOAT3));
ss->bm->vdata.layers[cd_origco_index].flag |= CD_FLAG_TEMPORARY;
ss->cd_origno_offset = CustomData_get_n_offset(
&ss->bm->vdata,
CD_PROP_FLOAT3,
cd_origno_index - CustomData_get_layer_index(&ss->bm->vdata, CD_PROP_FLOAT3));
ss->bm->vdata.layers[cd_origno_index].flag |= CD_FLAG_TEMPORARY;
ss->cd_vert_node_offset = CustomData_get_n_offset(
&ss->bm->vdata,
CD_PROP_INT32,
@@ -130,19 +361,14 @@ void SCULPT_dyntopo_node_layers_add(SculptSession *ss)
ss->bm->vdata.layers[cd_node_layer_index].flag |= CD_FLAG_TEMPORARY;
cd_node_layer_index = CustomData_get_named_layer_index(&ss->bm->pdata, CD_PROP_INT32, layer_id);
if (cd_node_layer_index == -1) {
BM_data_layer_add_named(ss->bm, &ss->bm->pdata, CD_PROP_INT32, layer_id);
cd_node_layer_index = CustomData_get_named_layer_index(
&ss->bm->pdata, CD_PROP_INT32, layer_id);
}
ss->cd_face_node_offset = CustomData_get_n_offset(
&ss->bm->pdata,
CD_PROP_INT32,
cd_node_layer_index - CustomData_get_layer_index(&ss->bm->pdata, CD_PROP_INT32));
cd_face_node_layer_index - CustomData_get_layer_index(&ss->bm->pdata, CD_PROP_INT32));
ss->bm->pdata.layers[cd_node_layer_index].flag |= CD_FLAG_TEMPORARY;
ss->bm->pdata.layers[cd_face_node_layer_index].flag |= CD_FLAG_TEMPORARY;
SCULPT_dyntopo_save_origverts(ss);
}
void SCULPT_dynamic_topology_enable_ex(Main *bmain, Depsgraph *depsgraph, Scene *scene, Object *ob)
@@ -280,6 +506,7 @@ void SCULPT_dynamic_topology_disable(bContext *C, SculptUndoNode *unode)
Object *ob = CTX_data_active_object(C);
SCULPT_dynamic_topology_disable_ex(bmain, depsgraph, scene, ob, unode);
}
#endif
void sculpt_dynamic_topology_disable_with_undo(Main *bmain,
Depsgraph *depsgraph,
@@ -287,7 +514,7 @@ void sculpt_dynamic_topology_disable_with_undo(Main *bmain,
Object *ob)
{
SculptSession *ss = ob->sculpt;
if (ss->bm != NULL) {
if (ss->tm != NULL || ss->bm != NULL) {
/* May be false in background mode. */
const bool use_undo = G.background ? (ED_undo_stack_get() != NULL) : true;
if (use_undo) {
@@ -307,7 +534,7 @@ static void sculpt_dynamic_topology_enable_with_undo(Main *bmain,
Object *ob)
{
SculptSession *ss = ob->sculpt;
if (ss->bm == NULL) {
if (ss->tm == NULL || ss->bm != NULL) {
/* May be false in background mode. */
const bool use_undo = G.background ? (ED_undo_stack_get() != NULL) : true;
if (use_undo) {
@@ -331,7 +558,7 @@ static int sculpt_dynamic_topology_toggle_exec(bContext *C, wmOperator *UNUSED(o
WM_cursor_wait(true);
if (ss->bm) {
if (ss->tm || ss->bm) {
sculpt_dynamic_topology_disable_with_undo(bmain, depsgraph, scene, ob);
}
else {
@@ -381,9 +608,9 @@ enum eDynTopoWarnFlag SCULPT_dynamic_topology_check(Scene *scene, Object *ob)
enum eDynTopoWarnFlag flag = 0;
BLI_assert(ss->bm == NULL);
BLI_assert(ss->tm == NULL || ss->bm != NULL);
UNUSED_VARS_NDEBUG(ss);
#if 0
for (int i = 0; i < CD_NUMTYPES; i++) {
if (!ELEM(i, CD_MVERT, CD_MEDGE, CD_MFACE, CD_MLOOP, CD_MPOLY, CD_PAINT_MASK, CD_ORIGINDEX)) {
if (CustomData_has_layer(&me->vdata, i)) {
@@ -397,7 +624,7 @@ enum eDynTopoWarnFlag SCULPT_dynamic_topology_check(Scene *scene, Object *ob)
}
}
}
#endif
{
VirtualModifierData virtualModifierData;
ModifierData *md = BKE_modifiers_get_virtual_modifierlist(ob, &virtualModifierData);
@@ -426,7 +653,7 @@ static int sculpt_dynamic_topology_toggle_invoke(bContext *C,
Object *ob = CTX_data_active_object(C);
SculptSession *ss = ob->sculpt;
if (!ss->bm) {
if (!ss->tm || !ss->bm) {
Scene *scene = CTX_data_scene(C);
enum eDynTopoWarnFlag flag = SCULPT_dynamic_topology_check(scene, ob);

View File

@@ -67,6 +67,7 @@
#include "RNA_define.h"
#include "bmesh.h"
#include "trimesh.h"
#include <math.h>
#include <stdlib.h>

View File

@@ -63,6 +63,7 @@
#include "UI_interface.h"
#include "bmesh.h"
#include "trimesh.h"
#include <math.h>
#include <stdlib.h>

View File

@@ -59,6 +59,7 @@
#include "UI_interface.h"
#include "bmesh.h"
#include "trimesh.h"
#include <math.h>
#include <stdlib.h>

View File

@@ -60,6 +60,7 @@
#include "UI_interface.h"
#include "bmesh.h"
#include "trimesh.h"
#include <math.h>
#include <stdlib.h>

View File

@@ -96,20 +96,20 @@ char SCULPT_mesh_symmetry_xyz_get(Object *object);
void SCULPT_vertex_random_access_ensure(struct SculptSession *ss);
int SCULPT_vertex_count_get(struct SculptSession *ss);
const float *SCULPT_vertex_co_get(struct SculptSession *ss, int index);
void SCULPT_vertex_normal_get(SculptSession *ss, int index, float no[3]);
float SCULPT_vertex_mask_get(struct SculptSession *ss, int index);
const float *SCULPT_vertex_color_get(SculptSession *ss, int index);
const float *SCULPT_vertex_co_get(struct SculptSession *ss, SculptIdx index);
void SCULPT_vertex_normal_get(SculptSession *ss, SculptIdx index, float no[3]);
float SCULPT_vertex_mask_get(struct SculptSession *ss, SculptIdx index);
const float *SCULPT_vertex_color_get(SculptSession *ss, SculptIdx index);
const float *SCULPT_vertex_persistent_co_get(SculptSession *ss, int index);
void SCULPT_vertex_persistent_normal_get(SculptSession *ss, int index, float no[3]);
const float *SCULPT_vertex_persistent_co_get(SculptSession *ss, SculptIdx index);
void SCULPT_vertex_persistent_normal_get(SculptSession *ss, SculptIdx index, float no[3]);
/* Coordinates used for manipulating the base mesh when Grab Active Vertex is enabled. */
const float *SCULPT_vertex_co_for_grab_active_get(SculptSession *ss, int index);
const float *SCULPT_vertex_co_for_grab_active_get(SculptSession *ss, SculptIdx index);
/* Returns the info of the limit surface when Multires is available, otherwise it returns the
* current coordinate of the vertex. */
void SCULPT_vertex_limit_surface_get(SculptSession *ss, int index, float r_co[3]);
void SCULPT_vertex_limit_surface_get(SculptSession *ss, SculptIdx index, float r_co[3]);
/* Returns the pointer to the coordinates that should be edited from a brush tool iterator
* depending on the given deformation target. */
@@ -120,22 +120,23 @@ float *SCULPT_brush_deform_target_vertex_co_get(SculptSession *ss,
#define SCULPT_VERTEX_NEIGHBOR_FIXED_CAPACITY 256
typedef struct SculptVertexNeighborIter {
/* Storage */
int *neighbors;
SculptIdx *neighbors;
int size;
int capacity;
int neighbors_fixed[SCULPT_VERTEX_NEIGHBOR_FIXED_CAPACITY];
SculptIdx neighbors_fixed[SCULPT_VERTEX_NEIGHBOR_FIXED_CAPACITY];
int neighbors_nindex[SCULPT_VERTEX_NEIGHBOR_FIXED_CAPACITY];
/* Internal iterator. */
int num_duplicates;
int i;
/* Public */
int index;
SculptIdx index;
bool is_duplicate;
} SculptVertexNeighborIter;
void SCULPT_vertex_neighbors_get(struct SculptSession *ss,
const int index,
const SculptIdx index,
const bool include_duplicates,
SculptVertexNeighborIter *iter);
@@ -163,7 +164,7 @@ void SCULPT_vertex_neighbors_get(struct SculptSession *ss,
} \
((void)0)
int SCULPT_active_vertex_get(SculptSession *ss);
SculptIdx SCULPT_active_vertex_get(SculptSession *ss);
const float *SCULPT_active_vertex_co_get(SculptSession *ss);
void SCULPT_active_vertex_normal_get(SculptSession *ss, float normal[3]);
@@ -179,12 +180,12 @@ void SCULPT_fake_neighbors_free(struct Object *ob);
/* Vertex Info. */
void SCULPT_boundary_info_ensure(Object *object);
/* Boundary Info needs to be initialized in order to use this function. */
bool SCULPT_vertex_is_boundary(const SculptSession *ss, const int index);
bool SCULPT_vertex_is_boundary(const SculptSession *ss, const SculptIdx index);
/* Sculpt Visibility API */
void SCULPT_vertex_visible_set(SculptSession *ss, int index, bool visible);
bool SCULPT_vertex_visible_get(SculptSession *ss, int index);
void SCULPT_vertex_visible_set(SculptSession *ss, SculptIdx index, bool visible);
bool SCULPT_vertex_visible_get(SculptSession *ss, SculptIdx index);
void SCULPT_visibility_sync_all_face_sets_to_vertices(struct Object *ob);
void SCULPT_visibility_sync_all_vertex_to_face_sets(struct SculptSession *ss);
@@ -192,17 +193,17 @@ void SCULPT_visibility_sync_all_vertex_to_face_sets(struct SculptSession *ss);
/* Face Sets API */
int SCULPT_active_face_set_get(SculptSession *ss);
int SCULPT_vertex_face_set_get(SculptSession *ss, int index);
void SCULPT_vertex_face_set_set(SculptSession *ss, int index, int face_set);
int SCULPT_vertex_face_set_get(SculptSession *ss, SculptIdx index);
void SCULPT_vertex_face_set_set(SculptSession *ss, SculptIdx index, int face_set);
bool SCULPT_vertex_has_face_set(SculptSession *ss, int index, int face_set);
bool SCULPT_vertex_has_unique_face_set(SculptSession *ss, int index);
bool SCULPT_vertex_has_face_set(SculptSession *ss, SculptIdx index, int face_set);
bool SCULPT_vertex_has_unique_face_set(SculptSession *ss, SculptIdx index);
int SCULPT_face_set_next_available_get(SculptSession *ss);
void SCULPT_face_set_visibility_set(SculptSession *ss, int face_set, bool visible);
bool SCULPT_vertex_all_face_sets_visible_get(const SculptSession *ss, int index);
bool SCULPT_vertex_any_face_set_visible_get(SculptSession *ss, int index);
bool SCULPT_vertex_all_face_sets_visible_get(const SculptSession *ss, SculptIdx index);
bool SCULPT_vertex_any_face_set_visible_get(SculptSession *ss, SculptIdx index);
void SCULPT_face_sets_visibility_invert(SculptSession *ss);
void SCULPT_face_sets_visibility_all_set(SculptSession *ss, bool visible);
@@ -211,9 +212,12 @@ bool SCULPT_stroke_is_main_symmetry_pass(struct StrokeCache *cache);
bool SCULPT_stroke_is_first_brush_step(struct StrokeCache *cache);
bool SCULPT_stroke_is_first_brush_step_of_symmetry_pass(struct StrokeCache *cache);
struct TriMeshLog;
/* Sculpt Original Data */
typedef struct {
struct BMLog *bm_log;
struct TriMeshLog *tm_log;
struct SculptUndoNode *unode;
float (*coords)[3];
@@ -226,6 +230,7 @@ typedef struct {
const short *no;
float mask;
const float *col;
const short _no[3];
} SculptOrigVertData;
void SCULPT_orig_vert_data_init(SculptOrigVertData *data, Object *ob, PBVHNode *node);
@@ -245,7 +250,7 @@ void SCULPT_calc_brush_plane(struct Sculpt *sd,
void SCULPT_calc_area_normal(
Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode, float r_area_no[3]);
int SCULPT_nearest_vertex_get(struct Sculpt *sd,
SculptIdx SCULPT_nearest_vertex_get(struct Sculpt *sd,
struct Object *ob,
const float co[3],
float max_distance,
@@ -291,13 +296,13 @@ void SCULPT_floodfill_add_initial_with_symmetry(struct Sculpt *sd,
struct Object *ob,
struct SculptSession *ss,
SculptFloodFill *flood,
int index,
SculptIdx index,
float radius);
void SCULPT_floodfill_add_initial(SculptFloodFill *flood, int index);
void SCULPT_floodfill_add_initial(SculptFloodFill *flood, SculptIdx index);
void SCULPT_floodfill_execute(
struct SculptSession *ss,
SculptFloodFill *flood,
bool (*func)(SculptSession *ss, int from_v, int to_v, bool is_duplicate, void *userdata),
bool (*func)(SculptSession *ss, SculptIdx from_v, SculptIdx to_v, bool is_duplicate, void *userdata),
void *userdata);
void SCULPT_floodfill_free(SculptFloodFill *flood);
@@ -332,7 +337,7 @@ void SCULPT_pbvh_clear(Object *ob);
/* Automasking. */
float SCULPT_automasking_factor_get(struct AutomaskingCache *automasking,
SculptSession *ss,
int vert);
SculptIdx vert);
/* Returns the automasking cache depending on the active tool. Used for code that can run both for
* brushes and filter. */
@@ -455,7 +460,7 @@ void SCULPT_pose_ik_chain_free(struct SculptPoseIKChain *ik_chain);
/* Boundary Brush. */
struct SculptBoundary *SCULPT_boundary_data_init(Object *object,
Brush *brush,
const int initial_vertex,
const SculptIdx initial_vertex,
const float radius);
void SCULPT_boundary_data_free(struct SculptBoundary *boundary);
void SCULPT_do_boundary_brush(struct Sculpt *sd,
@@ -488,12 +493,15 @@ void SCULPT_do_smear_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode
/* Smooth Brush. */
void SCULPT_bmesh_four_neighbor_average(float avg[3], float direction[3], struct BMVert *v);
void SCULPT_neighbor_coords_average(SculptSession *ss, float result[3], int index);
float SCULPT_neighbor_mask_average(SculptSession *ss, int index);
void SCULPT_neighbor_color_average(SculptSession *ss, float result[4], int index);
struct TMVert;
void SCULPT_trimesh_four_neighbor_average(float avg[3], float direction[3], struct TMVert *v);
void SCULPT_neighbor_coords_average(SculptSession *ss, float result[3], SculptIdx index);
float SCULPT_neighbor_mask_average(SculptSession *ss, SculptIdx index);
void SCULPT_neighbor_color_average(SculptSession *ss, float result[4], SculptIdx index);
/* Mask the mesh boundaries smoothing only the mesh surface without using automasking. */
void SCULPT_neighbor_coords_average_interior(SculptSession *ss, float result[3], int index);
void SCULPT_neighbor_coords_average_interior(SculptSession *ss, float result[3], SculptIdx index);
void SCULPT_smooth(Sculpt *sd,
Object *ob,
@@ -509,13 +517,13 @@ void SCULPT_surface_smooth_laplacian_step(SculptSession *ss,
float *disp,
const float co[3],
float (*laplacian_disp)[3],
const int v_index,
const SculptIdx v_index,
const float origco[3],
const float alpha);
void SCULPT_surface_smooth_displace_step(SculptSession *ss,
float *co,
float (*laplacian_disp)[3],
const int v_index,
const SculptIdx v_index,
const float beta,
const float fade);
void SCULPT_do_surface_smooth_brush(Sculpt *sd, Object *ob, PBVHNode **nodes, int totnode);
@@ -726,7 +734,7 @@ typedef struct SculptThreadedTaskData {
bool mask_by_color_preserve_mask;
/* Index of the vertex that is going to be used as a reference for the colors. */
int mask_by_color_vertex;
SculptIdx mask_by_color_vertex;
float *mask_by_color_floodfill;
int face_set;
@@ -803,7 +811,7 @@ float SCULPT_brush_strength_factor(struct SculptSession *ss,
const short vno[3],
const float fno[3],
const float mask,
const int vertex_index,
const SculptIdx vertex_index,
const int thread_id);
/* Tilts a normal by the x and y tilt values using the view axis. */

View File

@@ -64,6 +64,7 @@
#include "sculpt_intern.h"
#include "bmesh.h"
#include "trimesh.h"
#include <math.h>
#include <stdlib.h>
@@ -116,7 +117,7 @@ static void sculpt_expand_task_cb(void *__restrict userdata,
BKE_pbvh_vertex_iter_begin(ss->pbvh, node, vd, PBVH_ITER_ALL)
{
int vi = vd.index;
SculptIdx vi = vd.index;
float final_mask = *vd.mask;
if (data->mask_expand_use_normals) {
if (ss->filter_cache->normal_factor[SCULPT_active_vertex_get(ss)] <
@@ -312,7 +313,7 @@ typedef struct MaskExpandFloodFillData {
} MaskExpandFloodFillData;
static bool mask_expand_floodfill_cb(
SculptSession *ss, int from_v, int to_v, bool is_duplicate, void *userdata)
SculptSession *ss, SculptIdx from_v, SculptIdx to_v, bool is_duplicate, void *userdata)
{
MaskExpandFloodFillData *data = userdata;
@@ -393,7 +394,7 @@ static int sculpt_mask_expand_invoke(bContext *C, wmOperator *op, const wmEvent
}
}
ss->filter_cache->mask_update_it = MEM_callocN(sizeof(int) * vertex_count,
ss->filter_cache->mask_update_it = MEM_callocN(sizeof(SculptIdx) * vertex_count,
"mask update iteration");
if (use_normals) {
ss->filter_cache->normal_factor = MEM_callocN(sizeof(float) * vertex_count,

View File

@@ -53,6 +53,7 @@
#include "GPU_state.h"
#include "bmesh.h"
#include "trimesh.h"
#include <math.h>
#include <stdlib.h>

View File

@@ -65,6 +65,7 @@
#include "IMB_imbuf.h"
#include "bmesh.h"
#include "trimesh.h"
#include <math.h>
#include <stdlib.h>

View File

@@ -48,6 +48,7 @@
#include "sculpt_intern.h"
#include "bmesh.h"
#include "trimesh.h"
#include <math.h>
#include <stdlib.h>
@@ -384,7 +385,7 @@ typedef struct PoseFloodFillData {
int current_face_set;
int next_face_set;
int prev_face_set;
int next_vertex;
SculptIdx next_vertex;
bool next_face_set_found;
@@ -406,7 +407,7 @@ typedef struct PoseFloodFillData {
int fallback_count;
/* Face Set FK mode. */
int *floodfill_it;
SculptIdx *floodfill_it;
float *fk_weights;
int initial_face_set;
int masked_face_set_it;
@@ -415,7 +416,7 @@ typedef struct PoseFloodFillData {
} PoseFloodFillData;
static bool pose_topology_floodfill_cb(
SculptSession *ss, int UNUSED(from_v), int to_v, bool is_duplicate, void *userdata)
SculptSession *ss, SculptIdx UNUSED(from_v), SculptIdx to_v, bool is_duplicate, void *userdata)
{
PoseFloodFillData *data = userdata;
const float *co = SCULPT_vertex_co_get(ss, to_v);
@@ -444,11 +445,11 @@ static bool pose_topology_floodfill_cb(
}
static bool pose_face_sets_floodfill_cb(
SculptSession *ss, int UNUSED(from_v), int to_v, bool is_duplicate, void *userdata)
SculptSession *ss, SculptIdx UNUSED(from_v), SculptIdx to_v, bool is_duplicate, void *userdata)
{
PoseFloodFillData *data = userdata;
const int index = to_v;
const SculptIdx index = to_v;
bool visit_next = false;
const float *co = SCULPT_vertex_co_get(ss, index);
@@ -683,7 +684,7 @@ static SculptPoseIKChain *pose_ik_chain_init_topology(Sculpt *sd,
float next_chain_segment_target[3];
int totvert = SCULPT_vertex_count_get(ss);
int nearest_vertex_index = SCULPT_nearest_vertex_get(sd, ob, initial_location, FLT_MAX, true);
SculptIdx nearest_vertex_index = SCULPT_nearest_vertex_get(sd, ob, initial_location, FLT_MAX, true);
/* Init the buffers used to keep track of the changes in the pose factors as more segments are
* added to the IK chain. */
@@ -768,7 +769,7 @@ static SculptPoseIKChain *pose_ik_chain_init_face_sets(
int current_face_set = SCULPT_FACE_SET_NONE;
int prev_face_set = SCULPT_FACE_SET_NONE;
int current_vertex = SCULPT_active_vertex_get(ss);
SculptIdx current_vertex = SCULPT_active_vertex_get(ss);
for (int s = 0; s < ik_chain->tot_segments; s++) {
@@ -824,7 +825,7 @@ static SculptPoseIKChain *pose_ik_chain_init_face_sets(
}
static bool pose_face_sets_fk_find_masked_floodfill_cb(
SculptSession *ss, int from_v, int to_v, bool is_duplicate, void *userdata)
SculptSession *ss, SculptIdx from_v, SculptIdx to_v, bool is_duplicate, void *userdata)
{
PoseFloodFillData *data = userdata;
@@ -858,7 +859,7 @@ static bool pose_face_sets_fk_find_masked_floodfill_cb(
}
static bool pose_face_sets_fk_set_weights_floodfill_cb(
SculptSession *ss, int UNUSED(from_v), int to_v, bool UNUSED(is_duplicate), void *userdata)
SculptSession *ss, SculptIdx UNUSED(from_v), SculptIdx to_v, bool UNUSED(is_duplicate), void *userdata)
{
PoseFloodFillData *data = userdata;
data->fk_weights[to_v] = 1.0f;
@@ -872,14 +873,14 @@ static SculptPoseIKChain *pose_ik_chain_init_face_sets_fk(
SculptPoseIKChain *ik_chain = pose_ik_chain_new(1, totvert);
const int active_vertex = SCULPT_active_vertex_get(ss);
const int active_face_set = SCULPT_active_face_set_get(ss);
const SculptIdx active_vertex = SCULPT_active_vertex_get(ss);
const SculptIdx active_face_set = SCULPT_active_face_set_get(ss);
SculptFloodFill flood;
SCULPT_floodfill_init(ss, &flood);
SCULPT_floodfill_add_initial(&flood, active_vertex);
PoseFloodFillData fdata;
fdata.floodfill_it = MEM_calloc_arrayN(totvert, sizeof(int), "floodfill iteration");
fdata.floodfill_it = MEM_calloc_arrayN(totvert, sizeof(SculptIdx), "floodfill iteration");
fdata.floodfill_it[active_vertex] = 1;
fdata.initial_face_set = active_face_set;
fdata.masked_face_set = SCULPT_FACE_SET_NONE;

View File

@@ -58,11 +58,17 @@
#include "RNA_define.h"
#include "bmesh.h"
#include "trimesh.h"
#ifdef PROXY_ADVANCED
#include "BKE_DerivedMesh.h"
#include "../../blenkernel/intern/pbvh_intern.h"
#endif
#include <math.h>
#include <stdlib.h>
void SCULPT_neighbor_coords_average_interior(SculptSession *ss, float result[3], int index)
void SCULPT_neighbor_coords_average_interior(SculptSession *ss, float result[3], SculptIdx index)
{
float avg[3] = {0.0f, 0.0f, 0.0f};
int total = 0;
@@ -149,10 +155,57 @@ void SCULPT_bmesh_four_neighbor_average(float avg[3], float direction[3], BMVert
}
}
/* For trimesh: Average surrounding verts based on an orthogonality measure.
 * Naturally converges to a quad-like structure. */
void SCULPT_trimesh_four_neighbor_average(float avg[3], float direction[3], TMVert *v)
{
  float weighted_sum[3] = {0.0f, 0.0f, 0.0f};
  float weight_total = 0.0f;

  for (int i = 0; i < v->edges.length; i++) {
    TMEdge *e = v->edges.items[i];

    /* Boundary vertices stay pinned in place. */
    if (TM_edge_is_boundary(e)) {
      copy_v3_v3(avg, v->co);
      return;
    }

    TMVert *neighbor = (e->v1 == v) ? e->v2 : e->v1;

    /* Edge direction projected onto the tangent plane at v. */
    float tan_dir[3];
    sub_v3_v3v3(tan_dir, neighbor->co, v->co);
    madd_v3_v3fl(tan_dir, v->no, -dot_v3v3(tan_dir, v->no));
    normalize_v3(tan_dir);

    /* Weight is a measure of how orthogonal or parallel the edge is
     * relative to `direction`. */
    float w = dot_v3v3(tan_dir, direction);
    w = w * w - 0.5f;
    w *= w;

    madd_v3_v3fl(weighted_sum, neighbor->co, w);
    weight_total += w;
  }

  /* In case the vert has no edges (or all weights vanished). */
  if (weight_total <= 0.0f) {
    zero_v3(avg);
    return;
  }

  mul_v3_v3fl(avg, weighted_sum, 1.0f / weight_total);

  /* Preserve volume: remove the component of the offset along the normal. */
  float normal_part[3];
  sub_v3_v3(avg, v->co);
  mul_v3_v3fl(normal_part, v->no, dot_v3v3(avg, v->no));
  sub_v3_v3(avg, normal_part);
  add_v3_v3(avg, v->co);
}
/* Generic functions for laplacian smoothing. These functions do not take boundary vertices into
* account. */
void SCULPT_neighbor_coords_average(SculptSession *ss, float result[3], int index)
void SCULPT_neighbor_coords_average(SculptSession *ss, float result[3], SculptIdx index)
{
float avg[3] = {0.0f, 0.0f, 0.0f};
int total = 0;
@@ -172,7 +225,7 @@ void SCULPT_neighbor_coords_average(SculptSession *ss, float result[3], int inde
}
}
float SCULPT_neighbor_mask_average(SculptSession *ss, int index)
float SCULPT_neighbor_mask_average(SculptSession *ss, SculptIdx index)
{
float avg = 0.0f;
int total = 0;
@@ -190,7 +243,7 @@ float SCULPT_neighbor_mask_average(SculptSession *ss, int index)
return SCULPT_vertex_mask_get(ss, index);
}
void SCULPT_neighbor_color_average(SculptSession *ss, float result[4], int index)
void SCULPT_neighbor_color_average(SculptSession *ss, float result[4], SculptIdx index)
{
float avg[4] = {0.0f, 0.0f, 0.0f, 0.0f};
int total = 0;
@@ -289,6 +342,96 @@ static void SCULPT_enhance_details_brush(Sculpt *sd,
BLI_task_parallel_range(0, totnode, &data, do_enhance_details_brush_task_cb_ex, &settings);
}
#ifdef PROXY_ADVANCED
/* Smoothing task callback for the PROXY_ADVANCED code path: operates on the
 * node's cached ProxyVertArray instead of iterating PBVH vertices directly.
 * Each proxy vertex is averaged with its cross-node neighbors and the result
 * is blended in by the brush falloff. */
static void do_smooth_brush_task_cb_ex(void *__restrict userdata,
                                       const int n,
                                       const TaskParallelTLS *__restrict tls)
{
  SculptThreadedTaskData *data = userdata;
  SculptSession *ss = data->ob->sculpt;
  Sculpt *sd = data->sd;
  const Brush *brush = data->brush;
  const bool smooth_mask = data->smooth_mask;
  float bstrength = data->strength;

  PBVHVertexIter vd;

  CLAMP(bstrength, 0.0f, 1.0f);

  SculptBrushTest test;
  SculptBrushTestFn sculpt_brush_test_sq_fn = SCULPT_brush_test_init_with_falloff_shape(
      ss, &test, data->brush->falloff_shape);

  const int thread_id = BLI_task_parallel_thread_id(tls);
  PBVHNode **nodes = data->nodes;
  ProxyVertArray *p = &nodes[n]->proxyverts;

  for (int i = 0; i < p->size; i++) {
    float co[3] = {0.0f, 0.0f, 0.0f};
    int ni = 0;

/* Debug toggle: the #else branch applies full-strength smoothing to every
 * proxy vertex, bypassing the brush test. */
#  if 1
    if (sculpt_brush_test_sq_fn(&test, p->co[i])) {
      const float fade = bstrength * SCULPT_brush_strength_factor(
                                         ss,
                                         brush,
                                         p->co[i],
                                         sqrtf(test.dist),
                                         p->no[i],
                                         p->fno[i],
                                         smooth_mask ? 0.0f : (p->mask ? p->mask[i] : 0.0f),
                                         p->index[i],
                                         thread_id);
#  else
    if (1) {
      const float fade = 1.0;
#  endif

      /* Accumulate neighbor coordinates; the neighbor list is terminated by
       * an entry with a negative node index. */
      while (/*ni < MAX_PROXY_NEIGHBORS &&*/ p->neighbors[i][ni].node >= 0) {
        ProxyKey *key = p->neighbors[i] + ni;
        PBVHNode *n2 = ss->pbvh->nodes + key->node;

        // printf("%d %d %d %p\n", key->node, key->pindex, ss->pbvh->totnode, n2);
        float *co2 = n2->proxyverts.co[key->pindex];

        co[0] += co2[0];
        co[1] += co2[1];
        co[2] += co2[2];

        //add_v3_v3(co, n2->proxyverts.co[key->pindex]);
        ni++;
      }

      // printf("ni %d\n", ni);

      /* Only average when there are enough neighbors for a stable result;
       * otherwise keep the original coordinate. */
      if (ni > 2) {
        float mul = 1.0 / (float) ni;

        co[0] *= mul;
        co[1] *= mul;
        co[2] *= mul;
        // mul_v3_fl(co, 1.0f / (float)ni);
      }
      else {
        co[0] = p->co[i][0];
        co[1] = p->co[i][1];
        co[2] = p->co[i][2];
        //copy_v3_v3(co, p->co[i]);
      }

      // printf("%f %f %f ", co[0], co[1], co[2]);

      /* Blend toward the averaged position by the brush falloff. */
      p->co[i][0] += (co[0] - p->co[i][0]) * fade;
      p->co[i][1] += (co[1] - p->co[i][1]) * fade;
      p->co[i][2] += (co[2] - p->co[i][2]) * fade;
      //interp_v3_v3v3(p->co[i], p->co[i], co, fade);
    }
  }
}
#else
static void do_smooth_brush_task_cb_ex(void *__restrict userdata,
const int n,
const TaskParallelTLS *__restrict tls)
@@ -343,6 +486,8 @@ static void do_smooth_brush_task_cb_ex(void *__restrict userdata,
}
BKE_pbvh_vertex_iter_end;
}
#endif
void SCULPT_smooth(Sculpt *sd,
Object *ob,
@@ -370,9 +515,18 @@ void SCULPT_smooth(Sculpt *sd,
return;
}
SCULPT_vertex_random_access_ensure(ss);
if (!ELEM(type, PBVH_TRIMESH, PBVH_BMESH)) {
SCULPT_vertex_random_access_ensure(ss);
}
SCULPT_boundary_info_ensure(ob);
#ifdef PROXY_ADVANCED
int datamask = PV_CO | PV_NEIGHBORS | PV_NO | PV_INDEX | PV_MASK;
BKE_pbvh_ensure_proxyarrays(ss, ss->pbvh, datamask);
BKE_pbvh_load_proxyarrays(ss->pbvh, nodes, totnode, PV_CO | PV_NO | PV_MASK);
#endif
for (iteration = 0; iteration <= count; iteration++) {
const float strength = (iteration != count) ? 1.0f : last;
@@ -388,6 +542,10 @@ void SCULPT_smooth(Sculpt *sd,
TaskParallelSettings settings;
BKE_pbvh_parallel_range_settings(&settings, true, totnode);
BLI_task_parallel_range(0, totnode, &data, do_smooth_brush_task_cb_ex, &settings);
#ifdef PROXY_ADVANCED
BKE_pbvh_gather_proxyarray(ss->pbvh, nodes, totnode);
#endif
}
}
@@ -411,7 +569,7 @@ void SCULPT_surface_smooth_laplacian_step(SculptSession *ss,
float *disp,
const float co[3],
float (*laplacian_disp)[3],
const int v_index,
const SculptIdx v_index,
const float origco[3],
const float alpha)
{
@@ -430,7 +588,7 @@ void SCULPT_surface_smooth_laplacian_step(SculptSession *ss,
void SCULPT_surface_smooth_displace_step(SculptSession *ss,
float *co,
float (*laplacian_disp)[3],
const int v_index,
const SculptIdx v_index,
const float beta,
const float fade)
{

View File

@@ -56,6 +56,7 @@
#include "RNA_define.h"
#include "bmesh.h"
#include "trimesh.h"
#include <math.h>
#include <stdlib.h>

View File

@@ -69,6 +69,7 @@
#include "bmesh.h"
#include "sculpt_intern.h"
#include "trimesh.h"
/* Implementation of undo system for objects in sculpt mode.
*
@@ -402,6 +403,49 @@ static bool sculpt_undo_restore_face_sets(bContext *C, SculptUndoNode *unode)
return false;
}
/* Task callback: flag one PBVH node for redraw after a trimesh undo step. */
static void sculpt_undo_trimesh_restore_generic_task_cb(
    void *__restrict userdata, const int n, const TaskParallelTLS *__restrict UNUSED(tls))
{
  PBVHNode **all_nodes = userdata;

  BKE_pbvh_node_mark_redraw(all_nodes[n]);
}
/* Generic trimesh undo/redo step: replay the log and update affected nodes.
 *
 * NOTE(review): currently disabled via the early return below — trimesh log
 * replay is not wired up yet, so everything after the return is dead code
 * kept as a reference (it still uses the bmesh log, ss->bm_log). */
static void sculpt_undo_trimesh_restore_generic(SculptUndoNode *unode,
                                                Object *ob,
                                                SculptSession *ss)
{
  // XXX
  return;

  /* Replay the log in the direction opposite to the last application. */
  if (unode->applied) {
    BM_log_undo(ss->bm, ss->bm_log);
    unode->applied = false;
  }
  else {
    BM_log_redo(ss->bm, ss->bm_log);
    unode->applied = true;
  }

  if (unode->type == SCULPT_UNDO_MASK) {
    /* Mask-only changes need a redraw of every node, not a PBVH rebuild. */
    int totnode;
    PBVHNode **nodes;
    BKE_pbvh_search_gather(ss->pbvh, NULL, NULL, &nodes, &totnode);

    TaskParallelSettings settings;
    BKE_pbvh_parallel_range_settings(&settings, true, totnode);
    BLI_task_parallel_range(
        0, totnode, nodes, sculpt_undo_trimesh_restore_generic_task_cb, &settings);

    if (nodes) {
      MEM_freeN(nodes);
    }
  }
  else {
    /* Topology may have changed: rebuild the PBVH from scratch. */
    SCULPT_pbvh_clear(ob);
  }
}
static void sculpt_undo_bmesh_restore_generic_task_cb(
void *__restrict userdata, const int n, const TaskParallelTLS *__restrict UNUSED(tls))
{
@@ -501,6 +545,63 @@ static void sculpt_undo_bmesh_restore_end(bContext *C,
}
}
/* Re-enable dynamic-topology sculpting when redoing a DYNTOPO_BEGIN step.
 *
 * NOTE(review): the entire body is disabled (#if 0) — the trimesh equivalent
 * of the BMesh enable path has not been ported yet, so this is currently a
 * no-op; the disabled code below is the bmesh reference implementation. */
static void sculpt_undo_trimesh_enable(Object *ob, SculptUndoNode *unode)
{
#if 0
  SculptSession *ss = ob->sculpt;
  Mesh *me = ob->data;

  SCULPT_pbvh_clear(ob);

  /* Create empty BMesh and enable logging. */
  ss->bm = BM_mesh_create(&bm_mesh_allocsize_default,
                          &((struct BMeshCreateParams){
                              .use_toolflags = false,
                          }));
  BM_data_layer_add(ss->bm, &ss->bm->vdata, CD_PAINT_MASK);

  SCULPT_dyntopo_node_layers_add(ss);
  me->flag |= ME_SCULPT_DYNAMIC_TOPOLOGY;

  /* Restore the BMLog using saved entries. */
  ss->bm_log = BM_log_from_existing_entries_create(ss->bm, unode->bm_entry);
#endif
}
/* Undo/redo handler for a SCULPT_UNDO_DYNTOPO_BEGIN node: toggles dynamic
 * topology off (undo) or back on (redo) and flips the applied flag. */
static void sculpt_undo_trimesh_restore_begin(bContext *C,
                                              SculptUndoNode *unode,
                                              Object *ob,
                                              SculptSession *ss)
{
  const bool was_applied = unode->applied;

  if (was_applied) {
    /* Undo direction: leave dynamic-topology mode. */
    SCULPT_dynamic_topology_disable(C, unode);
  }
  else {
    /* Redo direction: re-enter dynamic-topology mode. */
    sculpt_undo_trimesh_enable(ob, unode);
  }

  unode->applied = !was_applied;
}
/* Undo/redo handler for a SCULPT_UNDO_DYNTOPO_END node: re-enables dynamic
 * topology (undo) or disables it (redo) and flips the applied flag. */
static void sculpt_undo_trimesh_restore_end(bContext *C,
                                            SculptUndoNode *unode,
                                            Object *ob,
                                            SculptSession *ss)
{
  const bool was_applied = unode->applied;

  if (was_applied) {
    sculpt_undo_trimesh_enable(ob, unode);

    /* Restore the mesh from the last log entry. */
    // XXX BM_log_undo(ss->bm, ss->bm_log);
  }
  else {
    /* Disable dynamic topology sculpting. */
    SCULPT_dynamic_topology_disable(C, NULL);
  }

  unode->applied = !was_applied;
}
static void sculpt_undo_geometry_store_data(SculptUndoNodeGeometry *geometry, Object *object)
{
Mesh *mesh = object->data;
@@ -609,6 +710,51 @@ static int sculpt_undo_bmesh_restore(bContext *C,
return false;
}
/* Handle all dynamic-topology undo updates.
 *
 * Returns true if this was a dynamic-topology undo step, otherwise returns
 * false to indicate the non-dyntopo code should run.
 *
 * Despite the name, this dispatches to either the trimesh or the bmesh
 * implementation depending on the WITH_TRIMESH build option. */
static int sculpt_undo_trimesh_restore(bContext *C,
                                       SculptUndoNode *unode,
                                       Object *ob,
                                       SculptSession *ss)
{
#ifdef WITH_TRIMESH
  switch (unode->type) {
    case SCULPT_UNDO_DYNTOPO_BEGIN:
      sculpt_undo_trimesh_restore_begin(C, unode, ob, ss);
      return true;

    case SCULPT_UNDO_DYNTOPO_END:
      sculpt_undo_trimesh_restore_end(C, unode, ob, ss);
      return true;
    default:
      /* Other undo types are dyntopo steps only while a trimesh log exists. */
      if (ss->tm_log) {
        sculpt_undo_trimesh_restore_generic(unode, ob, ss);
        return true;
      }
      break;
  }
#else
  switch (unode->type) {
    case SCULPT_UNDO_DYNTOPO_BEGIN:
      sculpt_undo_bmesh_restore_begin(C, unode, ob, ss);
      return true;

    case SCULPT_UNDO_DYNTOPO_END:
      sculpt_undo_bmesh_restore_end(C, unode, ob, ss);
      return true;
    default:
      /* Other undo types are dyntopo steps only while a bmesh log exists. */
      if (ss->bm_log) {
        sculpt_undo_bmesh_restore_generic(unode, ob, ss);
        return true;
      }
      break;
  }
#endif

  return false;
}
static void sculpt_undo_restore_list(bContext *C, Depsgraph *depsgraph, ListBase *lb)
{
Scene *scene = CTX_data_scene(C);
@@ -674,7 +820,7 @@ static void sculpt_undo_restore_list(bContext *C, Depsgraph *depsgraph, ListBase
BKE_sculpt_update_object_for_edit(depsgraph, ob, false, need_mask, false);
}
if (sculpt_undo_bmesh_restore(C, lb->first, ob, ss)) {
if (sculpt_undo_trimesh_restore(C, lb->first, ob, ss)) {
return;
}
}
@@ -1209,7 +1355,7 @@ static SculptUndoNode *sculpt_undo_bmesh_push(Object *ob, PBVHNode *node, Sculpt
* original positions are logged. */
BKE_pbvh_vertex_iter_begin(ss->pbvh, node, vd, PBVH_ITER_ALL)
{
BM_log_vert_before_modified(ss->bm_log, vd.bm_vert, vd.cd_vert_mask_offset);
BKE_pbvh_bmesh_update_origvert(ss->pbvh, vd.bm_vert);
}
BKE_pbvh_vertex_iter_end;
break;
@@ -1219,7 +1365,7 @@ static SculptUndoNode *sculpt_undo_bmesh_push(Object *ob, PBVHNode *node, Sculpt
GSet *faces = BKE_pbvh_bmesh_node_faces(node);
BKE_pbvh_vertex_iter_begin(ss->pbvh, node, vd, PBVH_ITER_ALL)
{
BM_log_vert_before_modified(ss->bm_log, vd.bm_vert, vd.cd_vert_mask_offset);
BKE_pbvh_bmesh_update_origvert(ss->pbvh, vd.bm_vert);
}
BKE_pbvh_vertex_iter_end;
@@ -1243,6 +1389,97 @@ static SculptUndoNode *sculpt_undo_bmesh_push(Object *ob, PBVHNode *node, Sculpt
return unode;
}
/* Push an undo node for trimesh dynamic-topology sculpting.
 *
 * Dynamic topology stores only one undo node per stroke regardless of how
 * many PBVH nodes were modified, so when a node already exists it is simply
 * returned.
 *
 * NOTE(review): trimesh undo logging is not implemented yet — only a minimal
 * node is recorded, so these steps cannot actually be replayed. The bmesh
 * reference implementation is kept below under `#if 0` as a porting guide.
 * Previously it sat after the return under `#if 1`, which compiled dead code
 * referencing ss->bm_log (invalid for a trimesh session) and triggered
 * unreachable-code warnings. */
static SculptUndoNode *sculpt_undo_trimesh_push(Object *ob, PBVHNode *node, SculptUndoType type)
{
  UndoSculpt *usculpt = sculpt_undo_get_nodes();
  SculptUndoNode *unode = usculpt->nodes.first;

  if (unode == NULL) {
    unode = MEM_callocN(sizeof(*unode), __func__);

    BLI_strncpy(unode->idname, ob->id.name, sizeof(unode->idname));
    unode->type = type;
    unode->applied = true;

    BLI_addtail(&usculpt->nodes, unode);
  }

  return unode;

#if 0 /* Bmesh reference implementation, not yet ported to trimesh. */
  SculptSession *ss = ob->sculpt;
  PBVHVertexIter vd;

  if (unode == NULL) {
    unode = MEM_callocN(sizeof(*unode), __func__);

    BLI_strncpy(unode->idname, ob->id.name, sizeof(unode->idname));
    unode->type = type;
    unode->applied = true;

    if (type == SCULPT_UNDO_DYNTOPO_END) {
      unode->bm_entry = BM_log_entry_add(ss->bm_log);
      BM_log_before_all_removed(ss->bm, ss->bm_log);
    }
    else if (type == SCULPT_UNDO_DYNTOPO_BEGIN) {
      /* Store a copy of the mesh's current vertices, loops, and
       * polys. A full copy like this is needed because entering
       * dynamic-topology immediately does topological edits
       * (converting polys to triangles) that the BMLog can't
       * fully restore from. */
      SculptUndoNodeGeometry *geometry = &unode->geometry_bmesh_enter;
      sculpt_undo_geometry_store_data(geometry, ob);

      unode->bm_entry = BM_log_entry_add(ss->bm_log);
      BM_log_all_added(ss->bm, ss->bm_log);
    }
    else {
      unode->bm_entry = BM_log_entry_add(ss->bm_log);
    }

    BLI_addtail(&usculpt->nodes, unode);
  }

  if (node) {
    switch (type) {
      case SCULPT_UNDO_COORDS:
      case SCULPT_UNDO_MASK:
        /* Before any vertex values get modified, ensure their
         * original positions are logged. */
        BKE_pbvh_vertex_iter_begin(ss->pbvh, node, vd, PBVH_ITER_ALL)
        {
          BKE_pbvh_bmesh_update_origvert(ss->pbvh, vd.bm_vert);
        }
        BKE_pbvh_vertex_iter_end;
        break;

      case SCULPT_UNDO_HIDDEN: {
        GSetIterator gs_iter;
        GSet *faces = BKE_pbvh_bmesh_node_faces(node);

        BKE_pbvh_vertex_iter_begin(ss->pbvh, node, vd, PBVH_ITER_ALL)
        {
          BKE_pbvh_bmesh_update_origvert(ss->pbvh, vd.bm_vert);
        }
        BKE_pbvh_vertex_iter_end;

        GSET_ITER (gs_iter, faces) {
          BMFace *f = BLI_gsetIterator_getKey(&gs_iter);
          BM_log_face_modified(ss->bm_log, f);
        }
        break;
      }

      case SCULPT_UNDO_DYNTOPO_BEGIN:
      case SCULPT_UNDO_DYNTOPO_END:
      case SCULPT_UNDO_DYNTOPO_SYMMETRIZE:
      case SCULPT_UNDO_GEOMETRY:
      case SCULPT_UNDO_FACE_SETS:
      case SCULPT_UNDO_COLOR:
        break;
    }
  }

  return unode;
#endif
}
SculptUndoNode *SCULPT_undo_push_node(Object *ob, PBVHNode *node, SculptUndoType type)
{
SculptSession *ss = ob->sculpt;
@@ -1253,6 +1490,15 @@ SculptUndoNode *SCULPT_undo_push_node(Object *ob, PBVHNode *node, SculptUndoType
ss->needs_flush_to_id = 1;
#ifdef WITH_TRIMESH
if (ss->tm || ELEM(type, SCULPT_UNDO_DYNTOPO_BEGIN, SCULPT_UNDO_DYNTOPO_END)) {
/* Dynamic topology stores only one undo node per stroke,
* regardless of the number of PBVH nodes modified. */
unode = sculpt_undo_trimesh_push(ob, node, type);
BLI_thread_unlock(LOCK_CUSTOM1);
return unode;
}
#else
if (ss->bm || ELEM(type, SCULPT_UNDO_DYNTOPO_BEGIN, SCULPT_UNDO_DYNTOPO_END)) {
/* Dynamic topology stores only one undo node per stroke,
* regardless of the number of PBVH nodes modified. */
@@ -1260,6 +1506,7 @@ SculptUndoNode *SCULPT_undo_push_node(Object *ob, PBVHNode *node, SculptUndoType
BLI_thread_unlock(LOCK_CUSTOM1);
return unode;
}
#endif
if (type == SCULPT_UNDO_GEOMETRY) {
unode = sculpt_undo_geometry_push(ob, type);
BLI_thread_unlock(LOCK_CUSTOM1);

View File

@@ -25,6 +25,7 @@ set(INC
../../bmesh
../../depsgraph
../../gpu
../../trimesh
../../imbuf
../../makesdna
../../makesrna

View File

@@ -66,6 +66,9 @@
#include "GPU_capabilities.h"
#include "trimesh.h"
#include "bmesh.h"
#define MAX_INFO_NUM_LEN 16
typedef struct SceneStats {
@@ -352,13 +355,18 @@ static void stats_object_pose(Object *ob, SceneStats *stats)
/* Fill vertex/triangle counts for an object in dynamic-topology sculpt mode.
 * Reads from the trimesh on WITH_TRIMESH builds, otherwise from the sculpt
 * BMesh (whose faces are all triangles in dyntopo, so totface == tottri). */
static void stats_object_sculpt_dynamic_topology(Object *ob, SceneStats *stats)
{
#ifdef WITH_TRIMESH
  stats->totvert = ob->sculpt->tm->totvert;
  stats->tottri = ob->sculpt->tm->tottri;
#else
  stats->totvert = ob->sculpt->bm->totvert;
  stats->tottri = ob->sculpt->bm->totface;
#endif
}
static bool stats_is_object_dynamic_topology_sculpt(Object *ob, const eObjectMode object_mode)
{
return (ob && (object_mode & OB_MODE_SCULPT) && ob->sculpt && ob->sculpt->bm);
return (ob && (object_mode & OB_MODE_SCULPT) && ob->sculpt && (ob->sculpt->tm || ob->sculpt->bm));
}
/* Statistics displayed in info header. Called regularly on scene changes. */

View File

@@ -33,6 +33,7 @@ set(INC
../blenkernel
../blenlib
../bmesh
../trimesh
../draw
../imbuf
../makesdna

View File

@@ -61,9 +61,11 @@ GPU_PBVH_Buffers *GPU_pbvh_mesh_buffers_build(const struct MPoly *mpoly,
GPU_PBVH_Buffers *GPU_pbvh_grid_buffers_build(int totgrid, unsigned int **grid_hidden);
GPU_PBVH_Buffers *GPU_pbvh_bmesh_buffers_build(bool smooth_shading);
GPU_PBVH_Buffers *GPU_pbvh_trimesh_buffers_build(bool smooth_shading);
/* Free part of data for update. Not thread safe, must run in OpenGL main thread. */
void GPU_pbvh_bmesh_buffers_update_free(GPU_PBVH_Buffers *buffers);
void GPU_pbvh_trimesh_buffers_update_free(GPU_PBVH_Buffers *buffers);
void GPU_pbvh_grid_buffers_update_free(GPU_PBVH_Buffers *buffers,
const struct DMFlagMat *grid_flag_mats,
const int *grid_indices);
@@ -90,7 +92,17 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
struct GSet *bm_faces,
struct GSet *bm_unique_verts,
struct GSet *bm_other_verts,
const int update_flags);
const int update_flags,
const int cd_vert_node_offset);
struct TM_TriMesh;
void GPU_pbvh_trimesh_buffers_update(GPU_PBVH_Buffers *buffers,
struct TM_TriMesh *bm,
struct GSet *bm_faces,
struct TableGSet *bm_unique_verts,
struct TableGSet *bm_other_verts,
const int update_flags,
const int cd_vert_node_offset);
void GPU_pbvh_grid_buffers_update(GPU_PBVH_Buffers *buffers,
struct SubdivCCG *subdiv_ccg,

View File

@@ -118,7 +118,6 @@ GPU_INLINE void *GPU_vertbuf_raw_step(GPUVertBufRaw *a)
{
unsigned char *data = a->data;
a->data += a->stride;
BLI_assert(data < a->_data_end);
return (void *)data;
}

View File

@@ -42,6 +42,7 @@
#include "BKE_DerivedMesh.h"
#include "BKE_ccg.h"
#include "BKE_global.h"
#include "BKE_mesh.h"
#include "BKE_paint.h"
#include "BKE_pbvh.h"
@@ -53,6 +54,7 @@
#include "gpu_private.h"
#include "bmesh.h"
#include "trimesh.h"
/* XXX: the rest of the code in this file is used for optimized PBVH
* drawing and doesn't interact at all with the buffer code above */
@@ -84,7 +86,7 @@ struct GPU_PBVH_Buffers {
const int *grid_indices;
int totgrid;
bool use_bmesh;
bool use_bmesh, use_trimesh;
bool clear_bmesh_on_flush;
uint tot_tri, tot_quad;
@@ -121,7 +123,7 @@ void gpu_pbvh_init()
g_vbo_id.msk = GPU_vertformat_attr_add(
&g_vbo_id.format, "msk", GPU_COMP_U8, 1, GPU_FETCH_INT_TO_FLOAT_UNIT);
g_vbo_id.col = GPU_vertformat_attr_add(
&g_vbo_id.format, "ac", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
&g_vbo_id.format, "c", GPU_COMP_U16, 4, GPU_FETCH_INT_TO_FLOAT_UNIT);
g_vbo_id.fset = GPU_vertformat_attr_add(
&g_vbo_id.format, "fset", GPU_COMP_U8, 3, GPU_FETCH_INT_TO_FLOAT_UNIT);
}
@@ -793,9 +795,11 @@ static void gpu_bmesh_vert_to_buffer_copy(BMVert *v,
const float fno[3],
const float *fmask,
const int cd_vert_mask_offset,
const int cd_vert_node_offset,
const bool show_mask,
const bool show_vcol,
bool *empty_mask)
bool *empty_mask,
int cd_vcol_offset)
{
/* Vertex should always be visible if it's used by a visible face. */
BLI_assert(!BM_elem_flag_test(v, BM_ELEM_HIDDEN));
@@ -809,14 +813,54 @@ static void gpu_bmesh_vert_to_buffer_copy(BMVert *v,
if (show_mask) {
float effective_mask = fmask ? *fmask : BM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset);
if (G.debug_value == 889) {
int ni = BM_ELEM_CD_GET_INT(v, cd_vert_node_offset);
effective_mask = ni == -1 ? 0.0f : (float)((ni*50) % 32) / 32.0f;
}
uchar cmask = (uchar)(effective_mask * 255);
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.msk, v_index, &cmask);
*empty_mask = *empty_mask && (cmask == 0);
}
if (show_vcol) {
if (show_vcol && cd_vcol_offset >= 0) {
ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
int col[4] = {0, 0, 0, 0};
int tot = 0;
BMIter iter;
BMLoop *l;
BM_ITER_ELEM (l, &iter, v, BM_LOOPS_OF_VERT) {
MLoopCol *ml = BM_ELEM_CD_GET_VOID_P(l, cd_vcol_offset);
col[0] += ml->r;
col[1] += ml->g;
col[2] += ml->b;
col[3] += ml->a;
tot++;
}
if (tot) {
col[0] /= tot;
col[1] /= tot;
col[2] /= tot;
col[3] /= tot;
vcol[0] = (ushort)(col[0] * 257);
vcol[1] = (ushort)(col[1] * 257);
vcol[2] = (ushort)(col[2] * 257);
vcol[3] = (ushort)(col[3] * 257);
// printf("%d %d %d %d %d\n", vcol[0], vcol[1], vcol[2], vcol[3], tot);
}
// const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.col, v_index, vcol);
}
else if (show_vcol) {
const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.col, v_index, &vcol);
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.col, v_index, vcol);
}
/* Add default face sets color to avoid artifacts. */
@@ -824,6 +868,111 @@ static void gpu_bmesh_vert_to_buffer_copy(BMVert *v,
GPU_vertbuf_attr_set(vert_buf, g_vbo_id.fset, v_index, &face_set);
}
/* Output a TMVert into the PBVH vertex buffer at v_index: position, normal,
 * optional mask and vertex-color attributes, plus a default face-set color.
 * `fno`/`fmask` override the vertex's own normal/mask for flat shading.
 * Threaded - do not call any functions that use OpenGL calls! */
static void gpu_trimesh_vert_to_buffer_copy(TMVert *v,
                                            GPUVertBuf *vert_buf,
                                            int v_index,
                                            const float fno[3],
                                            const float *fmask,
                                            const int cd_vert_mask_offset,
                                            const int cd_vert_node_offset,
                                            const bool show_mask,
                                            const bool show_vcol,
                                            bool *empty_mask,
                                            int cd_vcol_offset)
{
  /* Vertex should always be visible if it's used by a visible face. */
  BLI_assert(!TM_elem_flag_test(v, TM_ELEM_HIDDEN));

  /* Set coord, normal, and mask */
  GPU_vertbuf_attr_set(vert_buf, g_vbo_id.pos, v_index, v->co);

  short no_short[3];
  normal_float_to_short_v3(no_short, fno ? fno : v->no);
  GPU_vertbuf_attr_set(vert_buf, g_vbo_id.nor, v_index, no_short);

  if (show_mask) {
    float effective_mask = fmask ? *fmask : TM_ELEM_CD_GET_FLOAT(v, cd_vert_mask_offset);

    /* Debug visualization of PBVH node ownership, kept for reference
     * (cd_vert_node_offset is otherwise unused here). */
    // int ni = TM_ELEM_CD_GET_INT(v, cd_vert_node_offset);
    // float effective_mask = (float)(ni % 64) / 64.0f;

    uchar cmask = (uchar)(effective_mask * 255);
    GPU_vertbuf_attr_set(vert_buf, g_vbo_id.msk, v_index, &cmask);
    /* Track whether the whole node has a zero mask so the overlay can be
     * skipped. */
    *empty_mask = *empty_mask && (cmask == 0);
  }

  if (show_vcol && cd_vcol_offset >= 0) {
    ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};

    int col[4] = {0, 0, 0, 0};
    int tot = 0;

    /* Average the loop colors of every triangle around this vertex. */
    for (int i = 0; i < v->edges.length; i++) {
      TMEdge *e = v->edges.items[i];

      for (int j = 0; j < e->tris.length; j++) {
        TMFace *t = e->tris.items[j];
        TMLoopData *l = TM_GET_TRI_LOOP_EDGE(t, e);
        MLoopCol *ml = TM_ELEM_CD_GET_VOID_P(l, cd_vcol_offset);

        col[0] += ml->r;
        col[1] += ml->g;
        col[2] += ml->b;
        col[3] += ml->a;
        tot++;
      }
    }

    if (tot) {
      col[0] /= tot;
      col[1] /= tot;
      col[2] /= tot;
      col[3] /= tot;

      /* Widen 8-bit channels to 16 bit (255 * 257 == 65535). */
      vcol[0] = (ushort)(col[0] * 257);
      vcol[1] = (ushort)(col[1] * 257);
      vcol[2] = (ushort)(col[2] * 257);
      vcol[3] = (ushort)(col[3] * 257);
      // printf("%d %d %d %d %d\n", vcol[0], vcol[1], vcol[2], vcol[3], tot);
    }

    // const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
    GPU_vertbuf_attr_set(vert_buf, g_vbo_id.col, v_index, vcol);
  }
  else if (show_vcol) {
    /* No color layer: write opaque white. */
    const ushort vcol[4] = {USHRT_MAX, USHRT_MAX, USHRT_MAX, USHRT_MAX};
    GPU_vertbuf_attr_set(vert_buf, g_vbo_id.col, v_index, vcol);
  }

  /* Add default face sets color to avoid artifacts. */
  const uchar face_set[3] = {UCHAR_MAX, UCHAR_MAX, UCHAR_MAX};
  GPU_vertbuf_attr_set(vert_buf, g_vbo_id.fset, v_index, face_set);
}
/* Return the total number of vertices in both node vertex sets that do not
 * have TM_ELEM_HIDDEN set. */
static int gpu_trimesh_vert_visible_count(TableGSet *bm_unique_verts, TableGSet *bm_other_verts)
{
  int visible = 0;
  TMVert *v;
  TableGSet *sets[2] = {bm_unique_verts, bm_other_verts};

  for (int i = 0; i < 2; i++) {
    TMS_ITER (v, sets[i]) {
      visible += !TM_elem_flag_test(v, TM_ELEM_HIDDEN);
    }
    TMS_ITER_END
  }

  return visible;
}
/* Return the total number of vertices that don't have BM_ELEM_HIDDEN set */
static int gpu_bmesh_vert_visible_count(GSet *bm_unique_verts, GSet *bm_other_verts)
{
@@ -863,6 +1012,23 @@ static int gpu_bmesh_face_visible_count(GSet *bm_faces)
return totface;
}
/* Return the number of faces in the node face set that do not have
 * TM_ELEM_HIDDEN set. */
static int gpu_trimesh_face_visible_count(GSet *bm_faces)
{
  int visible = 0;
  GSetIterator iter;

  GSET_ITER (iter, bm_faces) {
    TMFace *f = BLI_gsetIterator_getKey(&iter);
    visible += !TM_elem_flag_test(f, TM_ELEM_HIDDEN);
  }

  return visible;
}
void GPU_pbvh_bmesh_buffers_update_free(GPU_PBVH_Buffers *buffers)
{
if (buffers->smooth) {
@@ -878,6 +1044,21 @@ void GPU_pbvh_bmesh_buffers_update_free(GPU_PBVH_Buffers *buffers)
}
}
/* Free the GPU buffers that must be rebuilt before an update. The wireframe
 * batch and its index buffer are always recreated; smooth shading also
 * rebuilds the triangle index buffer, so the triangle batch is invalidated
 * too. Not thread safe, must run in the OpenGL main thread. */
void GPU_pbvh_trimesh_buffers_update_free(GPU_PBVH_Buffers *buffers)
{
  GPU_BATCH_DISCARD_SAFE(buffers->lines);
  GPU_INDEXBUF_DISCARD_SAFE(buffers->index_lines_buf);

  if (buffers->smooth) {
    /* Smooth needs to recreate the index buffer, so invalidate the batch. */
    GPU_BATCH_DISCARD_SAFE(buffers->triangles);
    GPU_INDEXBUF_DISCARD_SAFE(buffers->index_buf);
  }
}
/* Creates a vertex buffer (coordinate, normal, color) and, if smooth
* shading, an element index buffer.
* Threaded - do not call any functions that use OpenGL calls! */
@@ -886,7 +1067,8 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
GSet *bm_faces,
GSet *bm_unique_verts,
GSet *bm_other_verts,
const int update_flags)
const int update_flags,
const int cd_vert_node_offset)
{
const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
const bool show_vcol = (update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
@@ -918,6 +1100,7 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
/* TODO, make mask layer optional for bmesh buffer */
const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);
const int cd_vcol_offset = CustomData_get_offset(&bm->vdata, CD_MLOOPCOL);
/* Fill vertex buffer */
if (!gpu_pbvh_vert_buf_data_set(buffers, totvert)) {
@@ -956,9 +1139,11 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
NULL,
NULL,
cd_vert_mask_offset,
cd_vert_node_offset,
show_mask,
show_vcol,
&empty_mask);
&empty_mask,
cd_vcol_offset);
idx[i] = v_index;
v_index++;
@@ -1023,9 +1208,11 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
f->no,
&fmask,
cd_vert_mask_offset,
cd_vert_node_offset,
show_mask,
show_vcol,
&empty_mask);
&empty_mask,
cd_vcol_offset);
}
}
}
@@ -1042,6 +1229,178 @@ void GPU_pbvh_bmesh_buffers_update(GPU_PBVH_Buffers *buffers,
gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
}
/* Creates a vertex buffer (coordinate, normal, color) and, if smooth
 * shading, an element index buffer for a trimesh PBVH node.
 * Threaded - do not call any functions that use OpenGL calls! */
void GPU_pbvh_trimesh_buffers_update(GPU_PBVH_Buffers *buffers,
                                     TM_TriMesh *bm,
                                     GSet *tm_faces,
                                     TableGSet *bm_unique_verts,
                                     TableGSet *bm_other_verts,
                                     const int update_flags,
                                     const int cd_vert_node_offset)
{
  const bool show_mask = (update_flags & GPU_PBVH_BUFFERS_SHOW_MASK) != 0;
  /* NOTE(review): vcol display is force-enabled here, unlike the bmesh path
   * which honors GPU_PBVH_BUFFERS_SHOW_VCOL — confirm this is intentional. */
  const bool show_vcol = true;  //(update_flags & GPU_PBVH_BUFFERS_SHOW_VCOL) != 0;
  int tottri, totvert;
  bool empty_mask = true;
  TMFace *f = NULL;

  /* Count visible triangles */
  tottri = gpu_trimesh_face_visible_count(tm_faces);

  if (buffers->smooth) {
    /* Count visible vertices */
    totvert = gpu_trimesh_vert_visible_count(bm_unique_verts, bm_other_verts);
  }
  else {
    /* Flat shading duplicates one vertex per triangle corner. */
    totvert = tottri * 3;
  }

  if (!tottri) {
    if (BLI_gset_len(tm_faces) != 0) {
      /* Node is just hidden. */
    }
    else {
      /* Node is empty: release the GPU buffers on the next flush. */
      buffers->clear_bmesh_on_flush = true;
    }

    buffers->tot_tri = 0;
    return;
  }

  /* TODO, make mask layer optional for bmesh buffer */
  const int cd_vert_mask_offset = CustomData_get_offset(&bm->vdata, CD_PAINT_MASK);

  /* Fill vertex buffer */
  if (!gpu_pbvh_vert_buf_data_set(buffers, totvert)) {
    /* Memory map failed */
    return;
  }

  int v_index = 0;

  int cd_vcol_off = CustomData_get_offset(&bm->ldata, CD_MLOOPCOL);

  if (buffers->smooth) {
    /* Fill the vertex and triangle buffer in one pass over faces. */
    GPUIndexBufBuilder elb, elb_lines;
    GPU_indexbuf_init(&elb, GPU_PRIM_TRIS, tottri, totvert);
    GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, totvert);

    /* Maps each TMVert to its slot in the vertex buffer so shared vertices
     * are only emitted once. */
    GHash *bm_vert_to_index = BLI_ghash_int_new_ex("bm_vert_to_index", totvert);

    GSetIterator gs_iter;
    GSET_ITER (gs_iter, tm_faces) {
      f = BLI_gsetIterator_getKey(&gs_iter);

      if (!TM_elem_flag_test(f, TM_ELEM_HIDDEN)) {
        uint idx[3];
        for (int i = 0; i < 3; i++) {
          TMVert *v = TM_GET_TRI_VERT(f, i);

          void **idx_p;
          if (!BLI_ghash_ensure_p(bm_vert_to_index, v, &idx_p)) {
            /* Add vertex to the vertex buffer each time a new one is encountered */
            *idx_p = POINTER_FROM_UINT(v_index);

            gpu_trimesh_vert_to_buffer_copy(v,
                                            buffers->vert_buf,
                                            v_index,
                                            NULL,
                                            NULL,
                                            cd_vert_mask_offset,
                                            cd_vert_node_offset,
                                            show_mask,
                                            show_vcol,
                                            &empty_mask,
                                            cd_vcol_off);

            idx[i] = v_index;
            v_index++;
          }
          else {
            /* Vertex already in the vertex buffer, just get the index. */
            idx[i] = POINTER_AS_UINT(*idx_p);
          }
        }

        GPU_indexbuf_add_tri_verts(&elb, idx[0], idx[1], idx[2]);

        GPU_indexbuf_add_line_verts(&elb_lines, idx[0], idx[1]);
        GPU_indexbuf_add_line_verts(&elb_lines, idx[1], idx[2]);
        GPU_indexbuf_add_line_verts(&elb_lines, idx[2], idx[0]);
      }
    }

    BLI_ghash_free(bm_vert_to_index, NULL, NULL);

    buffers->tot_tri = tottri;
    if (buffers->index_buf == NULL) {
      buffers->index_buf = GPU_indexbuf_build(&elb);
    }
    else {
      GPU_indexbuf_build_in_place(&elb, buffers->index_buf);
    }
    buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
  }
  else {
    /* Flat shading: emit three unshared vertices per visible triangle. */
    GSetIterator gs_iter;

    GPUIndexBufBuilder elb_lines;
    GPU_indexbuf_init(&elb_lines, GPU_PRIM_LINES, tottri * 3, tottri * 3);
    int cd_vcol_offset = CustomData_get_offset(&bm->ldata, CD_MLOOPCOL);

    GSET_ITER (gs_iter, tm_faces) {
      f = BLI_gsetIterator_getKey(&gs_iter);

      BLI_assert(f->len == 3);

      if (!TM_elem_flag_test(f, TM_ELEM_HIDDEN)) {
        float fmask = 0.0f;
        int i;

        /* Average mask value */
        for (i = 0; i < 3; i++) {
          TMVert *v2 = TM_GET_TRI_VERT(f, i);
          fmask += TM_ELEM_CD_GET_FLOAT(v2, cd_vert_mask_offset);
        }
        fmask /= 3.0f;

        GPU_indexbuf_add_line_verts(&elb_lines, v_index + 0, v_index + 1);
        GPU_indexbuf_add_line_verts(&elb_lines, v_index + 1, v_index + 2);
        GPU_indexbuf_add_line_verts(&elb_lines, v_index + 2, v_index + 0);

        for (i = 0; i < 3; i++) {
          TMVert *v2 = TM_GET_TRI_VERT(f, i);
          gpu_trimesh_vert_to_buffer_copy(v2,
                                          buffers->vert_buf,
                                          v_index++,
                                          f->no,
                                          &fmask,
                                          cd_vert_mask_offset,
                                          cd_vert_node_offset,
                                          show_mask,
                                          show_vcol,
                                          &empty_mask,
                                          cd_vcol_offset);
        }
      }
    }

    buffers->index_lines_buf = GPU_indexbuf_build(&elb_lines);
    buffers->tot_tri = tottri;
  }

  /* Get material index from the last face we iterated on. */
  buffers->material_index = (f) ? f->mat_nr : 0;

  buffers->show_overlay = !empty_mask;

  gpu_pbvh_batch_init(buffers, GPU_PRIM_TRIS);
}
/** \} */
/* -------------------------------------------------------------------- */
@@ -1061,6 +1420,19 @@ GPU_PBVH_Buffers *GPU_pbvh_bmesh_buffers_build(bool smooth_shading)
return buffers;
}
/* Allocate an empty GPU_PBVH_Buffers for a trimesh node.
 * Threaded - do not call any functions that use OpenGL calls! */
GPU_PBVH_Buffers *GPU_pbvh_trimesh_buffers_build(bool smooth_shading)
{
  GPU_PBVH_Buffers *buffers = MEM_callocN(sizeof(GPU_PBVH_Buffers), "GPU_Buffers");

  buffers->use_trimesh = true;
  buffers->smooth = smooth_shading;
  buffers->show_overlay = true;

  return buffers;
}
GPUBatch *GPU_pbvh_buffers_batch_get(GPU_PBVH_Buffers *buffers, bool fast, bool wires)
{
if (wires) {

View File

@@ -31,6 +31,8 @@
extern "C" {
#endif
struct BLI_ThreadSafePool;
/** Descriptor and storage for a custom data layer. */
typedef struct CustomDataLayer {
/** Type of data in layer. */
@@ -82,7 +84,8 @@ typedef struct CustomData {
/** In editmode, total size of all data layers. */
int totsize;
/** (BMesh Only): Memory pool for allocation of blocks. */
struct BLI_mempool *pool;
//struct BLI_mempool *pool;
struct BLI_ThreadSafePool *tpool;
/** External file storing customdata layers. */
CustomDataExternal *external;
} CustomData;

View File

@@ -141,6 +141,7 @@
.unit_line_thickness = 1.0f, \
\
.ffcodecdata = _DNA_DEFAULT_FFMpegCodecData, \
.hair_cyl_res = 6, \
}
#define _DNA_DEFAULT_AudioData \

View File

@@ -771,7 +771,7 @@ typedef struct RenderData {
short views_format;
/* Hair Display */
short hair_type, hair_subdiv;
short hair_type, hair_subdiv, hair_cyl_res, _pad10[3];
/* Motion blur shutter */
struct CurveMapping mblur_shutter_curve;
@@ -786,6 +786,7 @@ typedef enum eQualityOption {
typedef enum eHairType {
SCE_HAIR_SHAPE_STRAND = 0,
SCE_HAIR_SHAPE_STRIP = 1,
SCE_HAIR_SHAPE_CYLINDER = 2,
} eHairType;
/* *************************************************************** */

View File

@@ -203,6 +203,8 @@ static int rna_Context_mode_get(PointerRNA *ptr)
return CTX_data_mode_enum(C);
}
#include "BPY_extern.h"
static struct Depsgraph *rna_Context_evaluated_depsgraph_get(bContext *C)
{
struct Depsgraph *depsgraph;

View File

@@ -2036,7 +2036,7 @@ bool rna_GPencil_object_poll(PointerRNA *UNUSED(ptr), PointerRNA value)
/** RNA getter: dyntopo sculpting is active when the object's sculpt
 * session holds either trimesh (tm) or bmesh (bm) dynamic-topology data.
 *
 * Fix: the stale `return (ss && ss->bm);` left before the updated return
 * made the tm-aware check unreachable; only the combined check remains. */
int rna_Object_use_dynamic_topology_sculpting_get(PointerRNA *ptr)
{
  SculptSession *ss = ((Object *)ptr->owner_id)->sculpt;
  return (ss && (ss->tm || ss->bm));
}
#else

View File

@@ -5781,6 +5781,7 @@ static void rna_def_scene_render_data(BlenderRNA *brna)
/* RNA enum items mirroring #eHairType (see DNA: SCE_HAIR_SHAPE_*). */
static const EnumPropertyItem hair_shape_type_items[] = {
    {SCE_HAIR_SHAPE_STRAND, "STRAND", 0, "Strand", ""},
    {SCE_HAIR_SHAPE_STRIP, "STRIP", 0, "Strip", ""},
    {SCE_HAIR_SHAPE_CYLINDER, "CYLINDER", 0, "Cylinder", ""},
    /* Sentinel entry terminating the item list. */
    {0, NULL, 0, NULL, NULL},
};
@@ -6002,6 +6003,11 @@ static void rna_def_scene_render_data(BlenderRNA *brna)
RNA_def_property_ui_text(prop, "Additional Subdiv", "Additional subdivision along the hair");
RNA_def_property_update(prop, NC_SCENE | ND_RENDER_OPTIONS, "rna_Scene_glsl_update");
prop = RNA_def_property(srna, "hair_cyl_res", PROP_INT, PROP_NONE);
RNA_def_property_range(prop, 0, 64);
RNA_def_property_ui_text(prop, "Resolution", "Hair Cylinder Resolution");
RNA_def_property_update(prop, NC_SCENE | ND_RENDER_OPTIONS, "rna_Scene_glsl_update");
/* Performance */
prop = RNA_def_property(srna, "use_high_quality_normals", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_sdna(prop, NULL, "perf_flag", SCE_PERF_HQ_NORMALS);

View File

@@ -85,22 +85,17 @@ static PyObject *bpygpu_offscreen_new(PyTypeObject *UNUSED(self), PyObject *args
{
BPYGPU_IS_INIT_OR_ERROR_OBJ;
GPUOffScreen *ofs = NULL;
int width, height;
GPUOffScreen *ofs;
int width, height, high_bitdepth=0, samples = 0;
char err_out[256];
static const char *_keywords[] = {"width", "height", NULL};
static _PyArg_Parser _parser = {"ii|i:GPUOffScreen.__new__", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &width, &height)) {
static const char *_keywords[] = {"width", "height", "samples", "high_bitdepth", NULL};
static _PyArg_Parser _parser = {"ii|ii:GPUOffScreen.__new__", _keywords, 0};
if (!_PyArg_ParseTupleAndKeywordsFast(args, kwds, &_parser, &width, &height, &samples, &high_bitdepth)) {
return NULL;
}
if (GPU_context_active_get()) {
ofs = GPU_offscreen_create(width, height, true, false, err_out);
}
else {
strncpy(err_out, "No active GPU context found", 256);
}
ofs = GPU_offscreen_create(width, height, true, high_bitdepth, err_out);
if (ofs == NULL) {
PyErr_Format(PyExc_RuntimeError,