Geometry Nodes: Node group operators initial phase #108947

Merged
Hans Goudey merged 72 commits from node-group-operators into main 2023-06-29 13:58:01 +02:00
11 changed files with 98 additions and 78 deletions
Showing only changes of commit 41c5490137

View File

@@ -245,6 +245,7 @@ if(WITH_BOOST)
   if(WITH_USD AND USD_PYTHON_SUPPORT)
     list(APPEND _boost_FIND_COMPONENTS python${PYTHON_VERSION_NO_DOTS})
   endif()
+  set(Boost_NO_WARN_NEW_VERSIONS ON)
   find_package(Boost COMPONENTS ${_boost_FIND_COMPONENTS})
   # Boost Python is separate to avoid linking Python into tests that don't need it.

View File

@@ -394,6 +394,7 @@ if(WITH_BOOST)
     list(APPEND __boost_packages python${PYTHON_VERSION_NO_DOTS})
   endif()
   list(APPEND __boost_packages system)
+  set(Boost_NO_WARN_NEW_VERSIONS ON)
   find_package(Boost 1.48 COMPONENTS ${__boost_packages})
   if(NOT Boost_FOUND)
     # try to find non-multithreaded if -mt not found, this flag

View File

@@ -57,7 +57,9 @@ void workbench_engine_init(void *ved)
   wpd->dummy_image_tx = txl->dummy_image_tx;
   if (OBJECT_ID_PASS_ENABLED(wpd)) {
-    wpd->object_id_tx = DRW_texture_pool_query_fullscreen(GPU_R16UI, &draw_engine_workbench);
+    const eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ;
+    wpd->object_id_tx = DRW_texture_pool_query_fullscreen_ex(
+        GPU_R16UI, usage, &draw_engine_workbench);
   }
   else {
     /* Don't free because it's a pool texture. */
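
Note on the texture-usage changes in this and the two workbench hunks that follow: `DRW_texture_pool_query_fullscreen` requested a pool texture with a general default usage mask, while the `_ex` variant lets the caller declare exactly how the texture will be used. A minimal sketch of the pattern, using only names that appear in this diff (the `owner` pointer stands in for the requesting draw engine):

  /* Request a fullscreen pool texture that is only rendered to and sampled
   * from, so the backend can pick more optimal storage for it. */
  const eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ;
  GPUTexture *tx = DRW_texture_pool_query_fullscreen_ex(GPU_RGBA16F, usage, owner);

Declaring the narrowest usage that still covers every later operation matters most on explicit backends such as Metal, which can validate and optimize against the declared mask.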

View File

@@ -29,9 +29,10 @@ void workbench_opaque_engine_init(WORKBENCH_Data *data)
   /* Reused the same textures format for transparent pipeline to share the textures. */
   const eGPUTextureFormat col_tex_format = GPU_RGBA16F;
   const eGPUTextureFormat nor_tex_format = NORMAL_ENCODING_ENABLED() ? GPU_RG16F : GPU_RGBA16F;
+  const eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ;
-  wpd->material_buffer_tx = DRW_texture_pool_query_fullscreen(col_tex_format, owner);
-  wpd->normal_buffer_tx = DRW_texture_pool_query_fullscreen(nor_tex_format, owner);
+  wpd->material_buffer_tx = DRW_texture_pool_query_fullscreen_ex(col_tex_format, usage, owner);
+  wpd->normal_buffer_tx = DRW_texture_pool_query_fullscreen_ex(nor_tex_format, usage, owner);
   GPU_framebuffer_ensure_config(&fbl->opaque_fb,
                                 {

View File

@@ -36,8 +36,9 @@ void workbench_transparent_engine_init(WORKBENCH_Data *data)
   const eGPUTextureFormat accum_tex_format = GPU_RGBA16F;
   const eGPUTextureFormat reveal_tex_format = NORMAL_ENCODING_ENABLED() ? GPU_RG16F : GPU_RGBA32F;
-  wpd->accum_buffer_tx = DRW_texture_pool_query_fullscreen(accum_tex_format, owner);
-  wpd->reveal_buffer_tx = DRW_texture_pool_query_fullscreen(reveal_tex_format, owner);
+  eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_SHADER_READ;
+  wpd->accum_buffer_tx = DRW_texture_pool_query_fullscreen_ex(accum_tex_format, usage, owner);
+  wpd->reveal_buffer_tx = DRW_texture_pool_query_fullscreen_ex(reveal_tex_format, usage, owner);
   GPU_framebuffer_ensure_config(&fbl->transp_accum_fb,
                                 {

View File

@@ -23,15 +23,13 @@ namespace blender::ed::curves {
 static bool has_surface_deformation_node(const bNodeTree &ntree)
 {
-  LISTBASE_FOREACH (const bNode *, node, &ntree.nodes) {
-    if (node->type == GEO_NODE_DEFORM_CURVES_ON_SURFACE) {
-      return true;
-    }
-    if (node->type == NODE_GROUP) {
-      if (node->id != nullptr) {
-        if (has_surface_deformation_node(*reinterpret_cast<const bNodeTree *>(node->id))) {
-          return true;
-        }
+  if (!ntree.nodes_by_type("GeometryNodeDeformCurvesOnSurface").is_empty()) {
+    return true;
+  }
+  for (const bNode *node : ntree.group_nodes()) {
+    if (const bNodeTree *sub_tree = reinterpret_cast<const bNodeTree *>(node->id)) {
+      if (has_surface_deformation_node(*sub_tree)) {
+        return true;
+      }
+    }
+  }
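
The rewrite above leans on the node-tree topology cache: `nodes_by_type()` answers "does this tree contain node type X" directly, and `group_nodes()` iterates only the group instances that need recursing. A self-contained sketch of the same recursion shape, with toy types standing in for `bNodeTree`/`bNode`:

  #include <string>
  #include <vector>

  struct Tree;
  struct Node {
    std::string type;
    const Tree *group = nullptr; /* Non-null when this node instantiates a group. */
  };
  struct Tree {
    std::vector<Node> nodes;
  };

  /* Mirrors has_surface_deformation_node(): check this tree first, then
   * recurse into every instantiated group. */
  static bool tree_contains(const Tree &tree, const std::string &type)
  {
    for (const Node &node : tree.nodes) {
      if (node.type == type) {
        return true;
      }
    }
    for (const Node &node : tree.nodes) {
      if (node.group && tree_contains(*node.group, type)) {
        return true;
      }
    }
    return false;
  }

As written (and as in the hunk above) the recursion can re-walk a group that is referenced from several places; the modifier hunks later in this commit thread a `checked_groups` set through the recursion for exactly that reason.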

View File

@@ -81,7 +81,7 @@ void immDrawPixelsTexScaledFullSize(const IMMDrawPixelsTexState *state,
   const int mip_len = use_mipmap ? 9999 : 1;
   GPUTexture *tex = GPU_texture_create_2d(
-      "immDrawPixels", img_w, img_h, mip_len, gpu_format, GPU_TEXTURE_USAGE_GENERAL, NULL);
+      "immDrawPixels", img_w, img_h, mip_len, gpu_format, GPU_TEXTURE_USAGE_SHADER_READ, NULL);
   const bool use_float_data = ELEM(gpu_format, GPU_RGBA16F, GPU_RGB16F, GPU_R16F);
   eGPUDataFormat gpu_data_format = (use_float_data) ? GPU_DATA_FLOAT : GPU_DATA_UBYTE;
@@ -183,7 +183,7 @@ void immDrawPixelsTexTiled_scaling_clipping(IMMDrawPixelsTexState *state,
   size_t stride = components * ((use_float_data) ? sizeof(float) : sizeof(uchar));
   GPUTexture *tex = GPU_texture_create_2d(
-      "immDrawPixels", tex_w, tex_h, 1, gpu_format, GPU_TEXTURE_USAGE_GENERAL, NULL);
+      "immDrawPixels", tex_w, tex_h, 1, gpu_format, GPU_TEXTURE_USAGE_SHADER_READ, NULL);
   GPU_texture_filter_mode(tex, use_filter);
   GPU_texture_wrap_mode(tex, false, true);
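
Same idea as the workbench change: these textures are only ever sampled by the immediate-mode shader, so the catch-all `GPU_TEXTURE_USAGE_GENERAL` is narrowed to `GPU_TEXTURE_USAGE_SHADER_READ`. A sketch of the call shape with hypothetical size and format values:

  /* Hypothetical 256x256 single-mip texture, created empty and used only
   * for shader reads; stricter backends may reject operations that fall
   * outside a texture's declared usage. */
  GPUTexture *tex = GPU_texture_create_2d(
      "immDrawPixels", 256, 256, 1, GPU_RGBA8, GPU_TEXTURE_USAGE_SHADER_READ, NULL);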

View File

@@ -225,8 +225,8 @@ static void pack_islands_alpaca_turbo(const Span<UVAABBIsland *> islands,
   /* Visit every island in order. */
   for (UVAABBIsland *island : islands) {
-    float dsm_u = island->uv_diagonal.x;
-    float dsm_v = island->uv_diagonal.y;
+    const float dsm_u = island->uv_diagonal.x;
+    const float dsm_v = island->uv_diagonal.y;
     bool restart = false;
     if (zigzag) {
@@ -720,6 +720,15 @@ static void pack_islands_alpaca_rotate(const Span<UVAABBIsland *> islands,
   *r_max_v = next_v1;
 }
+/**
+ * Pack islands using a mix of other strategies.
+ * \param islands: The islands to be packed. Will be modified with results.
+ * \param box_array: Transition storage, will soon be removed.
+ * \param scale: Scale islands by `scale` before packing.
+ * \param margin: Add `margin` units around islands before packing.
+ * \param params: Additional parameters. Scale and margin information is ignored.
+ * \return Size of square covering the resulting packed UVs. The maximum `u` or `v` co-ordinate.
+ */
 static float pack_islands_scale_margin(const Span<PackIsland *> islands,
                                        BoxPack *box_array,
                                        const float scale,
@@ -830,6 +839,15 @@ static float pack_islands_scale_margin(const Span<PackIsland *> islands,
       break;
   }
+  /* Write back box_pack UVs. */
+  for (int64_t i = 0; i < max_box_pack; i++) {
+    PackIsland *pack_island = islands[aabbs[i]->index];
+    BoxPack *box = box_array + i;
+    pack_island->angle = 0.0f;
+    pack_island->pre_translate.x = (box->x + margin) / scale - pack_island->bounds_rect.xmin;
+    pack_island->pre_translate.y = (box->y + margin) / scale - pack_island->bounds_rect.ymin;
+  }
   /* At this stage, `max_u` and `max_v` contain the box_pack UVs. */
   /* Call Alpaca. */
@@ -843,12 +861,22 @@ static float pack_islands_scale_margin(const Span<PackIsland *> islands,
   /* Write back Alpaca UVs. */
   for (int64_t i = max_box_pack; i < aabbs.size(); i++) {
     UVAABBIsland *aabb = aabbs[i];
-    BoxPack *box = &box_array[i];
-    box->x = aabb->uv_placement.x;
-    box->y = aabb->uv_placement.y;
+    PackIsland *pack_island = islands[aabb->index];
+    pack_island->angle = aabb->angle;
+    if (aabb->angle) {
+      pack_island->pre_translate.x = (aabb->uv_placement.y + margin) / scale /
+                                         -pack_island->aspect_y -
+                                     pack_island->bounds_rect.xmax;
+      pack_island->pre_translate.y = (aabb->uv_placement.x + margin) / scale *
+                                         pack_island->aspect_y -
+                                     pack_island->bounds_rect.ymin;
+    }
+    else {
+      pack_island->pre_translate.x = (aabb->uv_placement.x + margin) / scale -
+                                     pack_island->bounds_rect.xmin;
+      pack_island->pre_translate.y = (aabb->uv_placement.y + margin) / scale -
+                                     pack_island->bounds_rect.ymin;
+    }
   }
   /* Memory management. */
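
The Alpaca write-back above folds packing position, margin, scale, and the island's own bounds into a single `pre_translate`, so no later pass has to touch the boxes. For the non-rotated case the formula is simply (position + margin) / scale - bounds minimum; a small self-contained check with made-up numbers:

  #include <cassert>
  #include <cmath>

  int main()
  {
    /* Hypothetical values: packed at x = 2.0 with margin 0.1, after the
     * islands were scaled by 2.0, and the island bounds start at 0.5. */
    const float box_x = 2.0f, margin = 0.1f, scale = 2.0f, xmin = 0.5f;
    const float pre_translate_x = (box_x + margin) / scale - xmin;
    /* (2.0 + 0.1) / 2.0 - 0.5 = 0.55: translating the island by 0.55 puts
     * its left edge at the packed position once margin and scale are
     * accounted for. */
    assert(std::fabs(pre_translate_x - 0.55f) < 1e-6f);
    return 0;
  }

The rotated branch swaps the `u`/`v` placement and divides or multiplies by `aspect_y`, since a 90-degree turn exchanges the axes of a non-square aspect.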
@@ -861,6 +889,9 @@ static float pack_islands_scale_margin(const Span<PackIsland *> islands,
   return std::max(max_u, max_v);
 }
+/** Find the optimal scale to pack islands into the unit square.
+ * returns largest scale that will pack `islands` into the unit square.
+ */
 static float pack_islands_margin_fraction(const Span<PackIsland *> &island_vector,
                                           BoxPack *box_array,
                                           const float margin_fraction,
@@ -952,7 +983,7 @@ static float pack_islands_margin_fraction(const Span<PackIsland *> &island_vecto
   const bool flush = true;
   if (flush) {
-    /* Write back best pack as a side-effect. First get best pack. */
+    /* Write back best pack as a side-effect. */
     if (scale_last != scale_low) {
       scale_last = scale_low;
       const float max_uv = pack_islands_scale_margin(
@@ -961,16 +992,6 @@ static float pack_islands_margin_fraction(const Span<PackIsland *> &island_vecto
       UNUSED_VARS(max_uv);
       /* TODO (?): `if (max_uv < 1.0f) { scale_last /= max_uv; }` */
     }
-    /* Then expand PackIslands by the correct amount. */
-    for (const int64_t index : island_vector.index_range()) {
-      BoxPack *box = &box_array[index];
-      box->x /= scale_last;
-      box->y /= scale_last;
-      PackIsland *island = island_vector[index];
-      BLI_rctf_pad(
-          &island->bounds_rect, margin_fraction / scale_last, margin_fraction / scale_last);
-    }
   }
   return scale_last;
 }
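
With the write-back now happening inside pack_islands_scale_margin, the removed expansion loop above became redundant. The surrounding search (per the new doc comment) looks for the largest scale that still packs into the unit square; a minimal bisection sketch of that idea, assuming a hypothetical fits(scale) predicate that runs a trial pack and compares the returned maximum UV against 1.0:

  #include <functional>

  /* Approximate the largest scale in (0, hi] whose trial pack still fits
   * the unit square. `fits` stands in for running a trial pack. */
  static float largest_fitting_scale(const std::function<bool(float)> &fits, float hi)
  {
    float lo = 0.0f; /* Scale 0 trivially fits. */
    for (int i = 0; i < 32; i++) {
      const float mid = 0.5f * (lo + hi);
      if (fits(mid)) {
        lo = mid; /* Still fits; try larger. */
      }
      else {
        hi = mid; /* Overflows the unit square; shrink. */
      }
    }
    return lo;
  }

The real function additionally reuses the last trial pack as the returned result (the `scale_last != scale_low` check above) so the winning layout does not have to be recomputed.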
@@ -1016,7 +1037,6 @@ static BoxPack *pack_islands_box_array(const Span<PackIsland *> &islands,
     const float scale = pack_islands_margin_fraction(islands, box_array, params.margin, params);
     r_scale[0] = scale;
     r_scale[1] = scale;
-    /* pack_islands_margin_fraction will pad PackIslands, return early. */
     return box_array;
   }
@@ -1038,10 +1058,6 @@ static BoxPack *pack_islands_box_array(const Span<PackIsland *> &islands,
   r_scale[0] = 1.0f / max_uv;
   r_scale[1] = r_scale[0];
-  for (int index = 0; index < islands.size(); index++) {
-    PackIsland *island = islands[index];
-    BLI_rctf_pad(&island->bounds_rect, margin, margin);
-  }
   return box_array;
 }
@@ -1050,21 +1066,6 @@ void pack_islands(const Span<PackIsland *> &islands,
                   float r_scale[2])
 {
   BoxPack *box_array = pack_islands_box_array(islands, params, r_scale);
-  for (int64_t i : islands.index_range()) {
-    BoxPack *box = box_array + i;
-    PackIsland *island = islands[box->index];
-    if (island->angle) {
-      /* TODO: Apply proper rotation. */
-      island->pre_translate.x = (-box->y / island->aspect_y) - island->bounds_rect.xmax;
-      island->pre_translate.y = (box->x * island->aspect_y) - island->bounds_rect.ymin;
-    }
-    else {
-      island->pre_translate.x = box->x - island->bounds_rect.xmin;
-      island->pre_translate.y = box->y - island->bounds_rect.ymin;
-    }
-  }
   MEM_freeN(box_array);
 }

View File

@@ -1041,14 +1041,17 @@ void gpu::MTLTexture::update_sub(
       if (texture_.storageMode == MTLStorageModeManaged) {
         [blit_encoder synchronizeResource:texture_];
       }
+      [blit_encoder optimizeContentsForGPUAccess:texture_];
     }
     else {
       /* Textures which use MTLStorageModeManaged need to have updated contents
        * synced back to CPU to avoid an automatic flush overwriting contents. */
-      blit_encoder = ctx->main_command_buffer.ensure_begin_blit_encoder();
       if (texture_.storageMode == MTLStorageModeManaged) {
+        blit_encoder = ctx->main_command_buffer.ensure_begin_blit_encoder();
         [blit_encoder synchronizeResource:texture_];
       }
+      [blit_encoder optimizeContentsForGPUAccess:texture_];
     }
   /* Decrement texture reference counts. This ensures temporary texture views are released. */
@@ -1110,6 +1113,7 @@ void MTLTexture::update_sub(int offset[3],
     if (texture_.storageMode == MTLStorageModeManaged) {
       [blit_encoder synchronizeResource:texture_];
     }
+    [blit_encoder optimizeContentsForGPUAccess:texture_];
   }
   else {
     BLI_assert(false);
@@ -1230,6 +1234,7 @@ void gpu::MTLTexture::copy_to(Texture *dst)
       BLI_assert(mt_dst->d_ == d_);
       [blit_encoder copyFromTexture:this->get_metal_handle_base()
                           toTexture:mt_dst->get_metal_handle_base()];
+      [blit_encoder optimizeContentsForGPUAccess:mt_dst->get_metal_handle_base()];
     } break;
     default: {
       int slice = 0;

View File

@@ -216,22 +216,25 @@ static bool node_needs_own_transform_relation(const bNode &node)
 static void process_nodes_for_depsgraph(const bNodeTree &tree,
                                         Set<ID *> &ids,
-                                        bool &r_needs_own_transform_relation)
+                                        bool &r_needs_own_transform_relation,
+                                        Set<const bNodeTree *> &checked_groups)
 {
-  Set<const bNodeTree *> handled_groups;
+  if (!checked_groups.add(&tree)) {
+    return;
+  }
-  LISTBASE_FOREACH (const bNode *, node, &tree.nodes) {
+  tree.ensure_topology_cache();
+  for (const bNode *node : tree.all_nodes()) {
     add_used_ids_from_sockets(node->inputs, ids);
     add_used_ids_from_sockets(node->outputs, ids);
-    if (ELEM(node->type, NODE_GROUP, NODE_CUSTOM_GROUP)) {
-      const bNodeTree *group = (bNodeTree *)node->id;
-      if (group != nullptr && handled_groups.add(group)) {
-        process_nodes_for_depsgraph(*group, ids, r_needs_own_transform_relation);
-      }
-    }
     r_needs_own_transform_relation |= node_needs_own_transform_relation(*node);
   }
+  for (const bNode *node : tree.group_nodes()) {
+    if (const bNodeTree *sub_tree = reinterpret_cast<const bNodeTree *>(node->id)) {
+      process_nodes_for_depsgraph(*sub_tree, ids, r_needs_own_transform_relation, checked_groups);
+    }
+  }
 }
 static void find_used_ids_from_settings(const NodesModifierSettings &settings, Set<ID *> &ids)
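
The key change above: `checked_groups` is now threaded through the recursion as a parameter instead of being a fresh local `handled_groups` per call, so a group referenced from several places in a tree hierarchy is processed exactly once. The shape of that fix as a self-contained sketch (std::unordered_set stands in for Blender's `Set`):

  #include <unordered_set>
  #include <vector>

  struct Tree {
    std::vector<const Tree *> groups; /* Trees instantiated by group nodes. */
  };

  /* Visit every tree reachable from `tree` exactly once, even when the
   * same group is referenced along several paths. */
  static void visit(const Tree &tree, std::unordered_set<const Tree *> &visited)
  {
    if (!visited.insert(&tree).second) {
      return; /* Already handled via another path. */
    }
    /* ... process this tree's own nodes here ... */
    for (const Tree *group : tree.groups) {
      if (group) {
        visit(*group, visited);
      }
    }
  }

The caller owns the set, mirroring the updateDepsgraph hunk below: declare it once, pass it into the root call, and every nested call shares it. check_tree_for_time_node in this same file gets the identical treatment.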
@@ -289,7 +292,9 @@ static void updateDepsgraph(ModifierData *md, const ModifierUpdateDepsgraphConte
   bool needs_own_transform_relation = false;
   Set<ID *> used_ids;
   find_used_ids_from_settings(nmd->settings, used_ids);
-  process_nodes_for_depsgraph(*nmd->node_group, used_ids, needs_own_transform_relation);
+  Set<const bNodeTree *> checked_groups;
+  process_nodes_for_depsgraph(
+      *nmd->node_group, used_ids, needs_own_transform_relation, checked_groups);
   if (ctx->object->type == OB_CURVES) {
     Curves *curves_id = static_cast<Curves *>(ctx->object->data);
@@ -328,19 +333,18 @@ static void updateDepsgraph(ModifierData *md, const ModifierUpdateDepsgraphConte
   }
 }
-static bool check_tree_for_time_node(const bNodeTree &tree,
-                                     Set<const bNodeTree *> &r_checked_trees)
+static bool check_tree_for_time_node(const bNodeTree &tree, Set<const bNodeTree *> &checked_groups)
 {
-  if (!r_checked_trees.add(&tree)) {
+  if (!checked_groups.add(&tree)) {
     return false;
   }
-  LISTBASE_FOREACH (const bNode *, node, &tree.nodes) {
-    if (node->type == GEO_NODE_INPUT_SCENE_TIME) {
-      return true;
-    }
-    if (node->type == NODE_GROUP) {
-      const bNodeTree *sub_tree = reinterpret_cast<const bNodeTree *>(node->id);
-      if (sub_tree && check_tree_for_time_node(*sub_tree, r_checked_trees)) {
+  tree.ensure_topology_cache();
+  if (!tree.nodes_by_type("GeometryNodeInputSceneTime").is_empty()) {
+    return true;
+  }
+  for (const bNode *node : tree.group_nodes()) {
+    if (const bNodeTree *sub_tree = reinterpret_cast<const bNodeTree *>(node->id)) {
+      if (check_tree_for_time_node(*sub_tree, checked_groups)) {
         return true;
       }
     }
@@ -355,8 +359,8 @@ static bool dependsOnTime(struct Scene * /*scene*/, ModifierData *md)
   if (tree == nullptr) {
     return false;
   }
-  Set<const bNodeTree *> checked_trees;
-  return check_tree_for_time_node(*tree, checked_trees);
+  Set<const bNodeTree *> checked_groups;
+  return check_tree_for_time_node(*tree, checked_groups);
 }
 static void foreachIDLink(ModifierData *md, Object *ob, IDWalkFunc walk, void *userData)

View File

@@ -736,8 +736,9 @@ if(WITH_CYCLES OR WITH_OPENGL_RENDER_TESTS)
     set(_cycles_render_tests bake;${render_tests};osl)
     foreach(render_test ${_cycles_render_tests})
+      set(_cycles_test_name "cycles_${render_test}_${_cycles_device_lower}")
       add_python_test(
-        cycles_${render_test}_${_cycles_device_lower}
+        ${_cycles_test_name}
         ${CMAKE_CURRENT_LIST_DIR}/cycles_render_tests.py
         -blender "${TEST_BLENDER_EXE}"
         -testdir "${TEST_SRC_DIR}/render/${render_test}"
@@ -746,6 +747,11 @@ if(WITH_CYCLES OR WITH_OPENGL_RENDER_TESTS)
         -device ${_cycles_device}
         -blacklist ${_cycles_blacklist}
       )
+      if(NOT("${_cycles_device_lower}" STREQUAL "cpu"))
+        set_tests_properties(${_cycles_test_name} PROPERTIES RUN_SERIAL TRUE)
+      endif()
+      unset(_cycles_test_name)
     endforeach()
   endforeach()
   unset(_cycles_blacklist)