WIP: Geometry Nodes: A Modeling Approach to Gizmos #108744
@@ -215,6 +215,7 @@ class GHOST_DeviceVK {
     device_features.geometryShader = VK_TRUE;
     device_features.dualSrcBlend = VK_TRUE;
     device_features.logicOp = VK_TRUE;
+    device_features.imageCubeArray = VK_TRUE;
 #endif
 
     VkDeviceCreateInfo device_create_info = {};
@@ -309,7 +310,7 @@ static GHOST_TSuccess ensure_vulkan_device(VkInstance vk_instance,
 
 #if STRICT_REQUIREMENTS
     if (!device_vk.features.geometryShader || !device_vk.features.dualSrcBlend ||
-        !device_vk.features.logicOp)
+        !device_vk.features.logicOp || !device_vk.features.imageCubeArray)
     {
       continue;
    }
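
Note: the `imageCubeArray` member checked above is filled in by querying the physical device before device creation. A minimal sketch of that query, assuming a valid `physical_device` handle (illustrative, not part of this patch):

    VkPhysicalDeviceFeatures features = {};
    vkGetPhysicalDeviceFeatures(physical_device, &features);
    if (features.imageCubeArray == VK_FALSE) {
      /* Device cannot create cube-array image views; fails STRICT_REQUIREMENTS,
       * so the loop above would skip it and try the next physical device. */
    }
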
@@ -109,9 +109,6 @@ class MESH_UL_vgroups(UIList):
             layout.label(text="", icon_value=icon)
 
 
-
-
-
 class MESH_UL_shape_keys(UIList):
     def draw_item(self, _context, layout, _data, item, icon, active_data, _active_propname, index):
         # assert(isinstance(item, bpy.types.ShapeKey))
@@ -2103,7 +2103,7 @@ class VIEW3D_MT_select_edit_curves(Menu):
         layout.separator()
 
         layout.operator("curves.select_random", text="Random")
-        layout.operator("curves.select_end", text="Endpoints")
+        layout.operator("curves.select_ends", text="Endpoints")
         layout.operator("curves.select_linked", text="Linked")
 
         layout.separator()
@@ -2121,7 +2121,7 @@ class VIEW3D_MT_select_sculpt_curves(Menu):
         layout.operator("curves.select_all", text="None").action = 'DESELECT'
         layout.operator("curves.select_all", text="Invert").action = 'INVERT'
         layout.operator("sculpt_curves.select_random", text="Random")
-        layout.operator("curves.select_end", text="Endpoints")
+        layout.operator("curves.select_ends", text="Endpoints")
         layout.operator("sculpt_curves.select_grow", text="Grow")
 
 
@@ -733,7 +733,7 @@ enum {
 typedef struct CustomDataTransferLayerMap {
   struct CustomDataTransferLayerMap *next, *prev;
 
-  eCustomDataType data_type;
+  int data_type;
   int mix_mode;
   float mix_factor;
   /** If non-NULL, array of weights, one for each dest item, replaces mix_factor. */
@@ -180,7 +180,6 @@ struct Mesh *BKE_mesh_new_nomain_from_curve_displist(const struct Object *ob,
 
 bool BKE_mesh_attribute_required(const char *name);
 
-
 float (*BKE_mesh_orco_verts_get(struct Object *ob))[3];
 void BKE_mesh_orco_verts_transform(struct Mesh *me, float (*orco)[3], int totvert, int invert);
 
@@ -127,7 +127,7 @@ struct CornerNormalSpaceArray {
    */
   Array<Array<int>> corners_by_space;
   /** Whether to create the above map when calculating normals. */
-  bool create_corners_by_space;
+  bool create_corners_by_space = false;
 };
 
 void lnor_space_custom_normal_to_data(const CornerNormalSpace *lnor_space,
@@ -255,7 +255,7 @@ typedef struct PanelType {
 
   char idname[BKE_ST_MAXNAME]; /* unique name */
   char label[BKE_ST_MAXNAME];  /* for panel header */
-  char *description;           /* for panel tooltip */
+  const char *description;     /* for panel tooltip */
   char translation_context[BKE_ST_MAXNAME];
   char context[BKE_ST_MAXNAME];  /* for buttons window */
   char category[BKE_ST_MAXNAME]; /* for category tabs */
@@ -4865,7 +4865,7 @@ static void customdata_data_transfer_interp_generic(const CustomDataTransferLaye
    * more than 0.5 of weight. */
   int best_src_idx = 0;
 
-  const eCustomDataType data_type = laymap->data_type;
+  const int data_type = laymap->data_type;
   const int mix_mode = laymap->mix_mode;
 
   size_t data_size;
@@ -4883,7 +4883,7 @@ static void customdata_data_transfer_interp_generic(const CustomDataTransferLaye
     data_size = laymap->data_size;
   }
   else {
-    const LayerTypeInfo *type_info = layerType_getInfo(data_type);
+    const LayerTypeInfo *type_info = layerType_getInfo(eCustomDataType(data_type));
 
     data_size = size_t(type_info->size);
     interp_cd = type_info->interp;
@@ -4952,7 +4952,7 @@ static void customdata_data_transfer_interp_generic(const CustomDataTransferLaye
     }
   }
   else if (!(int(data_type) & CD_FAKE)) {
-    CustomData_data_mix_value(data_type, tmp_dst, data_dst, mix_mode, mix_factor);
+    CustomData_data_mix_value(eCustomDataType(data_type), tmp_dst, data_dst, mix_mode, mix_factor);
   }
   /* Else we can do nothing by default, needs custom interp func!
    * Note this is here only for sake of consistency, not expected to be used much actually? */
@@ -4975,7 +4975,8 @@ void customdata_data_transfer_interp_normal_normals(const CustomDataTransferLaye
   BLI_assert(weights != nullptr);
   BLI_assert(count > 0);
 
-  const eCustomDataType data_type = laymap->data_type;
+  const eCustomDataType data_type = eCustomDataType(laymap->data_type);
+  BLI_assert(data_type == CD_NORMAL);
   const int mix_mode = laymap->mix_mode;
 
   SpaceTransform *space_transform = static_cast<SpaceTransform *>(laymap->interp_data);
@@ -4985,8 +4986,6 @@ void customdata_data_transfer_interp_normal_normals(const CustomDataTransferLaye
 
   float tmp_dst[3];
 
-  BLI_assert(data_type == CD_NORMAL);
-
   if (!sources) {
     /* Not supported here, abort. */
     return;
@@ -5007,7 +5006,7 @@ void CustomData_data_transfer(const MeshPairRemap *me_remap,
   MeshPairRemapItem *mapit = me_remap->items;
   const int totelem = me_remap->items_num;
 
-  const eCustomDataType data_type = laymap->data_type;
+  const int data_type = laymap->data_type;
   const void *data_src = laymap->data_src;
   void *data_dst = laymap->data_dst;
 
@@ -5036,7 +5035,7 @@ void CustomData_data_transfer(const MeshPairRemap *me_remap,
     data_offset = laymap->data_offset;
   }
   else {
-    const LayerTypeInfo *type_info = layerType_getInfo(data_type);
+    const LayerTypeInfo *type_info = layerType_getInfo(eCustomDataType(data_type));
 
     /* NOTE: we can use 'fake' CDLayers for crease :/. */
     data_size = size_t(type_info->size);
@@ -628,7 +628,7 @@ void BKE_mesh_remap_calc_verts_from_mesh(const int mode,
 
         if (mesh_remap_bvhtree_query_nearest(
                 &treedata, &nearest, tmp_co, max_dist_sq, &hit_dist)) {
-          const int poly_index = looptri_polys[rayhit.index];
+          const int poly_index = looptri_polys[nearest.index];
 
           if (mode == MREMAP_MODE_VERT_POLY_NEAREST) {
             int index;
@@ -886,7 +886,7 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
 
         if (mesh_remap_bvhtree_query_nearest(&treedata, &nearest, tmp_co, max_dist_sq, &hit_dist))
         {
-          const int poly_index = looptri_polys[rayhit.index];
+          const int poly_index = looptri_polys[nearest.index];
           const blender::IndexRange poly_src = polys_src[poly_index];
           const int *corner_edge_src = &corner_edges_src[poly_src.start()];
           int nloops = int(poly_src.size());
@@ -70,15 +70,8 @@ bool multiresModifier_reshapeFromObject(Depsgraph *depsgraph,
     return false;
   }
 
-  int num_deformed_verts;
-  float(*deformed_verts)[3] = BKE_mesh_vert_coords_alloc(src_mesh_eval, &num_deformed_verts);
-
-  const bool result = multiresModifier_reshapeFromVertcos(
-      depsgraph, dst, mmd, deformed_verts, num_deformed_verts);
-
-  MEM_freeN(deformed_verts);
-
-  return result;
+  return multiresModifier_reshapeFromVertcos(
+      depsgraph, dst, mmd, BKE_mesh_vert_positions(src_mesh_eval), src_mesh_eval->totvert);
 }
 
 /** \} */
@@ -3449,7 +3449,6 @@ static void do_hair_dynamics(ParticleSimulationData *sim)
   EffectorWeights *clmd_effweights;
   int totpoint;
   int totedge;
-  float(*deformedVerts)[3];
   bool realloc_roots;
 
   if (!psys->clmd) {
@@ -3505,12 +3504,14 @@ static void do_hair_dynamics(ParticleSimulationData *sim)
   psys->clmd->sim_parms->effector_weights = psys->part->effector_weights;
 
   BKE_id_copy_ex(NULL, &psys->hair_in_mesh->id, (ID **)&psys->hair_out_mesh, LIB_ID_COPY_LOCALIZE);
-  deformedVerts = BKE_mesh_vert_coords_alloc(psys->hair_out_mesh, NULL);
-  clothModifier_do(
-      psys->clmd, sim->depsgraph, sim->scene, sim->ob, psys->hair_in_mesh, deformedVerts);
-  BKE_mesh_vert_coords_apply(psys->hair_out_mesh, deformedVerts);
-
-  MEM_freeN(deformedVerts);
+  clothModifier_do(psys->clmd,
+                   sim->depsgraph,
+                   sim->scene,
+                   sim->ob,
+                   psys->hair_in_mesh,
+                   BKE_mesh_vert_positions_for_write(psys->hair_out_mesh));
+  BKE_mesh_tag_positions_changed(psys->hair_out_mesh);
 
   /* restore cloth effector weights */
   psys->clmd->sim_parms->effector_weights = clmd_effweights;
@@ -5005,6 +5006,8 @@ static void particlesystem_modifiersForeachIDLink(void *user_data,
 void BKE_particlesystem_id_loop(ParticleSystem *psys, ParticleSystemIDFunc func, void *userdata)
 {
   ParticleTarget *pt;
+  LibraryForeachIDData *foreachid_data = userdata;
+  const int foreachid_data_flags = BKE_lib_query_foreachid_process_flags_get(foreachid_data);
 
   func(psys, (ID **)&psys->part, userdata, IDWALK_CB_USER | IDWALK_CB_NEVER_NULL);
   func(psys, (ID **)&psys->target_ob, userdata, IDWALK_CB_NOP);
@@ -5024,14 +5027,19 @@ void BKE_particlesystem_id_loop(ParticleSystem *psys, ParticleSystemIDFunc func,
     func(psys, (ID **)&pt->ob, userdata, IDWALK_CB_NOP);
   }
 
-  /* Even though psys->part should never be NULL, this can happen as an exception during deletion.
-   * See ID_REMAP_SKIP/FORCE/FLAG_NEVER_NULL_USAGE in BKE_library_remap. */
-  if (psys->part && psys->part->phystype == PART_PHYS_BOIDS) {
+  /* In case `psys->part` is NULL (See ID_REMAP_SKIP/FORCE/FLAG_NEVER_NULL_USAGE in
+   * #BKE_library_remap), or accessing it is forbidden, always handle particles for potential boids
+   * data. Unfortunate, but for now there is no other proper way to do this. */
+  if (!(psys->part && (foreachid_data_flags & IDWALK_NO_ORIG_POINTERS_ACCESS) == 0) ||
+      psys->part->phystype == PART_PHYS_BOIDS)
+  {
     ParticleData *pa;
    int p;
 
     for (p = 0, pa = psys->particles; p < psys->totpart; p++, pa++) {
-      func(psys, (ID **)&pa->boid->ground, userdata, IDWALK_CB_NOP);
+      if (pa->boid != NULL) {
+        func(psys, (ID **)&pa->boid->ground, userdata, IDWALK_CB_NOP);
+      }
     }
   }
 }
@@ -513,7 +513,7 @@ static void shrinkwrap_calc_normal_projection_cb_ex(void *__restrict userdata,
 
   const float proj_limit_squared = calc->smd->projLimit * calc->smd->projLimit;
   float *co = calc->vertexCos[i];
-  float tmp_co[3], tmp_no[3];
+  const float *tmp_co, *tmp_no;
   float weight = BKE_defvert_array_find_weight_safe(calc->dvert, i, calc->vgroup);
 
   if (calc->invert_vgroup) {
@@ -530,12 +530,12 @@ static void shrinkwrap_calc_normal_projection_cb_ex(void *__restrict userdata,
     /* These coordinates are deformed by vertexCos only for normal projection
      * (to get correct normals) for other cases calc->verts contains undeformed coordinates and
      * vertexCos should be used */
-    copy_v3_v3(tmp_co, calc->vert_positions[i]);
-    copy_v3_v3(tmp_no, calc->vert_normals[i]);
+    tmp_co = calc->vert_positions[i];
+    tmp_no = calc->vert_normals[i];
   }
   else {
-    copy_v3_v3(tmp_co, co);
-    copy_v3_v3(tmp_no, proj_axis);
+    tmp_co = co;
+    tmp_no = proj_axis;
   }
 
   hit->index = -1;
@@ -1568,7 +1568,6 @@ void BKE_shrinkwrap_mesh_nearest_surface_deform(bContext *C, Object *ob_source,
 void BKE_shrinkwrap_remesh_target_project(Mesh *src_me, Mesh *target_me, Object *ob_target)
 {
   ShrinkwrapModifierData ssmd = {{nullptr}};
-  int totvert;
 
   ssmd.target = ob_target;
   ssmd.shrinkType = MOD_SHRINKWRAP_PROJECT;
@@ -1581,13 +1580,11 @@ void BKE_shrinkwrap_remesh_target_project(Mesh *src_me, Mesh *target_me, Object
   const float projLimitTolerance = 5.0f;
   ssmd.projLimit = target_me->remesh_voxel_size * projLimitTolerance;
 
-  float(*vertexCos)[3] = BKE_mesh_vert_coords_alloc(src_me, &totvert);
-
   ShrinkwrapCalcData calc = NULL_ShrinkwrapCalcData;
 
   calc.smd = &ssmd;
   calc.numVerts = src_me->totvert;
-  calc.vertexCos = vertexCos;
+  calc.vertexCos = BKE_mesh_vert_positions_for_write(src_me);
   calc.vert_normals = src_me->vert_normals();
   calc.vgroup = -1;
   calc.target = target_me;
@@ -1602,7 +1599,5 @@ void BKE_shrinkwrap_remesh_target_project(Mesh *src_me, Mesh *target_me, Object
     BKE_shrinkwrap_free_tree(&tree);
   }
 
-  BKE_mesh_vert_coords_apply(src_me, vertexCos);
-
-  MEM_freeN(vertexCos);
+  BKE_mesh_tag_positions_changed(src_me);
 }
@@ -45,9 +45,14 @@ class Context {
   /* Get the node tree used for compositing. */
   virtual const bNodeTree &get_node_tree() const = 0;
 
-  /* True if compositor should do write file outputs, false if only running for viewing. */
+  /* True if the compositor should write file outputs, false otherwise. */
   virtual bool use_file_output() const = 0;
 
+  /* True if the compositor should write the composite output, otherwise, the compositor is assumed
+   * to not support the composite output and just displays its viewer output. In that case, the
+   * composite output will be used as a fallback viewer if no other viewer exists */
+  virtual bool use_composite_output() const = 0;
+
   /* True if color management should be used for texture evaluation. */
   virtual bool use_texture_color_management() const = 0;
 
@@ -66,10 +71,14 @@ class Context {
    * region. */
   virtual rcti get_compositing_region() const = 0;
 
-  /* Get the texture representing the output where the result of the compositor should be
-   * written. This should be called by output nodes to get their target texture. */
+  /* Get the texture where the result of the compositor should be written. This should be called by
+   * the composite output node to get its target texture. */
   virtual GPUTexture *get_output_texture() = 0;
 
+  /* Get the texture where the result of the compositor viewer should be written. This should be
+   * called by viewer output nodes to get their target texture. */
+  virtual GPUTexture *get_viewer_output_texture() = 0;
+
   /* Get the texture where the given render pass is stored. This should be called by the Render
    * Layer node to populate its outputs. */
   virtual GPUTexture *get_input_texture(int view_layer, const char *pass_name) = 0;
@@ -8,6 +8,8 @@
 
 #include "NOD_derived_node_tree.hh"
 
+#include "COM_context.hh"
+
 namespace blender::realtime_compositor {
 
 using namespace nodes::derived_node_tree_types;
@@ -18,6 +20,6 @@ using Schedule = VectorSet<DNode>;
 /* Computes the execution schedule of the node tree. This is essentially a post-order depth first
  * traversal of the node tree from the output node to the leaf input nodes, with informed order of
  * traversal of dependencies based on a heuristic estimation of the number of needed buffers. */
-Schedule compute_schedule(const DerivedNodeTree &tree);
+Schedule compute_schedule(const Context &context, const DerivedNodeTree &tree);
 
 }  // namespace blender::realtime_compositor
@@ -72,7 +72,7 @@ void Evaluator::compile_and_evaluate()
     return;
   }
 
-  const Schedule schedule = compute_schedule(*derived_node_tree_);
+  const Schedule schedule = compute_schedule(context_, *derived_node_tree_);
 
   CompileState compile_state(schedule);
 
@@ -13,6 +13,7 @@
 #include "BKE_node.hh"
 #include "BKE_node_runtime.hh"
 
+#include "COM_context.hh"
 #include "COM_scheduler.hh"
 #include "COM_utilities.hh"
 
@@ -72,55 +73,88 @@ static const DTreeContext *find_active_context(const DerivedNodeTree &tree)
   return find_active_context_recursive(&tree.root_context(), NODE_INSTANCE_KEY_BASE);
 }
 
-/* Return the output node which is marked as NODE_DO_OUTPUT. If multiple types of output nodes are
- * marked, then the preference will be CMP_NODE_VIEWER > CMP_NODE_SPLITVIEWER > CMP_NODE_COMPOSITE.
- * If no output node exists, a null node will be returned. */
-static DNode find_output_in_context(const DTreeContext *context)
+/* Add the viewer node which is marked as NODE_DO_OUTPUT in the given context to the given stack.
+ * If multiple types of viewer nodes are marked, then the preference will be CMP_NODE_VIEWER >
+ * CMP_NODE_SPLITVIEWER. If no viewer nodes were found, composite nodes can be added as a fallback
+ * viewer node. */
+static bool add_viewer_nodes_in_context(const DTreeContext *context, Stack<DNode> &node_stack)
 {
-  const bNodeTree &tree = context->btree();
-
-  for (const bNode *node : tree.nodes_by_type("CompositorNodeViewer")) {
+  for (const bNode *node : context->btree().nodes_by_type("CompositorNodeViewer")) {
     if (node->flag & NODE_DO_OUTPUT) {
-      return DNode(context, node);
+      node_stack.push(DNode(context, node));
+      return true;
     }
   }
 
-  for (const bNode *node : tree.nodes_by_type("CompositorNodeSplitViewer")) {
+  for (const bNode *node : context->btree().nodes_by_type("CompositorNodeSplitViewer")) {
     if (node->flag & NODE_DO_OUTPUT) {
-      return DNode(context, node);
+      node_stack.push(DNode(context, node));
+      return true;
     }
   }
 
-  for (const bNode *node : tree.nodes_by_type("CompositorNodeComposite")) {
+  /* The active Composite node was already added, no need to add it again, see the next block. */
+  if (!node_stack.is_empty() && node_stack.peek()->type == CMP_NODE_COMPOSITE) {
+    return false;
+  }
+
+  /* No active viewers exist in this context, try to add the Composite node as a fallback viewer if
+   * it was not already added. */
+  for (const bNode *node : context->btree().nodes_by_type("CompositorNodeComposite")) {
    if (node->flag & NODE_DO_OUTPUT) {
-      return DNode(context, node);
+      node_stack.push(DNode(context, node));
+      return true;
     }
   }
 
-  return DNode();
+  return false;
 }
 
-/* Compute the output node whose result should be computed. This node is the output node that
- * satisfies the requirements in the find_output_in_context function. First, the active context is
- * searched for an output node, if non was found, the root context is search. For more information
- * on what contexts mean here, see the find_active_context function. */
-static DNode compute_output_node(const DerivedNodeTree &tree)
+/* Add the output nodes whose result should be computed to the given stack. This includes File
+ * Output, Composite, and Viewer nodes. Viewer nodes are a special case, as only the nodes that
+ * satisfy the requirements in the add_viewer_nodes_in_context function are added. First, the
+ * active context is searched for viewer nodes, if none were found, the root context is searched.
+ * For more information on what contexts mean here, see the find_active_context function. */
+static void add_output_nodes(const Context &context,
+                             const DerivedNodeTree &tree,
+                             Stack<DNode> &node_stack)
 {
+  const DTreeContext &root_context = tree.root_context();
+
+  /* Only add File Output nodes if the context supports them. */
+  if (context.use_file_output()) {
+    for (const bNode *node : root_context.btree().nodes_by_type("CompositorNodeOutputFile")) {
+      node_stack.push(DNode(&root_context, node));
+    }
+  }
+
+  /* Only add the Composite output node if the context supports composite outputs. The active
+   * Composite node may still be added as a fallback viewer output below. */
+  if (context.use_composite_output()) {
+    for (const bNode *node : root_context.btree().nodes_by_type("CompositorNodeComposite")) {
+      if (node->flag & NODE_DO_OUTPUT) {
+        node_stack.push(DNode(&root_context, node));
+        break;
+      }
+    }
+  }
+
   const DTreeContext *active_context = find_active_context(tree);
+  const bool viewer_was_added = add_viewer_nodes_in_context(active_context, node_stack);
 
-  const DNode node = find_output_in_context(active_context);
-  if (node) {
-    return node;
+  /* An active viewer was added, no need to search further. */
+  if (viewer_was_added) {
+    return;
   }
 
-  /* If the active context is the root one and no output node was found, we consider this node tree
-   * to have no output node, even if one of the non-active descendants have an output node. */
+  /* If the active context is the root one and no viewer nodes were found, we consider this node
+   * tree to have no viewer nodes, even if one of the non-active descendants have viewer nodes. */
   if (active_context->is_root()) {
-    return DNode();
+    return;
   }
 
-  /* The active context doesn't have an output node, search in the root context as a fallback. */
-  return find_output_in_context(&tree.root_context());
+  /* The active context doesn't have a viewer node, search in the root context as a fallback. */
+  add_viewer_nodes_in_context(&tree.root_context(), node_stack);
 }
 
 /* A type representing a mapping that associates each node with a heuristic estimation of the
@@ -177,12 +211,12 @@ using NeededBuffers = Map<DNode, int>;
  * implementation because it rarely affects the output and is done by very few nodes.
  * - The compiler may decide to compiler the schedule differently depending on runtime information
  *   which we can merely speculate at scheduling-time as described above. */
-static NeededBuffers compute_number_of_needed_buffers(DNode output_node)
+static NeededBuffers compute_number_of_needed_buffers(Stack<DNode> &output_nodes)
 {
   NeededBuffers needed_buffers;
 
-  /* A stack of nodes used to traverse the node tree starting from the output node. */
-  Stack<DNode> node_stack = {output_node};
+  /* A stack of nodes used to traverse the node tree starting from the output nodes. */
+  Stack<DNode> node_stack = output_nodes;
 
   /* Traverse the node tree in a post order depth first manner and compute the number of needed
    * buffers for each node. Post order traversal guarantee that all the node dependencies of each
@@ -301,23 +335,23 @@ static NeededBuffers compute_number_of_needed_buffers(Stack<DNode> &output_nodes)
  * doesn't always guarantee an optimal evaluation order, as the optimal evaluation order is very
  * difficult to compute, however, this method works well in most cases. Moreover it assumes that
  * all buffers will have roughly the same size, which may not always be the case. */
-Schedule compute_schedule(const DerivedNodeTree &tree)
+Schedule compute_schedule(const Context &context, const DerivedNodeTree &tree)
 {
   Schedule schedule;
 
-  /* Compute the output node whose result should be computed. */
-  const DNode output_node = compute_output_node(tree);
+  /* A stack of nodes used to traverse the node tree starting from the output nodes. */
+  Stack<DNode> node_stack;
 
-  /* No output node, the node tree has no effect, return an empty schedule. */
-  if (!output_node) {
+  /* Add the output nodes whose result should be computed to the stack. */
+  add_output_nodes(context, tree, node_stack);
+
+  /* No output nodes, the node tree has no effect, return an empty schedule. */
+  if (node_stack.is_empty()) {
     return schedule;
   }
 
-  /* Compute the number of buffers needed by each node connected to the output. */
-  const NeededBuffers needed_buffers = compute_number_of_needed_buffers(output_node);
-
-  /* A stack of nodes used to traverse the node tree starting from the output node. */
-  Stack<DNode> node_stack = {output_node};
+  /* Compute the number of buffers needed by each node connected to the outputs. */
+  const NeededBuffers needed_buffers = compute_number_of_needed_buffers(node_stack);
 
   /* Traverse the node tree in a post order depth first manner, scheduling the nodes in an order
    * informed by the number of buffers needed by each node. Post order traversal guarantee that all
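
Note: with the new signature, callers pass the evaluation context so scheduling can skip outputs the backend does not support. A minimal usage sketch mirroring the `Evaluator` change above (names are illustrative):

    /* `context` is any realtime_compositor::Context implementation; `tree` is the derived tree. */
    const Schedule schedule = compute_schedule(context, tree);
    for (const DNode &node : schedule) {
      /* Dependencies are scheduled before dependents, so a node's inputs are ready here. */
    }
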
@@ -10,7 +10,19 @@ GPU_SHADER_CREATE_INFO(compositor_read_pass_shared)
     .sampler(0, ImageType::FLOAT_2D, "input_tx")
     .compute_source("compositor_read_pass.glsl");
 
-GPU_SHADER_CREATE_INFO(compositor_read_pass)
+GPU_SHADER_CREATE_INFO(compositor_read_pass_float)
+    .additional_info("compositor_read_pass_shared")
+    .image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
+    .define("READ_EXPRESSION(pass_color)", "vec4(pass_color.r, vec3(0.0))")
+    .do_static_compilation(true);
+
+GPU_SHADER_CREATE_INFO(compositor_read_pass_vector)
     .additional_info("compositor_read_pass_shared")
     .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
+    .define("READ_EXPRESSION(pass_color)", "pass_color")
     .do_static_compilation(true);
+
+GPU_SHADER_CREATE_INFO(compositor_read_pass_color)
+    .additional_info("compositor_read_pass_shared")
+    .image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
+    .define("READ_EXPRESSION(pass_color)", "pass_color")
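
Note: the `compositor_read_pass_alpha` variant referenced by the Render Layer node change further below is not visible in this hunk. A plausible sketch following the same pattern (the exact definition is an assumption, not part of the shown diff):

    GPU_SHADER_CREATE_INFO(compositor_read_pass_alpha)
        .additional_info("compositor_read_pass_shared")
        .image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
        .define("READ_EXPRESSION(pass_color)", "vec4(pass_color.a, vec3(0.0))")
        .do_static_compilation(true);
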
@@ -21,6 +21,7 @@
 #include "BLI_utildefines.h"
 
 #include "BKE_action.h"
+#include "BKE_collection.h"
 
 #include "RNA_prototypes.h"
 
@@ -183,6 +184,14 @@ void deg_graph_build_finalize(Main *bmain, Depsgraph *graph)
         flag |= ID_RECALC_NTREE_OUTPUT;
       }
     }
+    else {
+      /* Collection content might have changed (children collection might have been added or
+       * removed from the graph based on their inclusion and visibility flags). */
+      const ID_Type id_type = GS(id_node->id_cow->name);
+      if (id_type == ID_GR) {
+        BKE_collection_object_cache_free(reinterpret_cast<Collection *>(id_node->id_cow));
+      }
+    }
     /* Restore recalc flags from original ID, which could possibly contain recalc flags set by
      * an operator and then were carried on by the undo system. */
     flag |= id_orig->recalc;
@@ -68,6 +68,14 @@ class Context : public realtime_compositor::Context {
     return false;
   }
 
+  /* The viewport compositor doesn't really support the composite output, it only displays the
+   * viewer output in the viewport. Setting this to false will make the compositor use the
+   * composite output as fallback viewer if no other viewer exists. */
+  bool use_composite_output() const override
+  {
+    return false;
+  }
+
   bool use_texture_color_management() const override
   {
     return BKE_scene_check_color_management_enabled(DRW_context_state_get()->scene);
@@ -145,6 +153,11 @@ class Context : public realtime_compositor::Context {
     return DRW_viewport_texture_list_get()->color;
   }
 
+  GPUTexture *get_viewer_output_texture() override
+  {
+    return DRW_viewport_texture_list_get()->color;
+  }
+
   GPUTexture *get_input_texture(int view_layer, const char *pass_name) override
   {
     if (view_layer == 0 && STREQ(pass_name, RE_PASSNAME_COMBINED)) {
@@ -34,23 +34,7 @@ void ED_asset_handle_get_full_library_path(
     /* `1024` for #FILE_MAX,
      * rely on warnings to let us know if this gets out of sync. */
     char r_full_lib_path[1024]);
 bool ED_asset_handle_get_use_relative_path(const struct AssetHandle *asset);
 
 #ifdef __cplusplus
 }
 #endif
-
-#ifdef __cplusplus
-
-#  include <optional>
-
-#  include "BLI_string_ref.hh"
-
-/** The asset library may have an import method (e.g. append vs. link) defined to use. If so, this
- * returns it. Otherwise a reasonable method should be used, usually "Append (Reuse Data)". */
-std::optional<eAssetImportMethod> ED_asset_handle_get_import_method(
-    const struct AssetHandle *asset);
-
-blender::StringRefNull ED_asset_handle_get_library_relative_identifier(const AssetHandle &asset);
-
-#endif
@@ -24,7 +24,7 @@ bool ED_asset_filter_matches_asset(const AssetFilterSettings *filter,
   ID_Type asset_type = asset.get_id_type();
   uint64_t asset_id_filter = BKE_idtype_idcode_to_idfilter(asset_type);
 
-  if ((filter->id_types & asset_id_filter) == 0) {
+  if (filter->id_types && (filter->id_types & asset_id_filter) == 0) {
     return false;
   }
   /* Not very efficient (O(n^2)), could be improved quite a bit. */
@@ -51,17 +51,6 @@ int ED_asset_handle_get_preview_icon_id(const AssetHandle *asset)
   return asset->file_data->preview_icon_id;
 }
 
-std::optional<eAssetImportMethod> ED_asset_handle_get_import_method(
-    const AssetHandle *asset_handle)
-{
-  return AS_asset_representation_import_method_get(asset_handle->file_data->asset);
-}
-
-blender::StringRefNull ED_asset_handle_get_library_relative_identifier(const AssetHandle &asset)
-{
-  return AS_asset_representation_library_relative_identifier_get(asset.file_data->asset);
-}
-
 void ED_asset_handle_get_full_library_path(const AssetHandle *asset_handle,
                                            char r_full_lib_path[FILE_MAX])
 {
@@ -75,8 +64,3 @@ void ED_asset_handle_get_full_library_path(const AssetHandle *asset_handle,
 
   BLI_strncpy(r_full_lib_path, library_path.c_str(), FILE_MAX);
 }
-
-bool ED_asset_handle_get_use_relative_path(const AssetHandle *asset)
-{
-  return AS_asset_representation_use_relative_path_get(asset->file_data->asset);
-}
@@ -16,6 +16,8 @@
 #include "BLI_utildefines.h"
 #include "BLI_vector_set.hh"
 
+#include "BLT_translation.h"
+
 #include "ED_curves.h"
 #include "ED_object.h"
 #include "ED_screen.h"
@@ -938,15 +940,15 @@ static void CURVES_OT_select_random(wmOperatorType *ot)
                   1.0f);
 }
 
-static int select_end_exec(bContext *C, wmOperator *op)
+static int select_ends_exec(bContext *C, wmOperator *op)
 {
   VectorSet<Curves *> unique_curves = curves::get_unique_editable_curves(*C);
-  const bool end_points = RNA_boolean_get(op->ptr, "end_points");
-  const int amount = RNA_int_get(op->ptr, "amount");
+  const int amount_start = RNA_int_get(op->ptr, "amount_start");
+  const int amount_end = RNA_int_get(op->ptr, "amount_end");
 
   for (Curves *curves_id : unique_curves) {
     CurvesGeometry &curves = curves_id->geometry.wrap();
-    select_ends(curves, amount, end_points);
+    select_ends(curves, amount_start, amount_end);
 
     /* Use #ID_RECALC_GEOMETRY instead of #ID_RECALC_SELECT because it is handled as a generic
      * attribute for now. */
@@ -957,24 +959,48 @@ static int select_ends_exec(bContext *C, wmOperator *op)
   return OPERATOR_FINISHED;
 }
 
-static void CURVES_OT_select_end(wmOperatorType *ot)
+static void select_ends_ui(bContext * /*C*/, wmOperator *op)
 {
-  ot->name = "Select End";
+  uiLayout *layout = op->layout;
+
+  uiLayoutSetPropSep(layout, true);
+
+  uiLayout *col = uiLayoutColumn(layout, true);
+  uiLayoutSetPropDecorate(col, false);
+  uiItemR(col, op->ptr, "amount_start", 0, IFACE_("Amount Start"), ICON_NONE);
+  uiItemR(col, op->ptr, "amount_end", 0, IFACE_("End"), ICON_NONE);
+}
+
+static void CURVES_OT_select_ends(wmOperatorType *ot)
+{
+  ot->name = "Select Ends";
   ot->idname = __func__;
   ot->description = "Select end points of curves";
 
-  ot->exec = select_end_exec;
+  ot->exec = select_ends_exec;
+  ot->ui = select_ends_ui;
   ot->poll = editable_curves_point_domain_poll;
 
   ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;
 
-  RNA_def_boolean(ot->srna,
-                  "end_points",
-                  true,
-                  "End Points",
-                  "Select points at the end of the curve as opposed to the beginning");
-  RNA_def_int(
-      ot->srna, "amount", 1, 0, INT32_MAX, "Amount", "Number of points to select", 0, INT32_MAX);
+  RNA_def_int(ot->srna,
+              "amount_start",
+              0,
+              0,
+              INT32_MAX,
+              "Amount Front",
+              "Number of points to select from the front",
+              0,
+              INT32_MAX);
+  RNA_def_int(ot->srna,
+              "amount_end",
+              1,
+              0,
+              INT32_MAX,
+              "Amount Back",
+              "Number of points to select from the back",
+              0,
+              INT32_MAX);
 }
 
 static int select_linked_exec(bContext *C, wmOperator * /*op*/)
@@ -1181,7 +1207,7 @@ void ED_operatortypes_curves()
   WM_operatortype_append(CURVES_OT_set_selection_domain);
   WM_operatortype_append(CURVES_OT_select_all);
   WM_operatortype_append(CURVES_OT_select_random);
-  WM_operatortype_append(CURVES_OT_select_end);
+  WM_operatortype_append(CURVES_OT_select_ends);
   WM_operatortype_append(CURVES_OT_select_linked);
   WM_operatortype_append(CURVES_OT_select_more);
   WM_operatortype_append(CURVES_OT_select_less);
@@ -222,7 +222,7 @@ void select_all(bke::CurvesGeometry &curves, const eAttrDomain selection_domain,
   }
 }
 
-void select_ends(bke::CurvesGeometry &curves, int amount, bool end_points)
+void select_ends(bke::CurvesGeometry &curves, int amount_start, int amount_end)
 {
   const bool was_anything_selected = has_anything_selected(curves);
   const OffsetIndices points_by_curve = curves.points_by_curve();
@@ -240,12 +240,9 @@ void select_ends(bke::CurvesGeometry &curves, int amount_start, int amount_end)
     MutableSpan<T> selection_typed = selection.span.typed<T>();
     threading::parallel_for(curves.curves_range(), 256, [&](const IndexRange range) {
       for (const int curve_i : range) {
-        if (end_points) {
-          selection_typed.slice(points_by_curve[curve_i].drop_back(amount)).fill(T(0));
-        }
-        else {
-          selection_typed.slice(points_by_curve[curve_i].drop_front(amount)).fill(T(0));
-        }
+        selection_typed
+            .slice(points_by_curve[curve_i].drop_front(amount_start).drop_back(amount_end))
+            .fill(T(0));
       }
     });
  }
@@ -150,10 +150,10 @@ void select_all(bke::CurvesGeometry &curves, eAttrDomain selection_domain, int a
 /**
  * Select the ends (front or back) of all the curves.
  *
- * \param amount: The amount of points to select from the front or back.
- * \param end_points: If true, select the last point(s), if false, select the first point(s).
+ * \param amount_start: The amount of points to select from the front.
+ * \param amount_end: The amount of points to select from the back.
  */
-void select_ends(bke::CurvesGeometry &curves, int amount, bool end_points);
+void select_ends(bke::CurvesGeometry &curves, int amount_start, int amount_end);
 
 /**
  * Select the points of all curves that have at least one point selected.
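
Note: a minimal usage sketch of the reworked API (values are illustrative):

    /* Keep only the first point and the last two points of every curve selected. */
    select_ends(curves, 1, 2);
    /* The old call `select_ends(curves, amount, /*end_points=*/true)` maps to: */
    select_ends(curves, 0, amount);
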
@@ -1056,7 +1056,6 @@ void ED_view3d_check_mats_rv3d(struct RegionView3D *rv3d);
 struct RV3DMatrixStore *ED_view3d_mats_rv3d_backup(struct RegionView3D *rv3d);
 void ED_view3d_mats_rv3d_restore(struct RegionView3D *rv3d, struct RV3DMatrixStore *rv3dmat);
 
-
 struct RenderEngineType *ED_view3d_engine_type(const struct Scene *scene, int drawtype);
 
 bool ED_view3d_context_activate(struct bContext *C);
@@ -226,8 +226,6 @@ void ED_operatortypes_object(void)
   WM_operatortype_append(OBJECT_OT_vertex_weight_normalize_active_vertex);
   WM_operatortype_append(OBJECT_OT_vertex_weight_copy);
 
-
-
   WM_operatortype_append(TRANSFORM_OT_vertex_warp);
 
   WM_operatortype_append(OBJECT_OT_move_to_collection);
@@ -4,6 +4,7 @@
 
 #include "testing/testing.h"
 
+#include "GPU_context.h"
 #include "GPU_framebuffer.h"
 #include "gpu_testing.hh"
 
@@ -199,4 +200,52 @@ static void test_framebuffer_scissor_test()
 }
 GPU_TEST(framebuffer_scissor_test);
 
+/* Color each side of a cubemap with a different color. */
+static void test_framebuffer_cube()
+{
+  const int SIZE = 32;
+  GPU_render_begin();
+
+  eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
+  GPUTexture *tex = GPU_texture_create_cube("tex", SIZE, 1, GPU_RGBA32F, usage, nullptr);
+
+  const float4 clear_colors[6] = {
+      {0.5f, 0.0f, 0.0f, 1.0f},
+      {1.0f, 0.0f, 0.0f, 1.0f},
+      {0.0f, 0.5f, 0.0f, 1.0f},
+      {0.0f, 1.0f, 0.0f, 1.0f},
+      {0.0f, 0.0f, 0.5f, 1.0f},
+      {0.0f, 0.0f, 1.0f, 1.0f},
+  };
+  GPUFrameBuffer *framebuffers[6] = {nullptr};
+
+  for (int i : IndexRange(6)) {
+    GPU_framebuffer_ensure_config(&framebuffers[i],
+                                  {
+                                      GPU_ATTACHMENT_NONE,
+                                      GPU_ATTACHMENT_TEXTURE_CUBEFACE(tex, i),
+                                  });
+    GPU_framebuffer_bind(framebuffers[i]);
+    GPU_framebuffer_clear_color(framebuffers[i], clear_colors[i]);
+  };
+
+  float4 *data = (float4 *)GPU_texture_read(tex, GPU_DATA_FLOAT, 0);
+  for (int side : IndexRange(6)) {
+    for (int pixel_index : IndexRange(SIZE * SIZE)) {
+      int index = pixel_index + (SIZE * SIZE) * side;
+      EXPECT_EQ(clear_colors[side], data[index]);
+    }
+  }
+  MEM_freeN(data);
+
+  GPU_texture_free(tex);
+
+  for (int i : IndexRange(6)) {
+    GPU_FRAMEBUFFER_FREE_SAFE(framebuffers[i]);
+  }
+
+  GPU_render_end();
+}
+GPU_TEST(framebuffer_cube)
+
 }  // namespace blender::gpu::tests
@@ -63,6 +63,52 @@ static void test_texture_read()
 }
 GPU_TEST(texture_read)
 
+static void test_texture_cube()
+{
+  const int SIZE = 32;
+  GPU_render_begin();
+
+  eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
+  GPUTexture *tex = GPU_texture_create_cube("tex", SIZE, 1, GPU_RGBA32F, usage, nullptr);
+  float4 clear_color(1.0f, 0.5f, 0.2f, 1.0f);
+  GPU_texture_clear(tex, GPU_DATA_FLOAT, clear_color);
+
+  float4 *data = (float4 *)GPU_texture_read(tex, GPU_DATA_FLOAT, 0);
+  for (int index : IndexRange(SIZE * SIZE * 6)) {
+    EXPECT_EQ(clear_color, data[index]);
+  }
+  MEM_freeN(data);
+
+  GPU_texture_free(tex);
+
+  GPU_render_end();
+}
+GPU_TEST(texture_cube)
+
+static void test_texture_cube_array()
+{
+  const int SIZE = 32;
+  const int ARRAY = 2;
+  GPU_render_begin();
+
+  eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
+  GPUTexture *tex = GPU_texture_create_cube_array(
+      "tex", SIZE, ARRAY, 1, GPU_RGBA32F, usage, nullptr);
+  float4 clear_color(1.0f, 0.5f, 0.2f, 1.0f);
+  GPU_texture_clear(tex, GPU_DATA_FLOAT, clear_color);
+
+  float4 *data = (float4 *)GPU_texture_read(tex, GPU_DATA_FLOAT, 0);
+  for (int index : IndexRange(SIZE * SIZE * 6 * ARRAY)) {
+    EXPECT_EQ(clear_color, data[index]);
+  }
+  MEM_freeN(data);
+
+  GPU_texture_free(tex);
+
+  GPU_render_end();
+}
+GPU_TEST(texture_cube_array)
+
 static void test_texture_copy()
 {
   const int SIZE = 128;
@@ -108,7 +108,10 @@ void VKBackend::compute_dispatch(int groups_x_len, int groups_y_len, int groups_
   command_buffer.dispatch(groups_x_len, groups_y_len, groups_z_len);
 }
 
-void VKBackend::compute_dispatch_indirect(StorageBuf * /*indirect_buf*/) {}
+void VKBackend::compute_dispatch_indirect(StorageBuf * /*indirect_buf*/)
+{
+  NOT_YET_IMPLEMENTED;
+}
 
 Context *VKBackend::context_alloc(void *ghost_window, void *ghost_context)
 {
@@ -585,17 +585,22 @@ VkFormat to_vk_format(const GPUVertCompType type, const uint32_t size, GPUVertFe
 
 VkImageType to_vk_image_type(const eGPUTextureType type)
 {
+  /* See
+   * https://vulkan.lunarg.com/doc/view/1.3.243.0/linux/1.3-extensions/vkspec.html#resources-image-views-compatibility
+   * for reference */
   switch (type) {
     case GPU_TEXTURE_1D:
     case GPU_TEXTURE_BUFFER:
    case GPU_TEXTURE_1D_ARRAY:
       return VK_IMAGE_TYPE_1D;
+
     case GPU_TEXTURE_2D:
     case GPU_TEXTURE_2D_ARRAY:
-      return VK_IMAGE_TYPE_2D;
-    case GPU_TEXTURE_3D:
     case GPU_TEXTURE_CUBE:
     case GPU_TEXTURE_CUBE_ARRAY:
+      return VK_IMAGE_TYPE_2D;
+
+    case GPU_TEXTURE_3D:
       return VK_IMAGE_TYPE_3D;
 
     case GPU_TEXTURE_ARRAY:
@@ -618,13 +623,15 @@ VkImageViewType to_vk_image_view_type(const eGPUTextureType type)
     case GPU_TEXTURE_3D:
       return VK_IMAGE_VIEW_TYPE_3D;
     case GPU_TEXTURE_CUBE:
-      return VK_IMAGE_VIEW_TYPE_CUBE;
+      return view_type == eImageViewUsage::Attachment ? VK_IMAGE_VIEW_TYPE_2D_ARRAY :
+                                                        VK_IMAGE_VIEW_TYPE_CUBE;
     case GPU_TEXTURE_1D_ARRAY:
       return VK_IMAGE_VIEW_TYPE_1D_ARRAY;
     case GPU_TEXTURE_2D_ARRAY:
       return VK_IMAGE_VIEW_TYPE_2D_ARRAY;
     case GPU_TEXTURE_CUBE_ARRAY:
-      return VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
+      return view_type == eImageViewUsage::Attachment ? VK_IMAGE_VIEW_TYPE_2D_ARRAY :
+                                                        VK_IMAGE_VIEW_TYPE_CUBE_ARRAY;
 
     case GPU_TEXTURE_ARRAY:
       /* GPU_TEXTURE_ARRAY should always be used together with 1D, 2D, or CUBE*/
@@ -23,13 +23,27 @@
 
 namespace blender::gpu {
 
+/**
+ * Based on the usage of an Image View a different image view type should be created.
+ *
+ * When using a GPU_TEXTURE_CUBE as a frame buffer attachment it will be used as a
+ * GPU_TEXTURE_2D_ARRAY, e.g. only a single side of the cube map will be attached. But when bound
+ * as a shader resource the cubemap will be used.
+ */
+enum class eImageViewUsage {
+  /** Image View will be used as a bindable shader resource. */
+  ShaderBinding,
+  /** Image View will be used as a framebuffer attachment. */
+  Attachment,
+};
+
 VkImageAspectFlagBits to_vk_image_aspect_flag_bits(const eGPUTextureFormat format);
 VkFormat to_vk_format(const eGPUTextureFormat format);
 VkFormat to_vk_format(const GPUVertCompType type,
                       const uint32_t size,
                       const GPUVertFetchMode fetch_mode);
 VkComponentMapping to_vk_component_mapping(const eGPUTextureFormat format);
-VkImageViewType to_vk_image_view_type(const eGPUTextureType type);
+VkImageViewType to_vk_image_view_type(const eGPUTextureType type, eImageViewUsage view_type);
 VkImageType to_vk_image_type(const eGPUTextureType type);
 VkClearColorValue to_vk_clear_color_value(const eGPUDataFormat format, const void *data);
 VkIndexType to_vk_index_type(const GPUIndexBufType index_type);
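
Note: a small sketch of how the usage parameter changes the resulting view type for cube maps, per the switch above (illustrative):

    /* Attaching a cube map face renders into a single 2D layer... */
    const VkImageViewType attachment = to_vk_image_view_type(GPU_TEXTURE_CUBE,
                                                             eImageViewUsage::Attachment);
    /* attachment == VK_IMAGE_VIEW_TYPE_2D_ARRAY */

    /* ...while sampling in a shader keeps cube semantics. */
    const VkImageViewType binding = to_vk_image_view_type(GPU_TEXTURE_CUBE,
                                                          eImageViewUsage::ShaderBinding);
    /* binding == VK_IMAGE_VIEW_TYPE_CUBE */
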
@@ -404,7 +404,7 @@ void VKFrameBuffer::render_pass_create()
       /* Ensure texture is allocated to ensure the image view. */
       VKTexture &texture = *static_cast<VKTexture *>(unwrap(attachment.tex));
       texture.ensure_allocated();
-      image_views_.append(VKImageView(texture, attachment.mip, name_));
+      image_views_.append(VKImageView(texture, attachment.layer, attachment.mip, name_));
       image_views[attachment_location] = image_views_.last().vk_handle();
 
       VkAttachmentDescription &attachment_description =
@@ -15,8 +15,8 @@
 
 namespace blender::gpu {
 
-VKImageView::VKImageView(VKTexture &texture, int mip_level, StringRefNull name)
-    : vk_image_view_(create_vk_image_view(texture, mip_level, name))
+VKImageView::VKImageView(VKTexture &texture, int layer, int mip_level, StringRefNull name)
+    : vk_image_view_(create_vk_image_view(texture, layer, mip_level, name))
 {
   BLI_assert(vk_image_view_ != VK_NULL_HANDLE);
 }
@@ -41,6 +41,7 @@ VKImageView::~VKImageView()
   }
 }
 VkImageView VKImageView::create_vk_image_view(VKTexture &texture,
+                                              int layer,
                                               int mip_level,
                                               StringRefNull name)
 {
@@ -49,14 +50,15 @@ VkImageView VKImageView::create_vk_image_view(VKTexture &texture,
   VkImageViewCreateInfo image_view_info = {};
   image_view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
   image_view_info.image = texture.vk_image_handle();
-  image_view_info.viewType = to_vk_image_view_type(texture.type_get());
+  image_view_info.viewType = to_vk_image_view_type(texture.type_get(),
+                                                   eImageViewUsage::Attachment);
   image_view_info.format = to_vk_format(texture.format_get());
   image_view_info.components = to_vk_component_mapping(texture.format_get());
   image_view_info.subresourceRange.aspectMask = to_vk_image_aspect_flag_bits(texture.format_get());
   image_view_info.subresourceRange.baseMipLevel = mip_level;
   image_view_info.subresourceRange.levelCount = 1;
-  image_view_info.subresourceRange.baseArrayLayer = 0;
-  image_view_info.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
+  image_view_info.subresourceRange.baseArrayLayer = layer == -1 ? 0 : layer;
+  image_view_info.subresourceRange.layerCount = 1;
 
   const VKDevice &device = VKBackend::get().device_get();
   VkImageView image_view = VK_NULL_HANDLE;
@@ -20,7 +20,7 @@ class VKImageView : NonCopyable {
   VkImageView vk_image_view_ = VK_NULL_HANDLE;
 
  public:
-  VKImageView(VKTexture &texture, int mip_level, StringRefNull name);
+  VKImageView(VKTexture &texture, int layer, int mip_level, StringRefNull name);
 
   /**
    * Wrap the given vk_image_view handle. Note that the vk_image_view handle ownership is
@@ -38,7 +38,10 @@ class VKImageView : NonCopyable {
   }
 
  private:
-  static VkImageView create_vk_image_view(VKTexture &texture, int mip_level, StringRefNull name);
+  static VkImageView create_vk_image_view(VKTexture &texture,
+                                          int layer,
+                                          int mip_level,
+                                          StringRefNull name);
 };
 
 }  // namespace blender::gpu
@@ -157,6 +157,15 @@ void VKTexture::mip_range_set(int min, int max)
   flags_ |= IMAGE_VIEW_DIRTY;
 }
 
+int VKTexture::layer_count()
+{
+  int layers = 1;
+  if (ELEM(type_, GPU_TEXTURE_CUBE, GPU_TEXTURE_CUBE_ARRAY)) {
+    layers = d_;
+  }
+  return layers;
+}
+
 void VKTexture::read_sub(int mip, eGPUDataFormat format, const int area[4], void *r_data)
 {
   VKContext &context = *VKContext::get();
@@ -165,7 +174,7 @@ void VKTexture::read_sub(int mip, eGPUDataFormat format, const int area[4], void
   /* Vulkan images cannot be directly mapped to host memory and requires a staging buffer. */
   VKBuffer staging_buffer;
 
-  size_t sample_len = area[2] * area[3];
+  size_t sample_len = area[2] * area[3] * layer_count();
   size_t device_memory_size = sample_len * to_bytesize(format_);
 
   staging_buffer.create(
@@ -179,7 +188,7 @@ void VKTexture::read_sub(int mip, eGPUDataFormat format, const int area[4], void
   region.imageExtent.depth = 1;
   region.imageSubresource.aspectMask = to_vk_image_aspect_flag_bits(format_);
   region.imageSubresource.mipLevel = mip;
-  region.imageSubresource.layerCount = 1;
+  region.imageSubresource.layerCount = layer_count();
 
   VKCommandBuffer &command_buffer = context.command_buffer_get();
   command_buffer.copy(staging_buffer, *this, Span<VkBufferImageCopy>(&region, 1));
@@ -192,7 +201,7 @@ void *VKTexture::read(int mip, eGPUDataFormat format)
 {
   int mip_size[3] = {1, 1, 1};
   mip_size_get(mip, mip_size);
-  size_t sample_len = mip_size[0] * mip_size[1];
+  size_t sample_len = mip_size[0] * mip_size[1] * layer_count();
   size_t host_memory_size = sample_len * to_bytesize(format_, format);
 
   void *data = MEM_mallocN(host_memory_size, __func__);
@@ -372,6 +381,17 @@ static VkImageUsageFlagBits to_vk_image_usage(const eGPUTextureUsage usage,
   return result;
 }
 
+static VkImageCreateFlagBits to_vk_image_create(const eGPUTextureType texture_type)
+{
+  VkImageCreateFlagBits result = static_cast<VkImageCreateFlagBits>(0);
+
+  if (ELEM(texture_type, GPU_TEXTURE_CUBE, GPU_TEXTURE_CUBE_ARRAY)) {
+    result = static_cast<VkImageCreateFlagBits>(result | VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT);
+  }
+
+  return result;
+}
+
 bool VKTexture::allocate()
 {
   BLI_assert(vk_image_ == VK_NULL_HANDLE);
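
Note: in raw Vulkan terms, the flag added above is what later permits cube and cube-array views on the image. A minimal sketch of the resulting create info for a single cube map (illustrative values):

    VkImageCreateInfo info = {};
    info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
    info.flags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT; /* From to_vk_image_create(GPU_TEXTURE_CUBE). */
    info.imageType = VK_IMAGE_TYPE_2D;                /* Cube maps are 2D images in Vulkan... */
    info.arrayLayers = 6;                             /* ...with one array layer per face. */
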
@@ -379,17 +399,23 @@ bool VKTexture::allocate()
 
   int extent[3] = {1, 1, 1};
   mip_size_get(0, extent);
+  int layers = 1;
+  if (ELEM(type_, GPU_TEXTURE_CUBE, GPU_TEXTURE_CUBE_ARRAY)) {
+    layers = extent[2];
+    extent[2] = 1;
+  }
 
   VKContext &context = *VKContext::get();
   const VKDevice &device = VKBackend::get().device_get();
   VkImageCreateInfo image_info = {};
   image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+  image_info.flags = to_vk_image_create(type_);
   image_info.imageType = to_vk_image_type(type_);
   image_info.extent.width = extent[0];
   image_info.extent.height = extent[1];
   image_info.extent.depth = extent[2];
   image_info.mipLevels = max_ii(mipmaps_, 1);
-  image_info.arrayLayers = 1;
+  image_info.arrayLayers = layers;
   image_info.format = to_vk_format(format_);
   /* Some platforms (NVIDIA) requires that attached textures are always tiled optimal.
    *
@@ -536,14 +562,15 @@ void VKTexture::image_view_update()
   VkImageViewCreateInfo image_view_info = {};
   image_view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
   image_view_info.image = vk_image_;
-  image_view_info.viewType = to_vk_image_view_type(type_);
+  image_view_info.viewType = to_vk_image_view_type(type_, eImageViewUsage::ShaderBinding);
   image_view_info.format = to_vk_format(format_);
   image_view_info.components = to_vk_component_mapping(format_);
   image_view_info.subresourceRange.aspectMask = to_vk_image_aspect_flag_bits(format_);
   IndexRange mip_range = mip_map_range();
   image_view_info.subresourceRange.baseMipLevel = mip_range.first();
   image_view_info.subresourceRange.levelCount = mip_range.size();
-  image_view_info.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
+  image_view_info.subresourceRange.layerCount =
+      ELEM(type_, GPU_TEXTURE_CUBE, GPU_TEXTURE_CUBE_ARRAY) ? d_ : VK_REMAINING_ARRAY_LAYERS;
 
   const VKDevice &device = VKBackend::get().device_get();
   VkImageView image_view = VK_NULL_HANDLE;
@@ -86,6 +86,8 @@ class VKTexture : public Texture {
    */
   bool allocate();
 
+  int layer_count();
+
   VkImageViewType vk_image_view_type() const;
 
   /* -------------------------------------------------------------------- */
@@ -357,7 +357,6 @@ static void rna_AssetHandle_file_data_set(PointerRNA *ptr,
 static void rna_AssetHandle_get_full_library_path(
     // AssetHandle *asset,
     FileDirEntry *asset_file,
-    AssetLibraryReference *UNUSED(asset_library), /* Deprecated. */
     char r_result[/*FILE_MAX_LIBEXTRA*/])
 {
   AssetHandle asset = {.file_data = asset_file};
@@ -537,13 +536,6 @@ static void rna_def_asset_handle_api(StructRNA *srna)
   RNA_def_function_flag(func, FUNC_NO_SELF);
   parm = RNA_def_pointer(func, "asset_file_handle", "FileSelectEntry", "", "");
   RNA_def_parameter_flags(parm, 0, PARM_REQUIRED);
-  RNA_def_pointer(
-      func,
-      "asset_library_ref",
-      "AssetLibraryReference",
-      "",
-      "The asset library containing the given asset. Deprecated and optional argument, will be "
-      "ignored. Kept for API compatibility only");
   parm = RNA_def_string(func, "result", NULL, FILE_MAX_LIBEXTRA, "result", "");
   RNA_def_parameter_flags(parm, PROP_THICK_WRAP, 0);
   RNA_def_function_output(func, parm);
@@ -72,7 +72,9 @@
 
 struct Vert2GeomData {
   /* Read-only data */
-  float (*v_cos)[3];
+  const float (*v_cos)[3];
+
+  const int *indices;
 
   const SpaceTransform *loc2trgt;
 
@@ -106,7 +108,7 @@ static void vert2geom_task_cb_ex(void *__restrict userdata,
   int i;
 
   /* Convert the vertex to tree coordinates. */
-  copy_v3_v3(tmp_co, data->v_cos[iter]);
+  copy_v3_v3(tmp_co, data->v_cos[data->indices ? data->indices[iter] : iter]);
   BLI_space_transform_apply(data->loc2trgt, tmp_co);
 
   for (i = 0; i < ARRAY_SIZE(data->dist); i++) {
@@ -145,7 +147,8 @@ static void vert2geom_task_cb_ex(void *__restrict userdata,
  * Find nearest vertex and/or edge and/or face, for each vertex (adapted from shrinkwrap.c).
  */
 static void get_vert2geom_distance(int verts_num,
-                                   float (*v_cos)[3],
+                                   const float (*v_cos)[3],
+                                   const int *indices,
                                    float *dist_v,
                                    float *dist_e,
                                    float *dist_f,
@@ -185,6 +188,7 @@ static void get_vert2geom_distance(int verts_num,
   }
 
   data.v_cos = v_cos;
+  data.indices = indices;
   data.loc2trgt = loc2trgt;
   data.treeData[0] = &treeData_v;
   data.treeData[1] = &treeData_e;
@@ -215,8 +219,12 @@ static void get_vert2geom_distance(int verts_num,
  * Returns the real distance between a vertex and another reference object.
  * Note that it works in final world space (i.e. with constraints etc. applied).
  */
-static void get_vert2ob_distance(
-    int verts_num, float (*v_cos)[3], float *dist, Object *ob, Object *obr)
+static void get_vert2ob_distance(int verts_num,
+                                 const float (*v_cos)[3],
+                                 const int *indices,
+                                 float *dist,
+                                 Object *ob,
+                                 Object *obr)
 {
   /* Vertex and ref object coordinates. */
   float v_wco[3];
@@ -224,7 +232,7 @@ static void get_vert2ob_distance(
 
   while (i-- > 0) {
     /* Get world-coordinates of the vertex (constraints and anim included). */
-    mul_v3_m4v3(v_wco, ob->object_to_world, v_cos[i]);
+    mul_v3_m4v3(v_wco, ob->object_to_world, v_cos[indices ? indices[i] : i]);
     /* Return distance between both coordinates. */
     dist[i] = len_v3v3(v_wco, obr->object_to_world[3]);
   }
@@ -421,7 +429,7 @@ static Mesh *modifyMesh(ModifierData *md, const ModifierEvalContext *ctx, Mesh *
 
   WeightVGProximityModifierData *wmd = (WeightVGProximityModifierData *)md;
   MDeformWeight **dw, **tdw;
-  float(*v_cos)[3] = nullptr; /* The vertices coordinates. */
+  const float(*v_cos)[3] = nullptr; /* The vertices coordinates. */
   Object *ob = ctx->object;
   Object *obr = nullptr; /* Our target object. */
   int defgrp_index;
@ -516,16 +524,7 @@ static Mesh *modifyMesh(ModifierData *md, const ModifierEvalContext *ctx, Mesh *
|
||||
MEM_freeN(tidx);
|
||||
|
||||
/* Get our vertex coordinates. */
|
||||
if (index_num != verts_num) {
|
||||
const float(*tv_cos)[3] = BKE_mesh_vert_positions(mesh);
|
||||
v_cos = static_cast<float(*)[3]>(MEM_malloc_arrayN(index_num, sizeof(float[3]), __func__));
|
||||
for (i = 0; i < index_num; i++) {
|
||||
copy_v3_v3(v_cos[i], tv_cos[indices[i]]);
|
||||
}
|
||||
}
|
||||
else {
|
||||
v_cos = BKE_mesh_vert_coords_alloc(mesh, nullptr);
|
||||
}
|
||||
v_cos = BKE_mesh_vert_positions(mesh);
|
||||
|
||||
/* Compute wanted distances. */
|
||||
if (wmd->proximity_mode == MOD_WVG_PROXIMITY_OBJECT) {
|
||||
@ -562,7 +561,7 @@ static Mesh *modifyMesh(ModifierData *md, const ModifierEvalContext *ctx, Mesh *
|
||||
|
||||
BLI_SPACE_TRANSFORM_SETUP(&loc2trgt, ob, obr);
|
||||
get_vert2geom_distance(
|
||||
index_num, v_cos, dists_v, dists_e, dists_f, target_mesh, &loc2trgt);
|
||||
index_num, v_cos, indices, dists_v, dists_e, dists_f, target_mesh, &loc2trgt);
|
||||
for (i = 0; i < index_num; i++) {
|
||||
new_w[i] = dists_v ? dists_v[i] : FLT_MAX;
|
||||
if (dists_e) {
|
||||
@ -579,11 +578,11 @@ static Mesh *modifyMesh(ModifierData *md, const ModifierEvalContext *ctx, Mesh *
|
||||
}
|
||||
/* Else, fall back to default obj2vert behavior. */
|
||||
else {
|
||||
get_vert2ob_distance(index_num, v_cos, new_w, ob, obr);
|
||||
get_vert2ob_distance(index_num, v_cos, indices, new_w, ob, obr);
|
||||
}
|
||||
}
|
||||
else {
|
||||
get_vert2ob_distance(index_num, v_cos, new_w, ob, obr);
|
||||
get_vert2ob_distance(index_num, v_cos, indices, new_w, ob, obr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -632,7 +631,6 @@ static Mesh *modifyMesh(ModifierData *md, const ModifierEvalContext *ctx, Mesh *
|
||||
MEM_freeN(org_w);
|
||||
MEM_freeN(new_w);
|
||||
MEM_freeN(dw);
|
||||
MEM_freeN(v_cos);
|
||||
MEM_SAFE_FREE(indices);
|
||||
|
||||
#ifdef USE_TIMEIT
|
||||
|
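The hunks above swap an owned, possibly compacted copy of the vertex positions for a borrowed const view, pushing index translation into the distance helpers; the matching MEM_freeN(v_cos) in the cleanup hunk therefore disappears. The ownership change, condensed (illustrative only, not the patch's literal code):

/* Before: an owned, compacted copy the caller must free. */
float (*copy)[3] = static_cast<float(*)[3]>(
    MEM_malloc_arrayN(index_num, sizeof(float[3]), __func__));
/* ... copy_v3_v3(copy[i], positions[indices[i]]) for each i ... */
MEM_freeN(copy);

/* After: a borrowed const view; indices are resolved at each use site,
 * so there is nothing to free. */
const float (*view)[3] = BKE_mesh_vert_positions(mesh);
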
@ -825,36 +825,47 @@ class RenderLayerOperation : public NodeOperation {
  void execute() override
  {
    const int view_layer = bnode().custom1;
    GPUTexture *pass_texture = context().get_input_texture(view_layer, RE_PASSNAME_COMBINED);
    GPUTexture *combined_texture = context().get_input_texture(view_layer, RE_PASSNAME_COMBINED);

    execute_image(pass_texture);
    execute_alpha(pass_texture);
    execute_pass("Image", combined_texture, "compositor_read_pass_color");
    execute_pass("Alpha", combined_texture, "compositor_read_pass_alpha");

    /* Other output passes are not supported for now, so allocate them as invalid. */
    for (const bNodeSocket *output : this->node()->output_sockets()) {
      if (!STR_ELEM(output->identifier, "Image", "Alpha")) {
        Result &unsupported_result = get_result(output->identifier);
        if (unsupported_result.should_compute()) {
          unsupported_result.allocate_invalid();
          context().set_info_message("Viewport compositor setup not fully supported");
        }
      if (STR_ELEM(output->identifier, "Image", "Alpha")) {
        continue;
      }

      GPUTexture *pass_texture = context().get_input_texture(view_layer, output->identifier);
      if (output->type == SOCK_FLOAT) {
        execute_pass(output->identifier, pass_texture, "compositor_read_pass_float");
      }
      else if (output->type == SOCK_VECTOR) {
        execute_pass(output->identifier, pass_texture, "compositor_read_pass_vector");
      }
      else if (output->type == SOCK_RGBA) {
        execute_pass(output->identifier, pass_texture, "compositor_read_pass_color");
      }
      else {
        BLI_assert_unreachable();
      }
    }
  }

  void execute_image(GPUTexture *pass_texture)
  void execute_pass(const char *pass_name, GPUTexture *pass_texture, const char *shader_name)
  {
    Result &image_result = get_result("Image");
    Result &image_result = get_result(pass_name);
    if (!image_result.should_compute()) {
      return;
    }
    if (pass_texture == nullptr) {
      /* Pass not rendered (yet). */
      /* Pass not rendered yet, or not supported by viewport. */
      image_result.allocate_invalid();
      context().set_info_message("Viewport compositor setup not fully supported");
      return;
    }

    GPUShader *shader = shader_manager().get("compositor_read_pass");
    GPUShader *shader = shader_manager().get(shader_name);
    GPU_shader_bind(shader);

    /* The compositing space might be limited to a subset of the pass texture, so only read that
@ -876,41 +887,6 @@ class RenderLayerOperation : public NodeOperation {
    GPU_texture_unbind(pass_texture);
    image_result.unbind_as_image();
  }

  void execute_alpha(GPUTexture *pass_texture)
  {
    Result &alpha_result = get_result("Alpha");
    if (!alpha_result.should_compute()) {
      return;
    }
    if (pass_texture == nullptr) {
      /* Pass not rendered (yet). */
      alpha_result.allocate_invalid();
      return;
    }

    GPUShader *shader = shader_manager().get("compositor_read_pass_alpha");
    GPU_shader_bind(shader);

    /* The compositing space might be limited to a subset of the pass texture, so only read that
     * compositing region into an appropriately sized texture. */
    const rcti compositing_region = context().get_compositing_region();
    const int2 lower_bound = int2(compositing_region.xmin, compositing_region.ymin);
    GPU_shader_uniform_2iv(shader, "compositing_region_lower_bound", lower_bound);

    const int input_unit = GPU_shader_get_sampler_binding(shader, "input_tx");
    GPU_texture_bind(pass_texture, input_unit);

    const int2 compositing_region_size = context().get_compositing_region_size();
    alpha_result.allocate_texture(Domain(compositing_region_size));
    alpha_result.bind_as_image(shader, "output_img");

    compute_dispatch_threads_at_least(shader, compositing_region_size);

    GPU_shader_unbind();
    GPU_texture_unbind(pass_texture);
    alpha_result.unbind_as_image();
  }
};

static NodeOperation *get_compositor_operation(Context &context, DNode node)

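The refactor collapses the near-duplicate execute_image/execute_alpha bodies into a single execute_pass parameterized by result name and shader. The dispatch it introduces is a plain socket-type-to-shader mapping, sketched below as a hypothetical free function:

/* Mirror of the if/else chain above: pick the read-pass shader that
 * copies a given socket type out of the render pass texture. */
static const char *read_pass_shader_for_socket(const int socket_type)
{
  switch (socket_type) {
    case SOCK_FLOAT:
      return "compositor_read_pass_float";
    case SOCK_VECTOR:
      return "compositor_read_pass_vector";
    case SOCK_RGBA:
      return "compositor_read_pass_color";
    default:
      return nullptr; /* Unsupported socket type; callers assert. */
  }
}
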
@ -77,7 +77,7 @@ class ViewerOperation : public NodeOperation {
    const Result &second_image = get_input("Image_001");
    second_image.bind_as_texture(shader, "second_image_tx");

    GPUTexture *output_texture = context().get_output_texture();
    GPUTexture *output_texture = context().get_viewer_output_texture();
    const int image_unit = GPU_shader_get_sampler_binding(shader, "output_img");
    GPU_texture_image_bind(output_texture, image_unit);

@ -105,7 +105,7 @@ class ViewerOperation : public NodeOperation {
      color.w = alpha.get_float_value();
    }

    GPU_texture_clear(context().get_output_texture(), GPU_DATA_FLOAT, color);
    GPU_texture_clear(context().get_viewer_output_texture(), GPU_DATA_FLOAT, color);
  }

  /* Executes when the alpha channel of the image is ignored. */
@ -123,7 +123,7 @@ class ViewerOperation : public NodeOperation {
    const Result &image = get_input("Image");
    image.bind_as_texture(shader, "input_tx");

    GPUTexture *output_texture = context().get_output_texture();
    GPUTexture *output_texture = context().get_viewer_output_texture();
    const int image_unit = GPU_shader_get_sampler_binding(shader, "output_img");
    GPU_texture_image_bind(output_texture, image_unit);

@ -151,7 +151,7 @@ class ViewerOperation : public NodeOperation {
    const Result &image = get_input("Image");
    image.bind_as_texture(shader, "input_tx");

    GPUTexture *output_texture = context().get_output_texture();
    GPUTexture *output_texture = context().get_viewer_output_texture();
    const int image_unit = GPU_shader_get_sampler_binding(shader, "output_img");
    GPU_texture_image_bind(output_texture, image_unit);

@ -181,7 +181,7 @@ class ViewerOperation : public NodeOperation {
    const Result &alpha = get_input("Alpha");
    alpha.bind_as_texture(shader, "alpha_tx");

    GPUTexture *output_texture = context().get_output_texture();
    GPUTexture *output_texture = context().get_viewer_output_texture();
    const int image_unit = GPU_shader_get_sampler_binding(shader, "output_img");
    GPU_texture_image_bind(output_texture, image_unit);

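The mechanical rename across these ViewerOperation hunks reflects a real split: the viewer node now writes to a dedicated texture instead of sharing the composite output. Reduced to an interface sketch (only the two getters come from the hunks; the surrounding class shape is an assumption):

class CompositorContextInterface {
 public:
  /* Final composite result, later written into the RenderResult. */
  virtual GPUTexture *get_output_texture() = 0;
  /* Viewer-node result, pushed to the "Viewer Node" image instead. */
  virtual GPUTexture *get_viewer_output_texture() = 0;
};
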
@ -2,9 +2,13 @@
 *
 * SPDX-License-Identifier: GPL-2.0-or-later */

#include <cstring>

#include "BLI_threads.h"
#include "BLI_vector.hh"

#include "MEM_guardedalloc.h"

#include "BKE_global.h"
#include "BKE_image.h"
#include "BKE_node.hh"
@ -12,6 +16,9 @@

#include "DRW_engine.h"

#include "IMB_colormanagement.h"
#include "IMB_imbuf.h"

#include "COM_context.hh"
#include "COM_evaluator.hh"

@ -63,6 +70,9 @@ class Context : public realtime_compositor::Context {
  /* Output combined texture. */
  GPUTexture *output_texture_ = nullptr;

  /* Viewer output texture. */
  GPUTexture *viewer_output_texture_ = nullptr;

 public:
  Context(const Scene &scene,
          const RenderData &render_data,
@ -81,7 +91,8 @@ class Context : public realtime_compositor::Context {

  virtual ~Context()
  {
    GPU_texture_free(output_texture_);
    GPU_TEXTURE_FREE_SAFE(output_texture_);
    GPU_TEXTURE_FREE_SAFE(viewer_output_texture_);
  }

  const bNodeTree &get_node_tree() const override
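GPU_TEXTURE_FREE_SAFE replaces the unconditional GPU_texture_free so that a viewer texture that was never created is not freed. The usual shape of such a free-and-null macro, sketched here rather than copied from Blender's headers:

#define TEXTURE_FREE_SAFE(tex) \
  do { \
    if (tex != nullptr) { \
      GPU_texture_free(tex); \
      tex = nullptr; /* Prevent double frees on repeated cleanup. */ \
    } \
  } while (0)
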
@ -94,6 +105,11 @@ class Context : public realtime_compositor::Context {
    return use_file_output_;
  }

  bool use_composite_output() const override
  {
    return true;
  }

  bool use_texture_color_management() const override
  {
    return BKE_scene_check_color_management_enabled(&scene_);
@ -121,7 +137,7 @@ class Context : public realtime_compositor::Context {

  GPUTexture *get_output_texture() override
  {
    /* TODO: support outputting for viewers and previews.
    /* TODO: support outputting for previews.
     * TODO: just a temporary hack, needs to get stored in RenderResult,
     * once that supports GPU buffers. */
    if (output_texture_ == nullptr) {
@ -138,6 +154,25 @@ class Context : public realtime_compositor::Context {
    return output_texture_;
  }

  GPUTexture *get_viewer_output_texture() override
  {
    /* TODO: support outputting previews.
     * TODO: just a temporary hack, needs to get stored in RenderResult,
     * once that supports GPU buffers. */
    if (viewer_output_texture_ == nullptr) {
      const int2 size = get_render_size();
      viewer_output_texture_ = GPU_texture_create_2d("compositor_viewer_output_texture",
                                                     size.x,
                                                     size.y,
                                                     1,
                                                     GPU_RGBA16F,
                                                     GPU_TEXTURE_USAGE_GENERAL,
                                                     NULL);
    }

    return viewer_output_texture_;
  }

  GPUTexture *get_input_texture(int view_layer_id, const char *pass_name) override
  {
    /* TODO: eventually this should get cached on the RenderResult itself when
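get_viewer_output_texture follows the same lazy-creation pattern as the combined output: allocate at render size on first request, then hand back the cached pointer. The pattern in isolation (hypothetical helper; the GPU_texture_create_2d arguments follow the hunk above):

static GPUTexture *ensure_texture(GPUTexture **cache, const int width, const int height)
{
  if (*cache == nullptr) {
    /* Half-float RGBA, general usage, no initial data; created once and
     * reused on every later call. */
    *cache = GPU_texture_create_2d("compositor_cached_texture",
                                   width,
                                   height,
                                   1,
                                   GPU_RGBA16F,
                                   GPU_TEXTURE_USAGE_GENERAL,
                                   nullptr);
  }
  return *cache;
}
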
@ -224,6 +259,10 @@ class Context : public realtime_compositor::Context {

  void output_to_render_result()
  {
    if (!output_texture_) {
      return;
    }

    Render *re = RE_GetSceneRender(&scene_);
    RenderResult *rr = RE_AcquireResultWrite(re);

@ -253,6 +292,55 @@ class Context : public realtime_compositor::Context {
    BKE_image_signal(G.main, image, nullptr, IMA_SIGNAL_FREE);
    BLI_thread_unlock(LOCK_DRAW_IMAGE);
  }

  void viewer_output_to_viewer_image()
  {
    if (!viewer_output_texture_) {
      return;
    }

    Image *image = BKE_image_ensure_viewer(G.main, IMA_TYPE_COMPOSITE, "Viewer Node");

    ImageUser image_user = {0};
    image_user.multi_index = BKE_scene_multiview_view_id_get(&render_data_, view_name_);

    if (BKE_scene_multiview_is_render_view_first(&render_data_, view_name_)) {
      BKE_image_ensure_viewer_views(&render_data_, image, &image_user);
    }

    BLI_thread_lock(LOCK_DRAW_IMAGE);

    void *lock;
    ImBuf *image_buffer = BKE_image_acquire_ibuf(image, &image_user, &lock);

    const int2 render_size = get_render_size();
    if (image_buffer->x != render_size.x || image_buffer->y != render_size.y) {
      imb_freerectImBuf(image_buffer);
      imb_freerectfloatImBuf(image_buffer);
      IMB_freezbuffloatImBuf(image_buffer);
      image_buffer->x = render_size.x;
      image_buffer->y = render_size.y;
      imb_addrectfloatImBuf(image_buffer, 4);
      image_buffer->userflags |= IB_DISPLAY_BUFFER_INVALID;
    }

    BKE_image_release_ibuf(image, image_buffer, lock);
    BLI_thread_unlock(LOCK_DRAW_IMAGE);

    GPU_memory_barrier(GPU_BARRIER_TEXTURE_UPDATE);
    float *output_buffer = (float *)GPU_texture_read(viewer_output_texture_, GPU_DATA_FLOAT, 0);

    std::memcpy(image_buffer->float_buffer.data,
                output_buffer,
                render_size.x * render_size.y * 4 * sizeof(float));

    MEM_freeN(output_buffer);

    BKE_image_partial_update_mark_full_update(image);
    if (node_tree_.runtime->update_draw) {
      node_tree_.runtime->update_draw(node_tree_.runtime->udh);
    }
  }
};

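The readback at the end of viewer_output_to_viewer_image is a tightly packed RGBA float copy: read the texture back, then memcpy width * height * 4 floats into the ImBuf, whose dimensions the resize branch above guarantees match the render size. The buffer math in isolation (hypothetical helper):

#include <cstring>

static void copy_rgba_float_pixels(float *dst, const float *src, const int width, const int height)
{
  /* 4 channels per pixel; source and destination sizes must match, which
   * is why the ImBuf is resized before this point. */
  const size_t num_bytes = sizeof(float) * 4 * size_t(width) * size_t(height);
  std::memcpy(dst, src, num_bytes);
}
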
/* Render Realtime Compositor */
@ -289,6 +377,7 @@ void RealtimeCompositor::execute()
  DRW_render_context_enable(&render_);
  evaluator_->evaluate();
  context_->output_to_render_result();
  context_->viewer_output_to_viewer_image();
  DRW_render_context_disable(&render_);
}