Realtime Compositor: Add node previews for shader operations #109250

Merged
Omar Emara merged 3 commits from OmarEmaraDev/blender:shader-operation-preview into main 2023-06-23 18:46:25 +02:00
6 changed files with 171 additions and 113 deletions

View File

@@ -45,10 +45,7 @@ class NodeOperation : public Operation {
void compute_results_reference_counts(const Schedule &schedule);
protected:
/* Compute a preview for the operation and set it to the bNodePreview of the node. This is only
* done for nodes which enable previews, are not hidden, and are part of the active node context.
* The preview is computed as a lower resolution version of the output of the get_preview_result
* method. */
/* Compute a node preview using the result returned from the get_preview_result method. */
void compute_preview() override;
/* Returns a reference to the derived node that this operation represents. */
@@ -67,10 +64,6 @@ class NodeOperation : public Operation {
* guaranteed not to be returned, since the node will always either have a linked output or an
* allocated input. */
Result *get_preview_result();
/* Resize the given input result to the given preview size and set it to the preview buffer after
* applying the necessary color management processor. */
void write_preview_from_result(bNodePreview &preview, Result &input_result);
};
} // namespace blender::realtime_compositor

View File

@@ -112,6 +112,9 @@ class ShaderOperation : public Operation {
* the attribute that was created for it. This is used to share the same attribute with all
* inputs that are linked to the same output socket. */
Map<DOutputSocket, GPUNodeLink *> output_to_material_attribute_map_;
/* A vector set that stores all output sockets whose results are used to compute previews for
* nodes inside the shader operation. */
VectorSet<DOutputSocket> preview_outputs_;
public:
/* Construct and compile a GPU material from the given shader compile unit by calling
@@ -125,6 +128,13 @@ class ShaderOperation : public Operation {
* shader. */
void execute() override;
/* Compute a node preview for every node inside the shader operation that requires a preview.
*
* Previews are computed from results that are populated for the outputs used to compute them,
* even if those outputs are only internally linked. Those outputs are stored and tracked in the
* preview_outputs_ vector set; see the populate_results_for_node method for more information. */
void compute_preview() override;
/* Get the identifier of the operation output corresponding to the given output socket. This is
* called by the compiler to identify the operation output that provides the result for an input
* by providing the output socket that the input is linked to. See
@@ -138,9 +148,14 @@ class ShaderOperation : public Operation {
/* Compute and set the initial reference counts of all the results of the operation. The
* reference counts of the results are the number of operations that use those results, which is
* computed as the number of inputs whose node is part of the schedule and is linked to the
* output corresponding to each of the results of the operation. The node execution schedule is
* given as an input. */
* computed as the number of inputs linked to the output corresponding to each of the results of
* the operation, counting only the linked inputs whose node is part of the schedule but not part
* of the shader operation, since inputs that are part of the shader operation are internal links.
*
* Additionally, results that are used as node previews get an extra reference count because
* they are referenced and released by the compute_preview method.
*
* The node execution schedule is given as an input. */
void compute_results_reference_counts(const Schedule &schedule);
private:
@@ -209,7 +224,8 @@ class ShaderOperation : public Operation {
GPUMaterial *material);
/* Populate the output results of the shader operation for output sockets of the given node that
* are linked to nodes outside of the shader operation. */
* are linked to nodes outside of the shader operation or are used to compute a preview for the
* node. */
void populate_results_for_node(DNode node, GPUMaterial *material);
/* Given the output socket of a node that is part of the shader operation which is linked to an

View File

@@ -70,4 +70,11 @@ void compute_dispatch_threads_at_least(GPUShader *shader,
int2 threads_range,
int2 local_size = int2(16));
/* Returns true if a node preview needs to be computed for the given node. */
bool is_node_preview_needed(const DNode &node);
/* Computes a lower resolution version of the given result and sets it as a preview for the given
* node after applying the appropriate color management specified in the given context. */
void compute_preview_from_result(Context &context, const DNode &node, Result &input_result);
} // namespace blender::realtime_compositor
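
Taken together, these two utilities are meant to be used as a guard plus a writer: the first decides whether a preview is wanted at all, the second does the downscaling, color management, and write into the bNodePreview. After this patch, NodeOperation::compute_preview reduces to essentially the following thin wrapper (reconstructed here from the flattened diff below for readability):

void NodeOperation::compute_preview()
{
  /* Skip nodes that have previews disabled, are hidden, or are outside the active context. */
  if (is_node_preview_needed(node())) {
    /* Downscale, color manage, and write the chosen result into the node's bNodePreview. */
    compute_preview_from_result(context(), node(), *get_preview_result());
  }
}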

View File

@@ -5,21 +5,16 @@
#include <memory>
#include "BLI_assert.h"
#include "BLI_index_range.hh"
#include "BLI_map.hh"
#include "BLI_math_base.h"
#include "BLI_math_base.hh"
#include "BLI_math_color.h"
#include "BLI_math_vector_types.hh"
#include "BLI_string_ref.hh"
#include "BLI_task.hh"
#include "BLI_vector.hh"
#include "GPU_shader.h"
#include "GPU_texture.h"
#include "IMB_colormanagement.h"
#include "DNA_node_types.h"
#include "NOD_derived_node_tree.hh"
@@ -53,53 +48,11 @@ NodeOperation::NodeOperation(Context &context, DNode node) : Operation(context),
}
}
/* Given the size of a result, compute a lower resolution size for a preview. The greater dimension
* will be assigned an arbitrarily chosen size of 128, while the other dimension will get the size
* that maintains the same aspect ratio. */
static int2 compute_preview_size(int2 size)
{
const int greater_dimension_size = 128;
if (size.x > size.y) {
return int2(greater_dimension_size, int(greater_dimension_size * (float(size.y) / size.x)));
}
else {
return int2(int(greater_dimension_size * (float(size.x) / size.y)), greater_dimension_size);
}
}
void NodeOperation::compute_preview()
{
if (!(node()->flag & NODE_PREVIEW)) {
return;
if (is_node_preview_needed(node())) {
compute_preview_from_result(context(), node(), *get_preview_result());
}
if (node()->flag & NODE_HIDDEN) {
return;
}
/* Only compute previews for nodes in the active context. */
if (node().context()->instance_key().value !=
node().context()->derived_tree().active_context().instance_key().value)
{
return;
}
/* Initialize node tree previews if not already initialized. */
bNodeTree *root_tree = const_cast<bNodeTree *>(
&node().context()->derived_tree().root_context().btree());
if (!root_tree->previews) {
root_tree->previews = BKE_node_instance_hash_new("node previews");
}
Result *preview_result = get_preview_result();
const int2 preview_size = compute_preview_size(preview_result->domain().size);
node()->runtime->preview_xsize = preview_size.x;
node()->runtime->preview_ysize = preview_size.y;
bNodePreview *preview = bke::node_preview_verify(
root_tree->previews, node().instance_key(), preview_size.x, preview_size.y, true);
write_preview_from_result(*preview, *preview_result);
}
Result *NodeOperation::get_preview_result()
@@ -124,55 +77,6 @@ Result *NodeOperation::get_preview_result()
return nullptr;
}
void NodeOperation::write_preview_from_result(bNodePreview &preview, Result &input_result)
{
GPUShader *shader = shader_manager().get("compositor_compute_preview");
GPU_shader_bind(shader);
if (input_result.type() == ResultType::Float) {
GPU_texture_swizzle_set(input_result.texture(), "rrr1");
}
input_result.bind_as_texture(shader, "input_tx");
const int2 preview_size = int2(preview.xsize, preview.ysize);
Result preview_result = Result::Temporary(ResultType::Color, texture_pool());
preview_result.allocate_texture(Domain(preview_size));
preview_result.bind_as_image(shader, "preview_img");
compute_dispatch_threads_at_least(shader, preview_size);
input_result.unbind_as_texture();
preview_result.unbind_as_image();
GPU_shader_unbind();
GPU_memory_barrier(GPU_BARRIER_TEXTURE_FETCH);
float *preview_pixels = static_cast<float *>(
GPU_texture_read(preview_result.texture(), GPU_DATA_FLOAT, 0));
preview_result.release();
ColormanageProcessor *color_processor = IMB_colormanagement_display_processor_new(
&context().get_scene().view_settings, &context().get_scene().display_settings);
threading::parallel_for(IndexRange(preview_size.y), 1, [&](const IndexRange sub_y_range) {
for (const int64_t y : sub_y_range) {
for (const int64_t x : IndexRange(preview_size.x)) {
const int index = (y * preview_size.x + x) * 4;
IMB_colormanagement_processor_apply_v4(color_processor, preview_pixels + index);
rgba_float_to_uchar(preview.rect + index, preview_pixels + index);
}
}
});
/* Restore original swizzle mask set above. */
if (input_result.type() == ResultType::Float) {
GPU_texture_swizzle_set(input_result.texture(), "rgba");
}
IMB_colormanagement_processor_free(color_processor);
MEM_freeN(preview_pixels);
}
void NodeOperation::compute_results_reference_counts(const Schedule &schedule)
{
for (const bNodeSocket *output : this->node()->output_sockets()) {

View File

@@ -71,6 +71,15 @@ void ShaderOperation::execute()
GPU_shader_unbind();
}
void ShaderOperation::compute_preview()
{
for (const DOutputSocket &output : preview_outputs_) {
Result &result = get_result(get_output_identifier_from_output_socket(output));
compute_preview_from_result(context(), output.node(), result);
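/* Release the result; this consumes the extra reference count that
* compute_results_reference_counts adds for preview outputs. */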
result.release();
}
}
StringRef ShaderOperation::get_output_identifier_from_output_socket(DOutputSocket output_socket)
{
return output_sockets_to_output_identifiers_map_.lookup(output_socket);
@@ -84,7 +93,7 @@ Map<std::string, DOutputSocket> &ShaderOperation::get_inputs_to_linked_outputs_m
void ShaderOperation::compute_results_reference_counts(const Schedule &schedule)
{
for (const auto item : output_sockets_to_output_identifiers_map_.items()) {
const int reference_count = number_of_inputs_linked_to_output_conditioned(
int reference_count = number_of_inputs_linked_to_output_conditioned(
item.key, [&](DInputSocket input) {
/* We only consider inputs that are not part of the shader operations, because inputs
* that are part of the shader operations are internal and do not deal with the result
@@ -92,6 +101,10 @@ void ShaderOperation::compute_results_reference_counts(const Schedule &schedule)
return schedule.contains(input.node()) && !compile_unit_.contains(input.node());
});
if (preview_outputs_.contains(item.key)) {
reference_count++;
}
get_result(item.value).set_initial_reference_count(reference_count);
}
}
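
As a concrete illustration of the counting rule (values chosen purely for illustration): if an output tracked in preview_outputs_ is also linked to two inputs of scheduled nodes outside the compile unit, its result starts with a reference count of 3, and the extra preview reference is the one released by compute_preview above once the preview has been written.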
@@ -248,17 +261,41 @@ void ShaderOperation::declare_operation_input(DInputSocket input_socket,
inputs_to_linked_outputs_map_.add_new(input_identifier, output_socket);
}
static DOutputSocket find_preview_output_socket(const DNode &node)
{
if (!is_node_preview_needed(node)) {
return DOutputSocket();
}
for (const bNodeSocket *output : node->output_sockets()) {
if (output->is_logically_linked()) {
return DOutputSocket(node.context(), output);
}
}
return DOutputSocket();
}
void ShaderOperation::populate_results_for_node(DNode node, GPUMaterial *material)
{
const DOutputSocket preview_output = find_preview_output_socket(node);
for (const bNodeSocket *output : node->output_sockets()) {
const DOutputSocket doutput{node.context(), output};
/* If any of the nodes linked to the output are not part of the shader operation, then an
* output result needs to be populated for it. */
const bool need_to_populate_result = is_output_linked_to_node_conditioned(
const bool is_operation_output = is_output_linked_to_node_conditioned(
doutput, [&](DNode node) { return !compile_unit_.contains(node); });
if (need_to_populate_result) {
/* If the output is used as the node preview, then an output result needs to be populated for
* it, and we additionally keep track of that output to later compute the preview from. */
const bool is_preview_output = doutput == preview_output;
if (is_preview_output) {
preview_outputs_.add(doutput);
}
if (is_operation_output || is_preview_output) {
populate_operation_result(doutput, material);
}
}

View File

@@ -4,10 +4,15 @@
#include "BLI_assert.h"
#include "BLI_function_ref.hh"
#include "BLI_index_range.hh"
#include "BLI_math_color.h"
#include "BLI_math_vector.hh"
#include "BLI_math_vector_types.hh"
#include "BLI_task.hh"
#include "BLI_utildefines.h"
#include "IMB_colormanagement.h"
#include "DNA_node_types.h"
#include "NOD_derived_node_tree.hh"
@@ -133,4 +138,100 @@ void compute_dispatch_threads_at_least(GPUShader *shader, int2 threads_range, in
GPU_compute_dispatch(shader, groups_to_dispatch.x, groups_to_dispatch.y, 1);
}
bool is_node_preview_needed(const DNode &node)
{
if (!(node->flag & NODE_PREVIEW)) {
return false;
}
if (node->flag & NODE_HIDDEN) {
return false;
}
/* Only compute previews for nodes in the active context. */
if (node.context()->instance_key().value !=
node.context()->derived_tree().active_context().instance_key().value)
{
return false;
}
return true;
}
/* Given the size of a result, compute a lower resolution size for a preview. The greater dimension
* will be assigned an arbitrarily chosen size of 128, while the other dimension will get the size
* that maintains the same aspect ratio. */
static int2 compute_preview_size(int2 size)
{
const int greater_dimension_size = 128;
if (size.x > size.y) {
return int2(greater_dimension_size, int(greater_dimension_size * (float(size.y) / size.x)));
}
else {
return int2(int(greater_dimension_size * (float(size.x) / size.y)), greater_dimension_size);
}
}
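/* For example, a 512x256 result maps to a 128x64 preview, while a 100x400 result maps to a
* 32x128 preview (illustrative values, not taken from this patch). */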
void compute_preview_from_result(Context &context, const DNode &node, Result &input_result)
{
/* Initialize node tree previews if not already initialized. */
bNodeTree *root_tree = const_cast<bNodeTree *>(
&node.context()->derived_tree().root_context().btree());
if (!root_tree->previews) {
root_tree->previews = BKE_node_instance_hash_new("node previews");
}
const int2 preview_size = compute_preview_size(input_result.domain().size);
node->runtime->preview_xsize = preview_size.x;
node->runtime->preview_ysize = preview_size.y;
bNodePreview *preview = bke::node_preview_verify(
root_tree->previews, node.instance_key(), preview_size.x, preview_size.y, true);
GPUShader *shader = context.shader_manager().get("compositor_compute_preview");
GPU_shader_bind(shader);
if (input_result.type() == ResultType::Float) {
GPU_texture_swizzle_set(input_result.texture(), "rrr1");
}
input_result.bind_as_texture(shader, "input_tx");
Result preview_result = Result::Temporary(ResultType::Color, context.texture_pool());
preview_result.allocate_texture(Domain(preview_size));
preview_result.bind_as_image(shader, "preview_img");
compute_dispatch_threads_at_least(shader, preview_size);
input_result.unbind_as_texture();
preview_result.unbind_as_image();
GPU_shader_unbind();
GPU_memory_barrier(GPU_BARRIER_TEXTURE_FETCH);
float *preview_pixels = static_cast<float *>(
GPU_texture_read(preview_result.texture(), GPU_DATA_FLOAT, 0));
preview_result.release();
ColormanageProcessor *color_processor = IMB_colormanagement_display_processor_new(
&context.get_scene().view_settings, &context.get_scene().display_settings);
threading::parallel_for(IndexRange(preview_size.y), 1, [&](const IndexRange sub_y_range) {
for (const int64_t y : sub_y_range) {
for (const int64_t x : IndexRange(preview_size.x)) {
const int index = (y * preview_size.x + x) * 4;
IMB_colormanagement_processor_apply_v4(color_processor, preview_pixels + index);
rgba_float_to_uchar(preview->rect + index, preview_pixels + index);
}
}
});
/* Restore original swizzle mask set above. */
if (input_result.type() == ResultType::Float) {
GPU_texture_swizzle_set(input_result.texture(), "rgba");
}
IMB_colormanagement_processor_free(color_processor);
MEM_freeN(preview_pixels);
}
} // namespace blender::realtime_compositor