Compositor: add new node: Kuwahara filter #107015
|
@ -350,8 +350,10 @@ if(WITH_COMPOSITOR_CPU)
|
|||
operations/COM_GaussianXBlurOperation.h
|
||||
operations/COM_GaussianYBlurOperation.cc
|
||||
operations/COM_GaussianYBlurOperation.h
|
||||
operations/COM_KuwaharaOperation.h
|
||||
operations/COM_KuwaharaOperation.cc
|
||||
operations/COM_KuwaharaClassicOperation.h
|
||||
operations/COM_KuwaharaClassicOperation.cc
|
||||
operations/COM_KuwaharaAnisotropicOperation.h
|
||||
operations/COM_KuwaharaAnisotropicOperation.cc
|
||||
operations/COM_MovieClipAttributeOperation.cc
|
||||
operations/COM_MovieClipAttributeOperation.h
|
||||
operations/COM_MovieDistortionOperation.cc
|
||||
|
|
|
@ -1,8 +1,12 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
* Copyright 2011 Blender Foundation. */
|
||||
* Copyright 2023 Blender Foundation. */
|
||||
|
||||
#include "COM_KuwaharaNode.h"
|
||||
#include "COM_KuwaharaOperation.h"
|
||||
#include "COM_FastGaussianBlurOperation.h"
|
||||
#include "COM_KuwaharaAnisotropicOperation.h"
|
||||
#include "COM_KuwaharaClassicOperation.h"
|
||||
#include "COM_MathBaseOperation.h"
|
||||
#include "COM_SetValueOperation.h"
|
||||
|
||||
namespace blender::compositor {
|
||||
|
||||
|
@ -12,13 +16,98 @@ void KuwaharaNode::convert_to_operations(NodeConverter &converter,
|
|||
const bNode *node = this->get_bnode();
|
||||
const NodeKuwaharaData *data = (const NodeKuwaharaData *)node->storage;
|
||||
|
||||
KuwaharaOperation *operation = new KuwaharaOperation();
|
||||
operation->set_kernel_size(data->kernel_size);
|
||||
operation->set_variation(data->variation);
|
||||
switch (data->variation) {
|
||||
case CMP_NODE_KUWAHARA_CLASSIC: {
|
||||
KuwaharaClassicOperation *operation = new KuwaharaClassicOperation();
|
||||
operation->set_kernel_size(data->kernel_size);
|
||||
|
||||
converter.add_operation(operation);
|
||||
converter.map_input_socket(get_input_socket(0), operation->get_input_socket(0));
|
||||
converter.map_output_socket(get_output_socket(0), operation->get_output_socket());
|
||||
converter.add_operation(operation);
|
||||
converter.map_input_socket(get_input_socket(0), operation->get_input_socket(0));
|
||||
converter.map_output_socket(get_output_socket(0), operation->get_output_socket());
|
||||
} break;
|
||||
|
||||
case CMP_NODE_KUWAHARA_ANISOTROPIC: {
|
||||
/* Edge detection */
|
||||
auto const_fact = new SetValueOperation();
|
||||
const_fact->set_value(1.0f);
|
||||
converter.add_operation(const_fact);
|
||||
|
||||
auto sobel_x = new ConvolutionFilterOperation();
|
||||
sobel_x->set3x3Filter(1, 0, -1, 2, 0, -2, 1, 0, -1);
|
||||
converter.add_operation(sobel_x);
|
||||
converter.map_input_socket(get_input_socket(0), sobel_x->get_input_socket(0));
|
||||
converter.add_link(const_fact->get_output_socket(0), sobel_x->get_input_socket(1));
|
||||
|
||||
auto sobel_y = new ConvolutionFilterOperation();
|
||||
sobel_y->set3x3Filter(1, 2, 1, 0, 0, 0, -1, -2, -1);
|
||||
converter.add_operation(sobel_y);
|
||||
converter.map_input_socket(get_input_socket(0), sobel_y->get_input_socket(0));
|
||||
converter.add_link(const_fact->get_output_socket(0), sobel_y->get_input_socket(1));
|
||||
|
||||
/* Compute intensity of edges */
|
||||
auto sobel_xx = new MathMultiplyOperation();
|
||||
auto sobel_yy = new MathMultiplyOperation();
|
||||
auto sobel_xy = new MathMultiplyOperation();
|
||||
converter.add_operation(sobel_xx);
|
||||
converter.add_operation(sobel_yy);
|
||||
converter.add_operation(sobel_xy);
|
||||
|
||||
converter.add_link(sobel_x->get_output_socket(0), sobel_xx->get_input_socket(0));
|
||||
converter.add_link(sobel_x->get_output_socket(0), sobel_xx->get_input_socket(1));
|
||||
|
||||
converter.add_link(sobel_y->get_output_socket(0), sobel_yy->get_input_socket(0));
|
||||
converter.add_link(sobel_y->get_output_socket(0), sobel_yy->get_input_socket(1));
|
||||
|
||||
converter.add_link(sobel_x->get_output_socket(0), sobel_xy->get_input_socket(0));
|
||||
converter.add_link(sobel_y->get_output_socket(0), sobel_xy->get_input_socket(1));
|
||||
|
||||
/* blurring for more robustness. */
|
||||
// Note: blurring doesn't make as big of a difference as I was expecting,
|
||||
// especially around edges.
|
||||
// Todo: investigate further and remove if necessary. For now the parameter is kept for
|
||||
// better user feedback
|
||||
float sigma = data->sigma;
|
||||
|
||||
auto blur_sobel_xx = new FastGaussianBlurOperation();
|
||||
auto blur_sobel_yy = new FastGaussianBlurOperation();
|
||||
auto blur_sobel_xy = new FastGaussianBlurOperation();
|
||||
|
||||
blur_sobel_yy->set_size(sigma, sigma);
|
||||
blur_sobel_xx->set_size(sigma, sigma);
|
||||
blur_sobel_xy->set_size(sigma, sigma);
|
||||
|
||||
converter.add_operation(blur_sobel_xx);
|
||||
converter.add_operation(blur_sobel_yy);
|
||||
converter.add_operation(blur_sobel_xy);
|
||||
|
||||
converter.add_link(sobel_xx->get_output_socket(0), blur_sobel_xx->get_input_socket(0));
|
||||
converter.add_link(sobel_yy->get_output_socket(0), blur_sobel_yy->get_input_socket(0));
|
||||
converter.add_link(sobel_xy->get_output_socket(0), blur_sobel_xy->get_input_socket(0));
|
||||
|
||||
// For now, orientation is part of kuwahara operation.
|
||||
// todo: implement orientation as a separate operation
|
||||
// auto orientation = new OrientationOperation(); // OrientationOperation
|
||||
|
||||
/* Apply anisotropic Kuwahara filter */
|
||||
KuwaharaAnisotropicOperation *aniso = new KuwaharaAnisotropicOperation();
|
||||
aniso->set_kernel_size(data->kernel_size);
|
||||
converter.map_input_socket(get_input_socket(0), aniso->get_input_socket(0));
|
||||
converter.add_operation(aniso);
|
||||
|
||||
converter.add_link(blur_sobel_xx->get_output_socket(0), aniso->get_input_socket(1));
|
||||
converter.add_link(blur_sobel_yy->get_output_socket(0), aniso->get_input_socket(2));
|
||||
converter.add_link(blur_sobel_xy->get_output_socket(0), aniso->get_input_socket(3));
|
||||
|
||||
converter.map_output_socket(get_output_socket(0), aniso->get_output_socket(0));
|
||||
|
||||
// For debug. Todo: remove
|
||||
// converter.map_output_socket(get_output_socket(1), sobel_xx->get_output_socket(0));
|
||||
// converter.map_output_socket(get_output_socket(2), blur_sobel_xx->get_output_socket(0));
|
||||
// converter.map_output_socket(get_output_socket(3), blur_sobel_xy->get_output_socket(0));
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace blender::compositor
|
||||
|
|
|
@ -10,6 +10,7 @@ namespace blender::compositor {
|
|||
FastGaussianBlurOperation::FastGaussianBlurOperation() : BlurBaseOperation(DataType::Color)
{
  /* No IIR working buffer until execution starts. */
  iirgaus_ = nullptr;
  /* This operation is hard-wired to the fast-Gaussian filter kernel. */
  data_.filtertype = R_FILTER_FAST_GAUSS;
}
|
||||
|
||||
void FastGaussianBlurOperation::execute_pixel(float output[4], int x, int y, void *data)
|
||||
|
@ -67,6 +68,15 @@ void FastGaussianBlurOperation::deinit_execution()
|
|||
BlurBaseOperation::deinit_mutex();
|
||||
}
|
||||
|
||||
void FastGaussianBlurOperation::set_size(int size_x, int size_y)
|
||||
{
|
||||
// todo: there should be a better way to use the operation without knowing specifics of the blur
|
||||
// node (data_) Could use factory pattern to solve this problem.
|
||||
data_.sizex = size_x;
|
||||
data_.sizey = size_y;
|
||||
sizeavailable_ = true;
|
||||
}
|
||||
|
||||
void *FastGaussianBlurOperation::initialize_tile_data(rcti *rect)
|
||||
{
|
||||
lock_mutex();
|
||||
|
|
|
@ -27,6 +27,8 @@ class FastGaussianBlurOperation : public BlurBaseOperation {
|
|||
void deinit_execution() override;
|
||||
void init_execution() override;
|
||||
|
||||
void set_size(int size_x, int size_y);
|
||||
|
||||
void get_area_of_interest(int input_idx, const rcti &output_area, rcti &r_input_area) override;
|
||||
void update_memory_buffer_started(MemoryBuffer *output,
|
||||
const rcti &area,
|
||||
|
|
|
@ -0,0 +1,166 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
* Copyright 2023 Blender Foundation. */
|
||||
|
||||
#include "COM_KuwaharaAnisotropicOperation.h"
|
||||
#include "BLI_vector.hh"
|
||||
|
||||
namespace blender::compositor {
|
||||
|
||||
KuwaharaAnisotropicOperation::KuwaharaAnisotropicOperation()
{
  /* Inputs: 0 = image, 1..3 = blurred structure-tensor components (xx, yy, xy). */
  for (int i = 0; i < 4; i++) {
    this->add_input_socket(DataType::Color);
  }

  /* Output 0 is the filtered result. Outputs 1..3 exist only for debugging.
   * TODO: remove the debug outputs. */
  for (int i = 0; i < 4; i++) {
    this->add_output_socket(DataType::Color);
  }

  this->set_kernel_size(8);

  this->flags_.is_fullframe_operation = true;
}
|
||||
|
||||
void KuwaharaAnisotropicOperation::init_execution()
|
||||
{
|
||||
image_reader_ = this->get_input_socket_reader(0);
|
||||
}
|
||||
|
||||
void KuwaharaAnisotropicOperation::deinit_execution()
|
||||
{
|
||||
image_reader_ = nullptr;
|
||||
}
|
||||
|
||||
void KuwaharaAnisotropicOperation::execute_pixel_sampled(float output[4],
|
||||
float x,
|
||||
float y,
|
||||
PixelSampler sampler)
|
||||
{
|
||||
/* Not implemented */
|
||||
}
|
||||
|
||||
void KuwaharaAnisotropicOperation::set_kernel_size(int kernel_size)
|
||||
{
|
||||
kernel_size_ = kernel_size;
|
||||
}
|
||||
|
||||
int KuwaharaAnisotropicOperation::get_kernel_size()
|
||||
{
|
||||
return kernel_size_;
|
||||
}
|
||||
|
||||
void KuwaharaAnisotropicOperation::update_memory_buffer_partial(MemoryBuffer *output,
                                                                const rcti &area,
                                                                Span<MemoryBuffer *> inputs)
{
  /*
   * Implementation based on Kyprianidis, Jan & Kang, Henry & Döllner, Jürgen. (2009).
   * "Image and Video Abstraction by Anisotropic Kuwahara Filtering".
   * Comput. Graph. Forum. 28. 1955-1963. 10.1111/j.1467-8659.2009.01574.x.
   * Used reference implementation from lime image processing library (MIT license).
   *
   * Inputs: 0 = image, 1..3 = blurred structure-tensor components (xx, yy, xy).
   */

  MemoryBuffer *image = inputs[0];
  MemoryBuffer *s_xx = inputs[1];
  MemoryBuffer *s_yy = inputs[2];
  MemoryBuffer *s_xy = inputs[3];

  /* TODO: BLI_assert all inputs have the same size. */

  const int n_div = 8; /* Number of angular sectors, recommended by the paper's authors. */
  const double angle = 2.0 * M_PI / n_div;
  const double q = 3.0; /* Sharpness of the inverse-variance sector weighting. */
  const float EPS = 1.0e-10f;

  for (BuffersIterator<float> it = output->iterate_with(inputs, area); !it.is_end(); ++it) {
    const int x = it.x;
    const int y = it.y;

    /* Compute orientation from the smoothed structure tensor. */
    /* TODO: make orientation a separate operation. */

    /* For now use the green channel to compute orientation. */
    /* TODO: convert to HSV and compute orientation and strength on the luminance channel. */
    const float a = s_xx->get_value(x, y, 1);
    const float b = s_xy->get_value(x, y, 1);
    const float c = s_yy->get_value(x, y, 1);

    /* Compute eigenvalues of the symmetric 2x2 structure tensor [[a, b], [b, c]].
     * The discriminant is sqrt((a - c)^2 + 4 b^2) (previously computed incorrectly
     * as sqrt((a - b)^2 + 4 b c), which can even go negative under the root). */
    const double tr = a + c;
    const double discr = sqrt((a - c) * (a - c) + 4.0 * b * b);
    const double lambda1 = (tr + discr) / 2;
    const double lambda2 = (tr - discr) / 2;

    /* Orientation of the dominant eigenvector, and anisotropy strength in [0, 1].
     * Guard the denominator rather than only the both-exactly-zero case. */
    const double orientation = 0.5 * atan2(2 * b, a - c);
    const double denom = lambda1 + lambda2;
    const double strength = (fabs(denom) > EPS) ? (lambda1 - lambda2) / denom : 0.0;

    /* Elliptical kernel: compressed across the edge, stretched along it.
     * Hoist the rotation terms out of the per-sample loops (loop-invariant). */
    const float sx = 1.0f / (strength + 1.0f);
    const float sy = 1.0f + strength; /* Was `(1.0f + strength) / 1.0f` — same value. */
    const double theta = -orientation;
    const double cos_theta = cos(theta);
    const double sin_theta = sin(theta);

    for (int ch = 0; ch < 3; ch++) {
      /* TODO: compute anisotropy and weights on the luminance channel to avoid color
       * artifacts. */

      Vector<float> sum(n_div, 0.0f);
      Vector<float> var(n_div, 0.0f);
      Vector<float> weight(n_div, 0.0f);

      for (int dy = -kernel_size_; dy <= kernel_size_; dy++) {
        for (int dx = -kernel_size_; dx <= kernel_size_; dx++) {
          if (dx == 0 && dy == 0) {
            continue;
          }

          /* Rotate and scale the kernel. This is the "anisotropic" part. */
          int dx2 = static_cast<int>(sx * (cos_theta * dx - sin_theta * dy));
          int dy2 = static_cast<int>(sy * (sin_theta * dx + cos_theta * dy));
          int xx = x + dx2;
          int yy = y + dy2;

          if (xx >= 0 && yy >= 0 && xx < image->get_width() && yy < image->get_height()) {
            /* Assign the sample to one of the `n_div` angular sectors.
             * (`phi` was previously named `theta`, shadowing the outer rotation angle.) */
            float ddx2 = float(dx2);
            float ddy2 = float(dy2);
            float phi = atan2(ddy2, ddx2) + M_PI;
            int t = static_cast<int>(floor(phi / angle)) % n_div;

            /* Gaussian fall-off with squared distance from the kernel center. */
            float d2 = dx2 * dx2 + dy2 * dy2;
            float g = exp(-d2 / (2.0 * kernel_size_));
            float v = image->get_value(xx, yy, ch);
            sum[t] += g * v;
            var[t] += g * v * v;
            weight[t] += g;
          }
        }
      }

      /* Weighted average of sector means, weighted by inverse sector deviation:
       * homogeneous (low-variance) sectors dominate, which preserves edges. */
      float de = 0.0f;
      float nu = 0.0f;
      for (int i = 0; i < n_div; i++) {
        sum[i] = weight[i] != 0 ? sum[i] / weight[i] : 0.0f;
        var[i] = weight[i] != 0 ? var[i] / weight[i] : 0.0f;
        var[i] = var[i] - sum[i] * sum[i];
        var[i] = var[i] > EPS ? sqrt(var[i]) : EPS;
        float w = powf(var[i], -q);

        de += sum[i] * w;
        nu += w;
      }

      float val = nu > EPS ? de / nu : 0.0f;
      CLAMP_MAX(val, 1.0f);
      it.out[ch] = val;
    }

    /* Alpha channel is passed through unchanged. */
    it.out[3] = image->get_value(x, y, 3);
  }
}
|
||||
|
||||
|
||||
} // namespace blender::compositor
|
|
@ -0,0 +1,30 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
* Copyright 2023 Blender Foundation. */
|
||||
|
||||
#pragma once
|
||||
|
||||
#include "COM_MultiThreadedOperation.h"
|
||||
|
||||
namespace blender::compositor {
|
||||
|
||||
/* Anisotropic variant of the Kuwahara filter: smooths the image while preserving
 * edges by stretching the sampling kernel along the local edge orientation,
 * which is derived from the blurred structure tensor supplied on sockets 1..3. */
class KuwaharaAnisotropicOperation : public MultiThreadedOperation {
  /* Reader for input socket 0 (the image); only set up for tiled execution. */
  SocketReader *image_reader_;

  /* Radius in pixels of the sampling window. */
  int kernel_size_;

 public:
  KuwaharaAnisotropicOperation();

  void init_execution() override;
  void deinit_execution() override;
  /* Tiled execution entry point; currently a stub — the operation is full-frame. */
  void execute_pixel_sampled(float output[4], float x, float y, PixelSampler sampler) override;

  void set_kernel_size(int kernel_size);
  int get_kernel_size();

  /* Full-frame evaluation: inputs are image, blurred xx, yy and xy tensor components. */
  void update_memory_buffer_partial(MemoryBuffer *output,
                                    const rcti &area,
                                    Span<MemoryBuffer *> inputs) override;
};
|
||||
|
||||
} // namespace blender::compositor
|
|
@ -1,66 +1,50 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
* Copyright 2011 Blender Foundation. */
|
||||
* Copyright 2023 Blender Foundation. */
|
||||
|
||||
#include "COM_KuwaharaOperation.h"
|
||||
#include "COM_KuwaharaClassicOperation.h"
|
||||
|
||||
namespace blender::compositor {
|
||||
|
||||
KuwaharaOperation::KuwaharaOperation()
|
||||
KuwaharaClassicOperation::KuwaharaClassicOperation()
|
||||
{
|
||||
this->add_input_socket(DataType::Color);
|
||||
this->add_output_socket(DataType::Color);
|
||||
this->set_kernel_size(4.4f);
|
||||
this->set_kernel_size(4);
|
||||
|
||||
this->flags_.is_fullframe_operation = true;
|
||||
}
|
||||
|
||||
void KuwaharaOperation::init_execution()
|
||||
void KuwaharaClassicOperation::init_execution()
|
||||
{
|
||||
image_reader_ = this->get_input_socket_reader(0);
|
||||
}
|
||||
|
||||
void KuwaharaOperation::deinit_execution()
|
||||
void KuwaharaClassicOperation::deinit_execution()
|
||||
{
|
||||
image_reader_ = nullptr;
|
||||
}
|
||||
|
||||
void KuwaharaOperation::execute_pixel_sampled(float output[4],
|
||||
float x,
|
||||
float y,
|
||||
PixelSampler sampler)
|
||||
void KuwaharaClassicOperation::execute_pixel_sampled(float output[4],
|
||||
float x,
|
||||
float y,
|
||||
PixelSampler sampler)
|
||||
{
|
||||
float input_value[4];
|
||||
image_reader_->read_sampled(input_value, x, y, sampler);
|
||||
|
||||
output[0] = input_value[0] + 1.0;
|
||||
output[1] = input_value[1] + 2.0;
|
||||
output[2] = input_value[2] + 3.0;
|
||||
output[3] = input_value[3] + 4.0;
|
||||
/* Not implemented */
|
||||
}
|
||||
|
||||
void KuwaharaOperation::set_kernel_size(int kernel_size)
|
||||
void KuwaharaClassicOperation::set_kernel_size(int kernel_size)
|
||||
{
|
||||
kernel_size_ = kernel_size;
|
||||
}
|
||||
|
||||
int KuwaharaOperation::get_kernel_size()
|
||||
int KuwaharaClassicOperation::get_kernel_size()
|
||||
{
|
||||
return kernel_size_;
|
||||
}
|
||||
|
||||
void KuwaharaOperation::set_variation(int variation)
|
||||
{
|
||||
variation_ = variation;
|
||||
}
|
||||
|
||||
int KuwaharaOperation::get_variation()
|
||||
{
|
||||
return variation_;
|
||||
}
|
||||
|
||||
void KuwaharaOperation::update_memory_buffer_partial(MemoryBuffer *output,
|
||||
const rcti &area,
|
||||
Span<MemoryBuffer *> inputs)
|
||||
void KuwaharaClassicOperation::update_memory_buffer_partial(MemoryBuffer *output,
|
||||
const rcti &area,
|
||||
Span<MemoryBuffer *> inputs)
|
||||
{
|
||||
MemoryBuffer *image = inputs[0];
|
||||
|
||||
|
@ -74,13 +58,13 @@ void KuwaharaOperation::update_memory_buffer_partial(MemoryBuffer *output,
|
|||
float var[4] = {0.0f, 0.0f, 0.0f, 0.0f};
|
||||
int cnt[4] = {0, 0, 0, 0};
|
||||
|
||||
/* Split surroundings of */
|
||||
/* Split surroundings of pixel into 4 overlapping regions */
|
||||
for (int dy = -kernel_size_; dy <= kernel_size_; dy++) {
|
||||
for (int dx = -kernel_size_; dx <= kernel_size_; dx++) {
|
||||
|
||||
int xx = x + dx;
|
||||
int yy = y + dy;
|
||||
if (xx >= 0 && yy >= 0 && xx < area.xmax && yy < area.ymax) {
|
||||
if (xx >= 0 && yy >= 0 && xx < image->get_width() && yy < image->get_height()) {
|
||||
float v;
|
||||
v = image->get_value(xx, yy, ch);
|
||||
|
|
@ -1,5 +1,5 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
* Copyright 2011 Blender Foundation. */
|
||||
* Copyright 2023 Blender Foundation. */
|
||||
|
||||
#pragma once
|
||||
|
||||
|
@ -7,14 +7,13 @@
|
|||
|
||||
namespace blender::compositor {
|
||||
|
||||
class KuwaharaOperation : public MultiThreadedOperation {
|
||||
class KuwaharaClassicOperation : public MultiThreadedOperation {
|
||||
SocketReader *image_reader_;
|
||||
|
||||
int kernel_size_;
|
||||
int variation_;
|
||||
|
||||
public:
|
||||
KuwaharaOperation();
|
||||
KuwaharaClassicOperation();
|
||||
|
||||
void init_execution() override;
|
||||
void deinit_execution() override;
|
||||
|
@ -23,9 +22,6 @@ class KuwaharaOperation : public MultiThreadedOperation {
|
|||
void set_kernel_size(int kernel_size);
|
||||
int get_kernel_size();
|
||||
|
||||
void set_variation(int variation);
|
||||
int get_variation();
|
||||
|
||||
void update_memory_buffer_partial(MemoryBuffer *output,
|
||||
const rcti &area,
|
||||
Span<MemoryBuffer *> inputs) override;
|
|
@ -879,7 +879,7 @@ typedef struct NodeBilateralBlurData {
|
|||
/* Settings of the compositor Kuwahara filter node. DNA struct — do not reorder. */
typedef struct NodeKuwaharaData {
  /* Radius in pixels of the filter's sampling window. */
  short kernel_size;
  /* CMPNodeKuwahara: classic or anisotropic variation. */
  short variation;
  char _pad[4];
  /* Smoothing applied to the edge/structure-tensor images before the anisotropic
   * filter (presumably a Gaussian size — confirm against the blur operation). */
  float sigma;
} NodeKuwaharaData;
|
||||
|
||||
typedef struct NodeAntiAliasingData {
|
||||
|
@ -2102,6 +2102,12 @@ typedef enum CMPNodeGlareType {
|
|||
CMP_NODE_GLARE_GHOST = 3,
|
||||
} CMPNodeGlareType;
|
||||
|
||||
/* Kuwahara filter node variations. Stored in NodeKuwaharaData.variation. */
typedef enum CMPNodeKuwahara {
  CMP_NODE_KUWAHARA_CLASSIC = 0,
  CMP_NODE_KUWAHARA_ANISOTROPIC = 1,
} CMPNodeKuwahara;
|
||||
|
||||
/* Plane track deform node. */
|
||||
|
||||
enum {
|
||||
|
|
|
@ -9265,6 +9265,14 @@ static void def_cmp_kuwahara(StructRNA *srna)
|
|||
RNA_def_property_enum_items(prop, variation_items);
|
||||
RNA_def_property_ui_text(prop, "", "Variation of Kuwahara filter to use.");
|
||||
RNA_def_property_update(prop, NC_NODE | NA_EDITED, "rna_Node_update");
|
||||
|
||||
prop = RNA_def_property(srna, "sigma", PROP_FLOAT, PROP_NONE);
|
||||
RNA_def_property_float_sdna(prop, NULL, "sigma");
|
||||
RNA_def_property_ui_text(
|
||||
prop,
|
||||
"Sigma",
|
||||
"Edges get smoothed before applying filter. Sigma controls smoothing degree.");
|
||||
RNA_def_property_update(prop, NC_NODE | NA_EDITED, "rna_Node_update");
|
||||
}
|
||||
|
||||
static void def_cmp_antialiasing(StructRNA *srna)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
* Copyright 2020 Blender Foundation */
|
||||
* Copyright 2023 Blender Foundation */
|
||||
|
||||
/** \file
|
||||
* \ingroup cmpnodes
|
||||
|
@ -22,6 +22,11 @@ static void cmp_node_kuwahara_declare(NodeDeclarationBuilder &b)
|
|||
.default_value({1.0f, 1.0f, 1.0f, 1.0f})
|
||||
.compositor_domain_priority(0);
|
||||
b.add_output<decl::Color>(N_("Image"));
|
||||
|
||||
// For debug. Todo:remove
|
||||
// b.add_output<decl::Color>(N_("Sobel x"));
|
||||
// b.add_output<decl::Color>(N_("Sobel xx blurred"));
|
||||
// b.add_output<decl::Color>(N_("Sobel xy blurred"));
|
||||
}
|
||||
|
||||
static void node_composit_init_kuwahara(bNodeTree * /*ntree*/, bNode *node)
|
||||
|
@ -40,6 +45,12 @@ static void node_composit_buts_kuwahara(uiLayout *layout, bContext * /*C*/, Poin
|
|||
|
||||
uiItemR(col, ptr, "variation", 0, nullptr, ICON_NONE);
|
||||
uiItemR(col, ptr, "kernel_size", 0, nullptr, ICON_NONE);
|
||||
|
||||
const int variation = RNA_enum_get(ptr, "variation");
|
||||
|
||||
if(variation == CMP_NODE_KUWAHARA_ANISOTROPIC) {
|
||||
uiItemR(col, ptr, "sigma", 0, nullptr, ICON_NONE);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace blender::nodes::node_composite_kuwahara_cc
|
||||
|
|
Loading…
Reference in New Issue
In other performance critical areas we do
Again, this is not something that I know for sure will have a performance impact, but it might be worth doing nevertheless.