Animation: Allow NLA strips to be horizontally shuffled #105532

Merged
Nate Rupsis merged 22 commits from nrupsis/blender:NLA-horizontal-shuffle into main 2023-04-03 17:10:50 +02:00
97 changed files with 2889 additions and 309 deletions
Showing only changes of commit 6543deed45

View File

@ -933,6 +933,11 @@ extern bool GHOST_SupportsCursorWarp(void);
*/
extern bool GHOST_SupportsWindowPosition(void);
/**
* Support a separate primary clipboard.
*/
extern bool GHOST_SupportsPrimaryClipboard(void);
/**
* Assign the callback which generates a back-trace (may be NULL).
*/

View File

@ -327,6 +327,11 @@ class GHOST_ISystem {
*/
virtual bool supportsWindowPosition() = 0;
/**
* Return true when a separate primary clipboard is supported.
*/
virtual bool supportsPrimaryClipboard() = 0;
/**
* Focus window after opening, or put them in the background.
*/

View File

@ -907,6 +907,12 @@ bool GHOST_SupportsWindowPosition(void)
return system->supportsWindowPosition();
}
bool GHOST_SupportsPrimaryClipboard(void)
{
GHOST_ISystem *system = GHOST_ISystem::getSystem();
return system->supportsPrimaryClipboard();
}
void GHOST_SetBacktraceHandler(GHOST_TBacktraceFn backtrace_fn)
{
GHOST_ISystem::setBacktraceFn(backtrace_fn);

View File

@ -428,6 +428,11 @@ bool GHOST_System::supportsWindowPosition()
return true;
}
bool GHOST_System::supportsPrimaryClipboard()
{
return false;
}
void GHOST_System::initDebug(GHOST_Debug debug)
{
m_is_debug_enabled = debug.flags & GHOST_kDebugDefault;

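The GHOST hunks above only add the capability query; the editor changes further down in this commit all gate their primary-clipboard writes on it. A condensed sketch of that pattern (the helper name is hypothetical; the WM calls appear verbatim in the hunks below):

/* Mirror the current selection into the primary clipboard, but only when the
 * platform supports one (Wayland and X11 return true; other GHOST back-ends
 * inherit the false default added above). */
static void selection_update_primary_clipboard(const char *buf)
{
  if ((WM_capabilities_flag() & WM_CAPABILITY_PRIMARY_CLIPBOARD) == 0) {
    return;
  }
  /* Passing true selects the primary clipboard rather than the copy buffer. */
  WM_clipboard_text_set(buf, true);
}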
View File

@ -152,6 +152,7 @@ class GHOST_System : public GHOST_ISystem {
bool supportsCursorWarp(void);
bool supportsWindowPosition(void);
bool supportsPrimaryClipboard(void);
/**
* Focus window after opening, or put them in the background.

View File

@ -1117,7 +1117,7 @@ using GWL_RegistryHandler_UpdateFn = void (*)(GWL_Display *display,
using GWL_RegistryEntry_RemoveFn = void (*)(GWL_Display *display, void *user_data, bool on_exit);
struct GWL_RegistryHandler {
/** Pointer to the name (not the name it's self), needed as the values aren't set on startup. */
/** Pointer to the name (not the name itself), needed as the values aren't set on startup. */
const char *const *interface_p = nullptr;
/** Add the interface. */
@ -2016,7 +2016,13 @@ static char *read_file_as_buffer(const int fd, const bool nil_terminate, size_t
{
struct ByteChunk {
ByteChunk *next;
char data[4096 - sizeof(ByteChunk *)];
/* NOTE(@ideasman42): On GNOME-SHELL-43.3, non power-of-two values
* (e.g. 1023 or 4088) make `read()` *intermittently* include uninitialized memory
* (failing to read the end of the chunk) as well as truncating the end of the whole buffer.
* The WAYLAND spec doesn't mention buffer-size so this may be a bug in GNOME-SHELL.
* Whatever the case, using a power of two isn't a problem (besides some slop-space waste).
* This workaround isn't necessary for KDE & WLROOTS based compositors, see: #106040. */
char data[4096];
};
ByteChunk *chunk_first = nullptr, **chunk_link_p = &chunk_first;
bool ok = true;
@ -3621,7 +3627,7 @@ static void tablet_seat_handle_tool_added(void *data,
GWL_TabletTool *tablet_tool = new GWL_TabletTool();
tablet_tool->seat = seat;
/* Every tool has it's own cursor wl_surface. */
/* Every tool has its own cursor wl_surface. */
tablet_tool->wl_surface_cursor = wl_compositor_create_surface(seat->system->wl_compositor());
ghost_wl_surface_tag_cursor_tablet(tablet_tool->wl_surface_cursor);
@ -3963,7 +3969,7 @@ static void keyboard_handle_key(void *data,
}
else if (xkb_keymap_key_repeats(xkb_state_get_keymap(seat->xkb_state), key_code)) {
if (etype == GHOST_kEventKeyDown) {
/* Any other key-down always cancels (and may start it's own repeat timer). */
/* Any other key-down always cancels (and may start its own repeat timer). */
timer_action = CANCEL;
}
else {
@ -6682,6 +6688,11 @@ bool GHOST_SystemWayland::supportsWindowPosition()
return false;
}
bool GHOST_SystemWayland::supportsPrimaryClipboard()
{
return true;
}
bool GHOST_SystemWayland::cursor_grab_use_software_display_get(const GHOST_TGrabCursorMode mode)
{
/* Caller must lock `server_mutex`. */

View File

@ -144,6 +144,7 @@ class GHOST_SystemWayland : public GHOST_System {
bool supportsCursorWarp() override;
bool supportsWindowPosition() override;
bool supportsPrimaryClipboard() override;
/* WAYLAND utility functions (share window/system logic). */

View File

@ -1740,6 +1740,11 @@ GHOST_TSuccess GHOST_SystemX11::setCursorPosition(int32_t x, int32_t y)
return GHOST_kSuccess;
}
bool GHOST_SystemX11::supportsPrimaryClipboard()
{
return true;
}
void GHOST_SystemX11::addDirtyWindow(GHOST_WindowX11 *bad_wind)
{
GHOST_ASSERT((bad_wind != nullptr), "addDirtyWindow() nullptr ptr trapped (window)");

View File

@ -168,6 +168,8 @@ class GHOST_SystemX11 : public GHOST_System {
*/
GHOST_TSuccess getButtons(GHOST_Buttons &buttons) const;
bool supportsPrimaryClipboard() override;
/**
* Flag a window as dirty. This will
* generate a GHOST window update event on a call to processEvents()

View File

@ -70,7 +70,7 @@ _script_module_dirs = "startup", "modules"
# Base scripts, this points to the directory containing: "modules" & "startup" (see `_script_module_dirs`).
# In Blender's code-base this is `./scripts`.
#
# NOTE: in virtually all cases this should match `BLENDER_SYSTEM_SCRIPTS` as this script is it's self a system script,
# NOTE: in virtually all cases this should match `BLENDER_SYSTEM_SCRIPTS` as this script is itself a system script,
# it must be in the `BLENDER_SYSTEM_SCRIPTS` by definition and there is no need for a look-up from `_bpy_script_paths`.
_script_base_dir = _os.path.dirname(_os.path.dirname(_os.path.dirname(_os.path.dirname(__file__))))

View File

@ -330,6 +330,7 @@ class GRAPH_MT_slider(Menu):
layout.operator("graph.blend_to_neighbor", text="Blend to Neighbor")
layout.operator("graph.blend_to_default", text="Blend to Default Value")
layout.operator("graph.ease", text="Ease")
layout.operator("graph.gaussian_smooth", text="Smooth")
class GRAPH_MT_view_pie(Menu):

View File

@ -1479,7 +1479,7 @@ struct FaceDetails {
/* Details about the fallback fonts we ship, so that we can load only when needed. */
static const struct FaceDetails static_face_details[] = {
{"lastresort.woff2", UINT32_MAX, UINT32_MAX, UINT32_MAX, UINT32_MAX},
{"Noto Sans CJK Regular.woff2", 0x30000083L, 0x2BDF3C10L, 0x16L, 0},
{"Noto Sans CJK Regular.woff2", 0x30000083L, 0x29DF3C10L, 0x16L, 0},
{"NotoEmoji-VariableFont_wght.woff2", 0x80000003L, 0x241E4ACL, 0x14000000L, 0x4000000L},
{"NotoSansArabic-VariableFont_wdth,wght.woff2",
TT_UCR_ARABIC,

View File

@ -631,20 +631,53 @@ static FT_UInt blf_glyph_index_from_charcode(FontBLF **font, const uint charcode
return 0;
}
/* Not found in main font, so look in the others. */
FontBLF *last_resort = NULL;
/* First look in currently-loaded cached fonts that match the coverage bit. Super fast. */
int coverage_bit = blf_charcode_to_coverage_bit(charcode);
for (int i = 0; i < BLF_MAX_FONT; i++) {
FontBLF *f = global_font[i];
if (!f || f == *font || !(f->face) || !(f->flags & BLF_DEFAULT) ||
(!((*font)->flags & BLF_MONOSPACED) && (f->flags & BLF_MONOSPACED)) ||
f->flags & BLF_LAST_RESORT) {
continue;
}
if (coverage_bit < 0 || blf_font_has_coverage_bit(f, coverage_bit)) {
glyph_index = blf_get_char_index(f, charcode);
if (glyph_index) {
*font = f;
return glyph_index;
}
}
}
/* Next look only in unloaded fonts that match the coverage bit. */
for (int i = 0; i < BLF_MAX_FONT; i++) {
FontBLF *f = global_font[i];
if (!f || f == *font || (f->face) || !(f->flags & BLF_DEFAULT) ||
(!((*font)->flags & BLF_MONOSPACED) && (f->flags & BLF_MONOSPACED)) ||
f->flags & BLF_LAST_RESORT) {
continue;
}
if (coverage_bit < 0 || blf_font_has_coverage_bit(f, coverage_bit)) {
glyph_index = blf_get_char_index(f, charcode);
if (glyph_index) {
*font = f;
return glyph_index;
}
}
}
/* Last look in anything else. Also check if we have a last-resort font. */
FontBLF *last_resort = NULL;
for (int i = 0; i < BLF_MAX_FONT; i++) {
FontBLF *f = global_font[i];
if (!f || f == *font || !(f->flags & BLF_DEFAULT)) {
continue;
}
if (f->flags & BLF_LAST_RESORT) {
last_resort = f;
continue;
}
if (coverage_bit < 0 || blf_font_has_coverage_bit(f, coverage_bit)) {
if (coverage_bit >= 0 && !blf_font_has_coverage_bit(f, coverage_bit)) {
glyph_index = blf_get_char_index(f, charcode);
if (glyph_index) {
*font = f;

View File

@ -58,7 +58,7 @@ struct bDeformGroup *BKE_object_defgroup_find_name(const struct Object *ob, cons
*
* \param use_default: How to handle cases where no symmetrical group is found.
* - false: sets these indices to -1, indicating the group should be ignored.
* - true: sets the index to its location in the array (making the group point to it's self).
* - true: sets the index to its location in the array (making the group point to itself).
* Enable this for symmetrical actions which apply weight operations on symmetrical vertices
* where the symmetrical group will be used (if found), otherwise the same group is used.
*

View File

@ -93,7 +93,7 @@ void txt_sel_all(struct Text *text);
void txt_sel_clear(struct Text *text);
void txt_sel_line(struct Text *text);
void txt_sel_set(struct Text *text, int startl, int startc, int endl, int endc);
char *txt_sel_to_buf(struct Text *text, size_t *r_buf_strlen);
char *txt_sel_to_buf(const struct Text *text, size_t *r_buf_strlen);
void txt_insert_buf(struct Text *text, const char *in_buffer, int in_buffer_len)
ATTR_NONNULL(1, 2);
void txt_split_curline(struct Text *text);

View File

@ -17,6 +17,7 @@
#include "BLI_string.h"
#include "BLI_string_utf8.h"
#include "BLI_string_utils.h"
#include "BLI_tempfile.h"
#include "BLI_utildefines.h"
#include "BKE_appdir.h" /* own include */
@ -1089,7 +1090,7 @@ void BKE_appdir_app_templates(ListBase *templates)
* Also make sure the temp dir has a trailing slash
*
* \param tempdir: The full path to the temporary temp directory.
* \param tempdir_len: The size of the \a tempdir buffer.
* \param tempdir_maxlen: The size of the \a tempdir buffer.
* \param userdir: Directory specified in user preferences (may be NULL).
* note that by default this is an empty string, only use when non-empty.
*/
@ -1098,37 +1099,14 @@ static void where_is_temp(char *tempdir, const size_t tempdir_maxlen, const char
tempdir[0] = '\0';
if (userdir && BLI_is_dir(userdir)) {
if (userdir && userdir[0] != '\0' && BLI_is_dir(userdir)) {
BLI_strncpy(tempdir, userdir, tempdir_maxlen);
}
if (tempdir[0] == '\0') {
const char *env_vars[] = {
#ifdef WIN32
"TEMP",
#else
/* Non standard (could be removed). */
"TMP",
/* Posix standard. */
"TMPDIR",
#endif
};
for (int i = 0; i < ARRAY_SIZE(env_vars); i++) {
const char *tmp = BLI_getenv(env_vars[i]);
if (tmp && (tmp[0] != '\0') && BLI_is_dir(tmp)) {
BLI_strncpy(tempdir, tmp, tempdir_maxlen);
break;
}
}
}
if (tempdir[0] == '\0') {
BLI_strncpy(tempdir, "/tmp/", tempdir_maxlen);
}
else {
/* add a trailing slash if needed */
/* Add a trailing slash if needed. */
BLI_path_slash_ensure(tempdir, tempdir_maxlen);
return;
}
BLI_temp_directory_path_get(tempdir, tempdir_maxlen);
}
static void tempdir_session_create(char *tempdir_session,

View File

@ -1450,7 +1450,7 @@ char *txt_to_buf(Text *text, size_t *r_buf_strlen)
return buf;
}
char *txt_sel_to_buf(Text *text, size_t *r_buf_strlen)
char *txt_sel_to_buf(const Text *text, size_t *r_buf_strlen)
{
char *buf;
size_t length = 0;

View File

@ -15,6 +15,8 @@
* This design allows some function overloads to be more efficient with certain types.
*/
#include <iostream>
#include "BLI_math_base.hh"
namespace blender::math {

View File

@ -18,6 +18,8 @@
* the fastest and more correct option.
*/
#include <iostream>
#include "BLI_math_angle_types.hh"
#include "BLI_math_base.hh"
#include "BLI_math_basis_types.hh"

View File

@ -21,6 +21,8 @@
* - Curve Tangent-Space: X-left, Y-up, Z-forward
*/
#include <iostream>
#include "BLI_math_base.hh"
#include "BLI_math_vector_types.hh"

View File

@ -24,6 +24,8 @@
* eg: `Euler3 my_euler(EulerOrder::XYZ); my_euler = my_quaternion;`
*/
#include <iostream>
#include "BLI_math_angle_types.hh"
#include "BLI_math_base.hh"
#include "BLI_math_basis_types.hh"

View File

@ -6,6 +6,8 @@
* \ingroup bli
*/
#include <iostream>
#include "BLI_math_angle_types.hh"
#include "BLI_math_base.hh"
#include "BLI_math_basis_types.hh"

View File

@ -102,6 +102,11 @@ size_t BLI_str_utf8_as_utf32(char32_t *__restrict dst_w,
size_t maxncpy) ATTR_NONNULL(1, 2);
size_t BLI_str_utf32_as_utf8(char *__restrict dst, const char32_t *__restrict src, size_t maxncpy)
ATTR_NONNULL(1, 2);
/**
* \return The length in bytes of \a src encoded as UTF-8, reading at most \a src_maxlen code points.
*/
size_t BLI_str_utf32_as_utf8_len_ex(const char32_t *src, size_t src_maxlen) ATTR_WARN_UNUSED_RESULT
ATTR_NONNULL(1);
/**
* \return The length in bytes of the null-terminated UTF-32 string \a src encoded as UTF-8.
*/

View File

@ -0,0 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. */
/** \file
* \ingroup bli
*/
#pragma once
#include "BLI_sys_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Get the path to a directory suitable for temporary files.
*
* The return path is guaranteed to exist and to be a directory, as well as to contain a trailing
* directory separator.
*
* At maximum the buffer_size number of characters is written to the temp_directory. The directory
* path is always null-terminated. */
void BLI_temp_directory_path_get(char *temp_directory, const size_t buffer_size);
#ifdef __cplusplus
}
#endif

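Typical usage, matching the tests added later in this commit (`FILE_MAX` comes from `BLI_path_util.h`):

char tempdir[FILE_MAX];
BLI_temp_directory_path_get(tempdir, sizeof(tempdir));
/* `tempdir` now exists, is a directory, and ends with a trailing separator. */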
View File

@ -147,6 +147,7 @@ set(SRC
intern/task_pool.cc
intern/task_range.cc
intern/task_scheduler.cc
intern/tempfile.c
intern/threads.cc
intern/time.c
intern/timecode.c
@ -347,6 +348,7 @@ set(SRC
BLI_system.h
BLI_task.h
BLI_task.hh
BLI_tempfile.h
BLI_threads.h
BLI_timecode.h
BLI_timeit.hh
@ -533,6 +535,7 @@ if(WITH_GTESTS)
tests/BLI_string_utf8_test.cc
tests/BLI_task_graph_test.cc
tests/BLI_task_test.cc
tests/BLI_tempfile_test.cc
tests/BLI_uuid_test.cc
tests/BLI_vector_set_test.cc
tests/BLI_vector_test.cc

View File

@ -651,6 +651,18 @@ size_t BLI_str_utf32_as_utf8(char *__restrict dst,
return len;
}
size_t BLI_str_utf32_as_utf8_len_ex(const char32_t *src, const size_t src_maxlen)
{
size_t len = 0;
const char32_t *src_end = src + src_maxlen;
while ((src < src_end) && *src) {
len += BLI_str_utf8_from_unicode_len((uint)*src++);
}
return len;
}
size_t BLI_str_utf32_as_utf8_len(const char32_t *src)
{
size_t len = 0;

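A minimal sketch of the intended pairing, mirroring `font_select_to_buffer()` later in this commit (the helper name and the span arguments are hypothetical):

static char *utf32_span_to_utf8(const char32_t *span, const size_t span_len)
{
  /* Measure at most `span_len` code points, even without a null terminator. */
  const size_t len_utf8 = BLI_str_utf32_as_utf8_len_ex(span, span_len);
  char *buf = static_cast<char *>(MEM_mallocN(len_utf8 + 1, __func__));
  BLI_str_utf32_as_utf8(buf, span, len_utf8 + 1);
  return buf;
}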
View File

@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. */
#include "BLI_tempfile.h"
#include "BLI_fileops.h"
#include "BLI_path_util.h"
#include "BLI_string.h"
void BLI_temp_directory_path_get(char *temp_directory, const size_t buffer_size)
{
temp_directory[0] = '\0';
const char *env_vars[] = {
#ifdef WIN32
"TEMP",
#else
/* Non standard (could be removed). */
"TMP",
/* Posix standard. */
"TMPDIR",
#endif
};
for (int i = 0; i < ARRAY_SIZE(env_vars); i++) {
const char *tmp = BLI_getenv(env_vars[i]);
if (tmp && (tmp[0] != '\0') && BLI_is_dir(tmp)) {
BLI_strncpy(temp_directory, tmp, buffer_size);
break;
}
}
if (temp_directory[0] == '\0') {
BLI_strncpy(temp_directory, "/tmp/", buffer_size);
}
else {
/* Add a trailing slash if needed. */
BLI_path_slash_ensure(temp_directory, buffer_size);
}
BLI_dir_create_recursive(temp_directory);
}

View File

@ -1,11 +1,16 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include "testing/testing.h"
#include "BLI_fileops.hh"
#include "BLI_path_util.h"
#include "BLI_string.h"
#include "BLI_system.h"
#include "BLI_tempfile.h"
#include "BLI_threads.h"
#include BLI_SYSTEM_PID_H
namespace blender::tests {
class ChangeWorkingDirectoryTest : public testing::Test {
@ -26,6 +31,20 @@ class ChangeWorkingDirectoryTest : public testing::Test {
BLI_threadapi_exit();
}
/* Make a pseudo-unique file name within the temp directory in a cross-platform manner. */
static std::string make_pseudo_unique_temp_filename()
{
char temp_dir[FILE_MAX];
BLI_temp_directory_path_get(temp_dir, sizeof(temp_dir));
const std::string directory_name = "blender_test_" + std::to_string(getpid());
char filepath[FILE_MAX];
BLI_path_join(filepath, sizeof(filepath), temp_dir, directory_name.c_str());
return filepath;
}
};
TEST(fileops, fstream_open_string_filename)
@ -71,7 +90,7 @@ TEST_F(ChangeWorkingDirectoryTest, change_working_directory)
ASSERT_TRUE(original_cwd == original_cwd_buff)
<< "Returned CWD path unexpectedly different than given char buffer.";
std::string temp_file_name(std::tmpnam(nullptr));
std::string temp_file_name = make_pseudo_unique_temp_filename();
test_temp_dir = temp_file_name + "овый";
if (BLI_exists(test_temp_dir.c_str())) {

View File

@ -0,0 +1,27 @@
/* SPDX-License-Identifier: Apache-2.0 */
#include "BLI_tempfile.h"
#include "BLI_fileops.h"
#include "BLI_path_util.h"
#include "testing/testing.h"
namespace blender::tests {
TEST(BLI_tempfile, BLI_temp_directory_path_get)
{
char temp_dir[FILE_MAX];
BLI_temp_directory_path_get(temp_dir, sizeof(temp_dir));
ASSERT_STRNE(temp_dir, "");
EXPECT_EQ(temp_dir[strlen(temp_dir) - 1], SEP);
EXPECT_TRUE(BLI_exists(temp_dir));
EXPECT_TRUE(BLI_is_dir(temp_dir));
EXPECT_TRUE(BLI_path_is_abs_from_cwd(temp_dir));
}
} // namespace blender::tests

View File

@ -281,6 +281,12 @@ static bool find_fcurve_segment(FCurve *fcu,
ListBase find_fcurve_segments(FCurve *fcu)
{
ListBase segments = {NULL, NULL};
/* Ignore baked curves. */
if (!fcu->bezt) {
return segments;
}
int segment_start_idx = 0;
int segment_len = 0;
int current_index = 0;
@ -392,7 +398,56 @@ void blend_to_default_fcurve(PointerRNA *id_ptr, FCurve *fcu, const float factor
move_key(&fcu->bezt[i], key_y_value);
}
}
/* ---------------- */
void ED_ANIM_get_1d_gauss_kernel(const float sigma, const int kernel_size, double *r_kernel)
{
BLI_assert(sigma > 0.0f);
BLI_assert(kernel_size > 0);
const double sigma_sq = 2.0 * sigma * sigma;
double sum = 0.0;
for (int i = 0; i < kernel_size; i++) {
const double normalized_index = (double)i / (kernel_size - 1);
r_kernel[i] = exp(-normalized_index * normalized_index / sigma_sq);
if (i == 0) {
sum += r_kernel[i];
}
else {
/* We only calculate half the kernel,
* the normalization needs to take that into account. */
sum += r_kernel[i] * 2;
}
}
/* Normalize kernel values. */
for (int i = 0; i < kernel_size; i++) {
r_kernel[i] /= sum;
}
}
void smooth_fcurve_segment(FCurve *fcu,
FCurveSegment *segment,
float *samples,
const float factor,
const int kernel_size,
double *kernel)
{
const int segment_end_index = segment->start_index + segment->length;
const int segment_start_x = fcu->bezt[segment->start_index].vec[1][0];
for (int i = segment->start_index; i < segment_end_index; i++) {
const int sample_index = (int)(fcu->bezt[i].vec[1][0] - segment_start_x) + kernel_size;
/* Apply the kernel. */
double filter_result = samples[sample_index] * kernel[0];
for (int j = 1; j <= kernel_size; j++) {
const double kernel_value = kernel[j];
filter_result += samples[sample_index + j] * kernel_value;
filter_result += samples[sample_index - j] * kernel_value;
}
const float key_y_value = interpf((float)filter_result, samples[sample_index], factor);
move_key(&fcu->bezt[i], key_y_value);
}
}
/* ---------------- */
void ease_fcurve_segment(FCurve *fcu, FCurveSegment *segment, const float factor)
@ -680,6 +735,16 @@ typedef struct TempFrameValCache {
float frame, val;
} TempFrameValCache;
void sample_fcurve_segment(FCurve *fcu,
const float start_frame,
float *samples,
const int sample_count)
{
for (int i = 0; i < sample_count; i++) {
samples[i] = evaluate_fcurve(fcu, start_frame + i);
}
}
void sample_fcurve(FCurve *fcu)
{
BezTriple *bezt, *start = NULL, *end = NULL;

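A worked example of the kernel above, with illustrative numbers: the operator below uses `kernel_size = filter_width + 1`, so a `filter_width` of 2 stores three weights, the center plus the positive side. Since `smooth_fcurve_segment()` applies each `kernel[j]` (j >= 1) to the samples on both sides of the center, the normalization counts those weights twice:

double kernel[3];
ED_ANIM_get_1d_gauss_kernel(0.33f, 3, kernel);
/* After normalization: kernel[0] + 2.0 * (kernel[1] + kernel[2]) == 1.0, so
 * the filtered result is a weighted average that preserves the overall level. */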
View File

@ -10,6 +10,7 @@
#include "BKE_asset.h"
#include "BKE_context.h"
#include "BKE_global.h"
#include "BKE_icons.h"
#include "BKE_idtype.h"
#include "BKE_lib_id.h"
@ -52,7 +53,7 @@ void ED_asset_generate_preview(const bContext *C, ID *id)
BKE_previewimg_clear(preview);
}
UI_icon_render_id(C, nullptr, id, ICON_SIZE_PREVIEW, true);
UI_icon_render_id(C, nullptr, id, ICON_SIZE_PREVIEW, !G.background);
}
bool ED_asset_clear_id(ID *id)

View File

@ -57,6 +57,7 @@
#define MAXTEXT 32766
static int kill_selection(Object *obedit, int ins);
static char *font_select_to_buffer(Object *obedit);
/* -------------------------------------------------------------------- */
/** \name Internal Utilities
@ -453,6 +454,19 @@ static int kill_selection(Object *obedit, int ins) /* ins == new character len *
return direction;
}
static void font_select_update_primary_clipboard(Object *obedit)
{
if ((WM_capabilities_flag() & WM_CAPABILITY_PRIMARY_CLIPBOARD) == 0) {
return;
}
char *buf = font_select_to_buffer(obedit);
if (buf == NULL) {
return;
}
WM_clipboard_text_set(buf, true);
MEM_freeN(buf);
}
/** \} */
/* -------------------------------------------------------------------- */
@ -525,6 +539,29 @@ static bool font_paste_utf8(bContext *C, const char *str, const size_t str_len)
/** \} */
/* -------------------------------------------------------------------- */
/** \name Generic Copy Functions
* \{ */
static char *font_select_to_buffer(Object *obedit)
{
int selstart, selend;
if (!BKE_vfont_select_get(obedit, &selstart, &selend)) {
return NULL;
}
Curve *cu = obedit->data;
EditFont *ef = cu->editfont;
char32_t *text_buf = ef->textbuf + selstart;
const size_t text_buf_len = selend - selstart;
const size_t len_utf8 = BLI_str_utf32_as_utf8_len_ex(text_buf, text_buf_len + 1);
char *buf = MEM_mallocN(len_utf8 + 1, __func__);
BLI_str_utf32_as_utf8(buf, text_buf, len_utf8);
return buf;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Paste From File Operator
* \{ */
@ -864,6 +901,7 @@ static int font_select_all_exec(bContext *C, wmOperator *UNUSED(op))
ef->pos = ef->len;
text_update_edited(C, obedit, FO_SELCHANGE);
font_select_update_primary_clipboard(obedit);
return OPERATOR_FINISHED;
}
@ -1000,6 +1038,7 @@ static bool paste_selection(Object *obedit, ReportList *reports)
static int paste_text_exec(bContext *C, wmOperator *op)
{
const bool selection = RNA_boolean_get(op->ptr, "selection");
Object *obedit = CTX_data_edit_object(C);
int retval;
size_t len_utf8;
@ -1013,7 +1052,7 @@ static int paste_text_exec(bContext *C, wmOperator *op)
int len;
} clipboard_system = {NULL}, clipboard_vfont = {NULL};
clipboard_system.buf = WM_clipboard_text_get(false, &clipboard_system.len);
clipboard_system.buf = WM_clipboard_text_get(selection, &clipboard_system.len);
if (clipboard_system.buf == NULL) {
return OPERATOR_CANCELLED;
@ -1077,6 +1116,15 @@ void FONT_OT_text_paste(wmOperatorType *ot)
/* flags */
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;
/* properties */
PropertyRNA *prop;
prop = RNA_def_boolean(ot->srna,
"selection",
0,
"Selection",
"Paste text selected elsewhere rather than copied (X11/Wayland only)");
RNA_def_property_flag(prop, PROP_SKIP_SAVE);
}
/** \} */
@ -1216,6 +1264,7 @@ static int move_cursor(bContext *C, int type, const bool select)
if (select) {
ef->selend = ef->pos;
font_select_update_primary_clipboard(obedit);
}
text_update_edited(C, obedit, cursmove);

View File

@ -425,6 +425,17 @@ void blend_to_neighbor_fcurve_segment(struct FCurve *fcu,
struct FCurveSegment *segment,
float factor);
void breakdown_fcurve_segment(struct FCurve *fcu, struct FCurveSegment *segment, float factor);
/** Get a 1D gauss kernel. Since the kernel is symmetrical, only calculates the positive side.
* \param sigma The shape of the gauss distribution.
* \param kernel_size How long the kernel array is.
*/
void ED_ANIM_get_1d_gauss_kernel(const float sigma, int kernel_size, double *r_kernel);
void smooth_fcurve_segment(struct FCurve *fcu,
struct FCurveSegment *segment,
float *samples,
float factor,
int kernel_size,
double *kernel);
void ease_fcurve_segment(struct FCurve *fcu, struct FCurveSegment *segment, float factor);
bool decimate_fcurve(struct bAnimListElem *ale, float remove_ratio, float error_sq_max);
void blend_to_default_fcurve(struct PointerRNA *id_ptr, struct FCurve *fcu, float factor);
@ -433,6 +444,10 @@ void blend_to_default_fcurve(struct PointerRNA *id_ptr, struct FCurve *fcu, floa
*/
void smooth_fcurve(struct FCurve *fcu);
void sample_fcurve(struct FCurve *fcu);
void sample_fcurve_segment(struct FCurve *fcu,
float start_frame,
float *r_samples,
int sample_count);
/* ----------- */

View File

@ -74,7 +74,7 @@ class AbstractView {
* \note This drop target may be requested for each event. The view doesn't keep the drop target
* around currently. So it cannot contain persistent state.
*/
virtual std::unique_ptr<AbstractViewDropTarget> create_drop_target() const;
virtual std::unique_ptr<AbstractViewDropTarget> create_drop_target();
/** Listen to a notifier, returning true if a redraw is needed. */
virtual bool listen(const wmNotifier &) const;
@ -166,7 +166,7 @@ class AbstractViewItem {
* \note This drop target may be requested for each event. The view doesn't keep a drop target
* around currently. So it can not contain persistent state.
*/
virtual std::unique_ptr<AbstractViewItemDropTarget> create_drop_target() const;
virtual std::unique_ptr<AbstractViewItemDropTarget> create_drop_target();
/** Get the view this item is registered for using #AbstractView::register_item(). */
AbstractView &get_view() const;

View File

@ -111,8 +111,8 @@ bool drop_target_apply_drop(bContext &C,
*/
char *drop_target_tooltip(const DropTargetInterface &drop_target, const wmDrag &drag);
std::unique_ptr<DropTargetInterface> view_drop_target(const uiViewHandle *view_handle);
std::unique_ptr<DropTargetInterface> view_item_drop_target(const uiViewItemHandle *item_handle);
std::unique_ptr<DropTargetInterface> view_drop_target(uiViewHandle *view_handle);
std::unique_ptr<DropTargetInterface> view_item_drop_target(uiViewItemHandle *item_handle);
/**
* Try to find a view item with a drop target under the mouse cursor, or if not found, a view
* with a drop target.

View File

@ -2014,7 +2014,11 @@ void UI_block_end_ex(const bContext *C, uiBlock *block, const int xy[2], int r_x
break;
}
ui_block_views_bounds_calc(block);
/* Update bounds of all views in this block. If this block is a panel, this will be done later in
* #UI_panels_end(), because buttons are offset there. */
if (!block->panel) {
ui_block_views_bounds_calc(block);
}
if (block->rect.xmin == 0.0f && block->rect.xmax == 0.0f) {
UI_block_bounds_set_normal(block, 0);

View File

@ -1831,6 +1831,10 @@ void UI_panels_end(const bContext *C, ARegion *region, int *r_x, int *r_y)
LISTBASE_FOREACH (uiBlock *, block, &region->uiblocks) {
if (block->active && block->panel) {
ui_offset_panel_block(block);
/* Update bounds for all "views" in this block. Usually this is done in #UI_block_end(), but
* that wouldn't work because of the offset applied above. */
ui_block_views_bounds_calc(block);
}
}

View File

@ -62,7 +62,7 @@ void AbstractView::update_from_old(uiBlock &new_block)
/** \name Default implementations of virtual functions
* \{ */
std::unique_ptr<AbstractViewDropTarget> AbstractView::create_drop_target() const
std::unique_ptr<AbstractViewDropTarget> AbstractView::create_drop_target()
{
/* There's no drop target (and hence no drop support) by default. */
return nullptr;
@ -121,9 +121,9 @@ std::optional<rcti> AbstractView::get_bounds() const
/** \name General API functions
* \{ */
std::unique_ptr<DropTargetInterface> view_drop_target(const uiViewHandle *view_handle)
std::unique_ptr<DropTargetInterface> view_drop_target(uiViewHandle *view_handle)
{
const AbstractView &view = reinterpret_cast<const AbstractView &>(*view_handle);
AbstractView &view = reinterpret_cast<AbstractView &>(*view_handle);
return view.create_drop_target();
}

View File

@ -174,7 +174,7 @@ std::unique_ptr<AbstractViewItemDragController> AbstractViewItem::create_drag_co
return nullptr;
}
std::unique_ptr<AbstractViewItemDropTarget> AbstractViewItem::create_drop_target() const
std::unique_ptr<AbstractViewItemDropTarget> AbstractViewItem::create_drop_target()
{
/* There's no drop target (and hence no drop support) by default. */
return nullptr;
@ -221,9 +221,9 @@ bool AbstractViewItem::is_active() const
/** \name General API functions
* \{ */
std::unique_ptr<DropTargetInterface> view_item_drop_target(const uiViewItemHandle *item_handle)
std::unique_ptr<DropTargetInterface> view_item_drop_target(uiViewItemHandle *item_handle)
{
const AbstractViewItem &item = reinterpret_cast<const AbstractViewItem &>(*item_handle);
AbstractViewItem &item = reinterpret_cast<AbstractViewItem &>(*item_handle);
return item.create_drop_target();
}

View File

@ -194,7 +194,7 @@ namespace blender::ui {
std::unique_ptr<DropTargetInterface> region_views_find_drop_target_at(const ARegion *region,
const int xy[2])
{
const uiViewItemHandle *hovered_view_item = UI_region_views_find_item_at(region, xy);
uiViewItemHandle *hovered_view_item = UI_region_views_find_item_at(region, xy);
if (hovered_view_item) {
std::unique_ptr<DropTargetInterface> drop_target = view_item_drop_target(hovered_view_item);
if (drop_target) {
@ -204,7 +204,7 @@ std::unique_ptr<DropTargetInterface> region_views_find_drop_target_at(const AReg
/* Get style for some sensible padding around the view items. */
const uiStyle *style = UI_style_get_dpi();
const uiViewHandle *hovered_view = UI_region_view_find_at(region, xy, style->buttonspacex);
uiViewHandle *hovered_view = UI_region_view_find_at(region, xy, style->buttonspacex);
if (hovered_view) {
std::unique_ptr<DropTargetInterface> drop_target = view_drop_target(hovered_view);
if (drop_target) {

View File

@ -512,6 +512,7 @@ bool paintface_mouse_select(bContext *C,
ED_region_tag_redraw(CTX_wm_region(C)); /* XXX: should redraw all 3D views. */
changed = true;
}
select_poly.finish();
return changed || found;
}

View File

@ -344,9 +344,9 @@ static void um_arraystore_compact_ex(UndoMesh *um, const UndoMesh *um_ref, bool
/* Compacting can be time consuming, run in parallel.
*
* NOTE(@ideasman42): this could be further parallelized with every custom-data layer
* running in it's own thread. If this is a bottleneck it's worth considering.
* At the moment it seems fast enough to split by element type.
* Since this is it's self a background thread, using too many threads here could
* running in its own thread. If this is a bottleneck it's worth considering.
* At the moment it seems fast enough to split by domain.
* Since this is itself a background thread, using too many threads here could
* interfere with foreground tasks. */
blender::threading::parallel_invoke(
4096 < (me->totvert + me->totedge + me->totloop + me->totpoly),

View File

@ -34,6 +34,72 @@
#include "console_intern.h"
/* -------------------------------------------------------------------- */
/** \name Utilities
* \{ */
static char *console_select_to_buffer(SpaceConsole *sc)
{
if (sc->sel_start == sc->sel_end) {
return NULL;
}
ConsoleLine cl_dummy = {NULL};
console_scrollback_prompt_begin(sc, &cl_dummy);
int offset = 0;
for (ConsoleLine *cl = sc->scrollback.first; cl; cl = cl->next) {
offset += cl->len + 1;
}
char *buf_str = NULL;
if (offset != 0) {
offset -= 1;
int sel[2] = {offset - sc->sel_end, offset - sc->sel_start};
DynStr *buf_dyn = BLI_dynstr_new();
for (ConsoleLine *cl = sc->scrollback.first; cl; cl = cl->next) {
if (sel[0] <= cl->len && sel[1] >= 0) {
int sta = max_ii(sel[0], 0);
int end = min_ii(sel[1], cl->len);
if (BLI_dynstr_get_len(buf_dyn)) {
BLI_dynstr_append(buf_dyn, "\n");
}
BLI_dynstr_nappend(buf_dyn, cl->line + sta, end - sta);
}
sel[0] -= cl->len + 1;
sel[1] -= cl->len + 1;
}
buf_str = BLI_dynstr_get_cstring(buf_dyn);
BLI_dynstr_free(buf_dyn);
}
console_scrollback_prompt_end(sc, &cl_dummy);
return buf_str;
}
static void console_select_update_primary_clipboard(SpaceConsole *sc)
{
if ((WM_capabilities_flag() & WM_CAPABILITY_PRIMARY_CLIPBOARD) == 0) {
return;
}
if (sc->sel_start == sc->sel_end) {
return;
}
char *buf = console_select_to_buffer(sc);
if (buf == NULL) {
return;
}
WM_clipboard_text_set(buf, true);
MEM_freeN(buf);
}
/** \} */
/* so when we type - the view scrolls to the bottom */
static void console_scroll_bottom(ARegion *region)
{
@ -966,61 +1032,13 @@ void CONSOLE_OT_scrollback_append(wmOperatorType *ot)
static int console_copy_exec(bContext *C, wmOperator *UNUSED(op))
{
SpaceConsole *sc = CTX_wm_space_console(C);
DynStr *buf_dyn;
char *buf_str;
ConsoleLine *cl;
int sel[2];
int offset = 0;
ConsoleLine cl_dummy = {NULL};
if (sc->sel_start == sc->sel_end) {
char *buf = console_select_to_buffer(sc);
if (buf == NULL) {
return OPERATOR_CANCELLED;
}
console_scrollback_prompt_begin(sc, &cl_dummy);
for (cl = sc->scrollback.first; cl; cl = cl->next) {
offset += cl->len + 1;
}
if (offset == 0) {
console_scrollback_prompt_end(sc, &cl_dummy);
return OPERATOR_CANCELLED;
}
buf_dyn = BLI_dynstr_new();
offset -= 1;
sel[0] = offset - sc->sel_end;
sel[1] = offset - sc->sel_start;
for (cl = sc->scrollback.first; cl; cl = cl->next) {
if (sel[0] <= cl->len && sel[1] >= 0) {
int sta = max_ii(sel[0], 0);
int end = min_ii(sel[1], cl->len);
if (BLI_dynstr_get_len(buf_dyn)) {
BLI_dynstr_append(buf_dyn, "\n");
}
BLI_dynstr_nappend(buf_dyn, cl->line + sta, end - sta);
}
sel[0] -= cl->len + 1;
sel[1] -= cl->len + 1;
}
buf_str = BLI_dynstr_get_cstring(buf_dyn);
BLI_dynstr_free(buf_dyn);
WM_clipboard_text_set(buf_str, 0);
MEM_freeN(buf_str);
console_scrollback_prompt_end(sc, &cl_dummy);
WM_clipboard_text_set(buf, 0);
MEM_freeN(buf);
return OPERATOR_FINISHED;
}
@ -1038,14 +1056,15 @@ void CONSOLE_OT_copy(wmOperatorType *ot)
/* properties */
}
static int console_paste_exec(bContext *C, wmOperator *UNUSED(op))
static int console_paste_exec(bContext *C, wmOperator *op)
{
const bool selection = RNA_boolean_get(op->ptr, "selection");
SpaceConsole *sc = CTX_wm_space_console(C);
ARegion *region = CTX_wm_region(C);
ConsoleLine *ci = console_history_verify(C);
int buf_len;
char *buf_str = WM_clipboard_text_get(false, &buf_len);
char *buf_str = WM_clipboard_text_get(selection, &buf_len);
char *buf_step, *buf_next;
if (buf_str == NULL) {
@ -1091,6 +1110,13 @@ void CONSOLE_OT_paste(wmOperatorType *ot)
ot->exec = console_paste_exec;
/* properties */
PropertyRNA *prop;
prop = RNA_def_boolean(ot->srna,
"selection",
0,
"Selection",
"Paste text selected elsewhere rather than copied (X11/Wayland only)");
RNA_def_property_flag(prop, PROP_SKIP_SAVE);
}
typedef struct SetConsoleCursor {
@ -1146,18 +1172,12 @@ static void console_modal_select_apply(bContext *C, wmOperator *op, const wmEven
}
}
static void console_cursor_set_exit(bContext *UNUSED(C), wmOperator *op)
static void console_cursor_set_exit(bContext *C, wmOperator *op)
{
// SpaceConsole *sc = CTX_wm_space_console(C);
SpaceConsole *sc = CTX_wm_space_console(C);
SetConsoleCursor *scu = op->customdata;
#if 0
if (txt_has_sel(text)) {
buffer = txt_sel_to_buf(text);
WM_clipboard_text_set(buffer, 1);
MEM_freeN(buffer);
}
#endif
console_select_update_primary_clipboard(sc);
MEM_freeN(scu);
}
@ -1254,6 +1274,11 @@ static int console_selectword_invoke(bContext *C, wmOperator *UNUSED(op), const
}
console_scrollback_prompt_end(sc, &cl_dummy);
if (ret & OPERATOR_FINISHED) {
console_select_update_primary_clipboard(sc);
}
return ret;
}

View File

@ -90,7 +90,7 @@ class AssetCatalogTreeViewItem : public ui::BasicTreeViewItem {
/** Add drag support for catalog items. */
std::unique_ptr<ui::AbstractViewItemDragController> create_drag_controller() const override;
/** Add dropping support for catalog items. */
std::unique_ptr<ui::AbstractViewItemDropTarget> create_drop_target() const override;
std::unique_ptr<ui::AbstractViewItemDropTarget> create_drop_target() override;
};
class AssetCatalogDragController : public ui::AbstractViewItemDragController {
@ -154,7 +154,7 @@ class AssetCatalogTreeViewAllItem : public ui::BasicTreeViewItem {
bool on_drop(struct bContext *C, const wmDrag &drag) const override;
};
std::unique_ptr<ui::AbstractViewItemDropTarget> create_drop_target() const override;
std::unique_ptr<ui::AbstractViewItemDropTarget> create_drop_target() override;
};
class AssetCatalogTreeViewUnassignedItem : public ui::BasicTreeViewItem {
@ -168,7 +168,7 @@ class AssetCatalogTreeViewUnassignedItem : public ui::BasicTreeViewItem {
bool on_drop(struct bContext *C, const wmDrag &drag) const override;
};
std::unique_ptr<ui::AbstractViewItemDropTarget> create_drop_target() const override;
std::unique_ptr<ui::AbstractViewItemDropTarget> create_drop_target() override;
};
/* ---------------------------------------------------------------------- */
@ -340,7 +340,6 @@ bool AssetCatalogTreeViewItem::rename(StringRefNull new_name)
}
std::unique_ptr<ui::AbstractViewItemDropTarget> AssetCatalogTreeViewItem::create_drop_target()
const
{
return std::make_unique<AssetCatalogDropTarget>(
static_cast<AssetCatalogTreeView &>(get_tree_view()), catalog_item_);
@ -580,7 +579,6 @@ void AssetCatalogTreeViewAllItem::build_row(uiLayout &row)
}
std::unique_ptr<ui::AbstractViewItemDropTarget> AssetCatalogTreeViewAllItem::create_drop_target()
const
{
return std::make_unique<AssetCatalogTreeViewAllItem::DropTarget>(
static_cast<AssetCatalogTreeView &>(get_tree_view()));
@ -635,7 +633,7 @@ bool AssetCatalogTreeViewAllItem::DropTarget::on_drop(struct bContext * /*C*/,
/* ---------------------------------------------------------------------- */
std::unique_ptr<ui::AbstractViewItemDropTarget> AssetCatalogTreeViewUnassignedItem::
create_drop_target() const
create_drop_target()
{
return std::make_unique<AssetCatalogTreeViewUnassignedItem::DropTarget>(
static_cast<AssetCatalogTreeView &>(get_tree_view()));

View File

@ -703,7 +703,7 @@ static void draw_fcurve_curve_samples(bAnimContext *ac,
const uint shdr_pos,
const bool draw_extrapolation)
{
if (!draw_extrapolation) {
if (!draw_extrapolation && fcu->totvert == 1) {
return;
}
@ -816,7 +816,7 @@ static bool fcurve_can_use_simple_bezt_drawing(FCurve *fcu)
static void draw_fcurve_curve_bezts(
bAnimContext *ac, ID *id, FCurve *fcu, View2D *v2d, uint pos, const bool draw_extrapolation)
{
if (!draw_extrapolation) {
if (!draw_extrapolation && fcu->totvert == 1) {
return;
}

View File

@ -116,6 +116,7 @@ void GRAPH_OT_breakdown(struct wmOperatorType *ot);
void GRAPH_OT_ease(struct wmOperatorType *ot);
void GRAPH_OT_decimate(struct wmOperatorType *ot);
void GRAPH_OT_blend_to_default(struct wmOperatorType *ot);
void GRAPH_OT_gaussian_smooth(struct wmOperatorType *ot);
void GRAPH_OT_sample(struct wmOperatorType *ot);
void GRAPH_OT_bake(struct wmOperatorType *ot);
void GRAPH_OT_unbake(struct wmOperatorType *ot);

View File

@ -464,6 +464,7 @@ void graphedit_operatortypes(void)
WM_operatortype_append(GRAPH_OT_breakdown);
WM_operatortype_append(GRAPH_OT_ease);
WM_operatortype_append(GRAPH_OT_blend_to_default);
WM_operatortype_append(GRAPH_OT_gaussian_smooth);
WM_operatortype_append(GRAPH_OT_euler_filter);
WM_operatortype_append(GRAPH_OT_delete);
WM_operatortype_append(GRAPH_OT_duplicate);

View File

@ -69,6 +69,10 @@ typedef struct tGraphSliderOp {
/* Each operator has a specific update function. */
void (*modal_update)(struct bContext *, struct wmOperator *);
/* If an operator stores custom data, it also needs to provide the function to clean it up. */
void *operator_data;
void (*free_operator_data)(void *operator_data);
NumInput num;
} tGraphSliderOp;
@ -191,6 +195,10 @@ static void graph_slider_exit(bContext *C, wmOperator *op)
return;
}
if (gso->free_operator_data != NULL) {
gso->free_operator_data(gso->operator_data);
}
ScrArea *area = gso->area;
LinkData *link;
@ -1053,3 +1061,257 @@ void GRAPH_OT_ease(wmOperatorType *ot)
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Gauss Smooth Operator
* \{ */
/* It is necessary to store data for smoothing when running in modal, because the sampling of
* FCurves shouldn't be done on every update. */
typedef struct tGaussOperatorData {
double *kernel;
ListBase segment_links; /* tFCurveSegmentLink */
ListBase anim_data; /* bAnimListElem */
} tGaussOperatorData;
/* Store data to smooth an FCurve segment. */
typedef struct tFCurveSegmentLink {
struct tFCurveSegmentLink *prev, *next;
FCurve *fcu;
FCurveSegment *segment;
float *samples; /* Array of y-values of the FCurve segment. */
} tFCurveSegmentLink;
static void gaussian_smooth_allocate_operator_data(tGraphSliderOp *gso,
const int filter_width,
const float sigma)
{
tGaussOperatorData *operator_data = MEM_callocN(sizeof(tGaussOperatorData),
"tGaussOperatorData");
const int kernel_size = filter_width + 1;
double *kernel = MEM_callocN(sizeof(double) * kernel_size, "Gauss Kernel");
ED_ANIM_get_1d_gauss_kernel(sigma, kernel_size, kernel);
operator_data->kernel = kernel;
ListBase anim_data = {NULL, NULL};
ANIM_animdata_filter(&gso->ac, &anim_data, OPERATOR_DATA_FILTER, gso->ac.data, gso->ac.datatype);
ListBase segment_links = {NULL, NULL};
LISTBASE_FOREACH (bAnimListElem *, ale, &anim_data) {
FCurve *fcu = (FCurve *)ale->key_data;
ListBase fcu_segments = find_fcurve_segments(fcu);
LISTBASE_FOREACH (FCurveSegment *, segment, &fcu_segments) {
tFCurveSegmentLink *segment_link = MEM_callocN(sizeof(tFCurveSegmentLink),
"FCurve Segment Link");
segment_link->fcu = fcu;
segment_link->segment = segment;
BezTriple left_bezt = fcu->bezt[segment->start_index];
BezTriple right_bezt = fcu->bezt[segment->start_index + segment->length - 1];
const int sample_count = (int)(right_bezt.vec[1][0] - left_bezt.vec[1][0]) +
(filter_width * 2 + 1);
float *samples = MEM_callocN(sizeof(float) * sample_count, "Smooth FCurve Op Samples");
sample_fcurve_segment(fcu, left_bezt.vec[1][0] - filter_width, samples, sample_count);
segment_link->samples = samples;
BLI_addtail(&segment_links, segment_link);
}
}
operator_data->anim_data = anim_data;
operator_data->segment_links = segment_links;
gso->operator_data = operator_data;
}
static void gaussian_smooth_free_operator_data(void *operator_data)
{
tGaussOperatorData *gauss_data = (tGaussOperatorData *)operator_data;
LISTBASE_FOREACH (tFCurveSegmentLink *, segment_link, &gauss_data->segment_links) {
MEM_freeN(segment_link->samples);
MEM_freeN(segment_link->segment);
}
MEM_freeN(gauss_data->kernel);
BLI_freelistN(&gauss_data->segment_links);
ANIM_animdata_freelist(&gauss_data->anim_data);
MEM_freeN(gauss_data);
}
static void gaussian_smooth_draw_status_header(bContext *C, tGraphSliderOp *gso)
{
char status_str[UI_MAX_DRAW_STR];
char slider_string[UI_MAX_DRAW_STR];
ED_slider_status_string_get(gso->slider, slider_string, UI_MAX_DRAW_STR);
const char *mode_str = TIP_("Gaussian Smooth");
if (hasNumInput(&gso->num)) {
char str_ofs[NUM_STR_REP_LEN];
outputNumInput(&gso->num, str_ofs, &gso->scene->unit);
BLI_snprintf(status_str, sizeof(status_str), "%s: %s", mode_str, str_ofs);
}
else {
BLI_snprintf(status_str, sizeof(status_str), "%s: %s", mode_str, slider_string);
}
ED_workspace_status_text(C, status_str);
}
static void gaussian_smooth_modal_update(bContext *C, wmOperator *op)
{
tGraphSliderOp *gso = op->customdata;
bAnimContext ac;
if (ANIM_animdata_get_context(C, &ac) == 0) {
return;
}
gaussian_smooth_draw_status_header(C, gso);
const float factor = slider_factor_get_and_remember(op);
tGaussOperatorData *operator_data = (tGaussOperatorData *)gso->operator_data;
const int filter_width = RNA_int_get(op->ptr, "filter_width");
LISTBASE_FOREACH (tFCurveSegmentLink *, segment, &operator_data->segment_links) {
smooth_fcurve_segment(segment->fcu,
segment->segment,
segment->samples,
factor,
filter_width,
operator_data->kernel);
}
LISTBASE_FOREACH (bAnimListElem *, ale, &operator_data->anim_data) {
ale->update |= ANIM_UPDATE_DEFAULT;
}
ANIM_animdata_update(&ac, &operator_data->anim_data);
WM_event_add_notifier(C, NC_ANIMATION | ND_KEYFRAME | NA_EDITED, NULL);
}
static int gaussian_smooth_invoke(bContext *C, wmOperator *op, const wmEvent *event)
{
const int invoke_result = graph_slider_invoke(C, op, event);
if (invoke_result == OPERATOR_CANCELLED) {
return invoke_result;
}
tGraphSliderOp *gso = op->customdata;
gso->modal_update = gaussian_smooth_modal_update;
gso->factor_prop = RNA_struct_find_property(op->ptr, "factor");
const float sigma = RNA_float_get(op->ptr, "sigma");
const int filter_width = RNA_int_get(op->ptr, "filter_width");
gaussian_smooth_allocate_operator_data(gso, filter_width, sigma);
gso->free_operator_data = gaussian_smooth_free_operator_data;
ED_slider_allow_overshoot_set(gso->slider, false);
ED_slider_factor_set(gso->slider, 0.0f);
gaussian_smooth_draw_status_header(C, gso);
return invoke_result;
}
static void gaussian_smooth_graph_keys(bAnimContext *ac,
const float factor,
double *kernel,
const int filter_width)
{
ListBase anim_data = {NULL, NULL};
ANIM_animdata_filter(ac, &anim_data, OPERATOR_DATA_FILTER, ac->data, ac->datatype);
LISTBASE_FOREACH (bAnimListElem *, ale, &anim_data) {
FCurve *fcu = (FCurve *)ale->key_data;
ListBase segments = find_fcurve_segments(fcu);
LISTBASE_FOREACH (FCurveSegment *, segment, &segments) {
BezTriple left_bezt = fcu->bezt[segment->start_index];
BezTriple right_bezt = fcu->bezt[segment->start_index + segment->length - 1];
const int sample_count = (int)(right_bezt.vec[1][0] - left_bezt.vec[1][0]) +
(filter_width * 2 + 1);
float *samples = MEM_callocN(sizeof(float) * sample_count, "Smooth FCurve Op Samples");
sample_fcurve_segment(fcu, left_bezt.vec[1][0] - filter_width, samples, sample_count);
smooth_fcurve_segment(fcu, segment, samples, factor, filter_width, kernel);
MEM_freeN(samples);
}
BLI_freelistN(&segments);
ale->update |= ANIM_UPDATE_DEFAULT;
}
ANIM_animdata_update(ac, &anim_data);
ANIM_animdata_freelist(&anim_data);
}
static int gaussian_smooth_exec(bContext *C, wmOperator *op)
{
bAnimContext ac;
if (ANIM_animdata_get_context(C, &ac) == 0) {
return OPERATOR_CANCELLED;
}
const float factor = RNA_float_get(op->ptr, "factor");
const int filter_width = RNA_int_get(op->ptr, "filter_width");
const int kernel_size = filter_width + 1;
double *kernel = MEM_callocN(sizeof(double) * kernel_size, "Gauss Kernel");
ED_ANIM_get_1d_gauss_kernel(RNA_float_get(op->ptr, "sigma"), kernel_size, kernel);
gaussian_smooth_graph_keys(&ac, factor, kernel, filter_width);
MEM_freeN(kernel);
/* Set notifier that keyframes have changed. */
WM_event_add_notifier(C, NC_ANIMATION | ND_KEYFRAME | NA_EDITED, NULL);
return OPERATOR_FINISHED;
}
void GRAPH_OT_gaussian_smooth(wmOperatorType *ot)
{
/* Identifiers. */
ot->name = "Gaussian Smooth";
ot->idname = "GRAPH_OT_gaussian_smooth";
ot->description = "Smooth the curve using a Gaussian filter";
/* API callbacks. */
ot->invoke = gaussian_smooth_invoke;
ot->modal = graph_slider_modal;
ot->exec = gaussian_smooth_exec;
ot->poll = graphop_editable_keyframes_poll;
/* Flags. */
ot->flag = OPTYPE_REGISTER | OPTYPE_UNDO;
RNA_def_float_factor(ot->srna,
"factor",
1.0f,
0.0f,
FLT_MAX,
"Factor",
"How much to blend to the default value",
0.0f,
1.0f);
RNA_def_float(ot->srna,
"sigma",
0.33f,
0.001f,
FLT_MAX,
"Sigma",
"The shape of the gaussian distribution, lower values make it sharper",
0.001f,
100.0f);
RNA_def_int(ot->srna,
"filter_width",
6,
1,
64,
"Filter Width",
"How far to each side the operator will average the key values",
1,
32);
}
/** \} */

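To make the sampling arithmetic above concrete, a sketch with made-up numbers (assumes an `FCurve *fcu` with keys at frames 10 and 20, and the default `filter_width` of 6):

const int filter_width = 6;
const int sample_count = (20 - 10) + (filter_width * 2 + 1); /* 23 samples. */
float *samples = static_cast<float *>(
    MEM_callocN(sizeof(float) * sample_count, __func__));
/* Sampling starts `filter_width` frames early so the kernel has full support
 * at both ends of the segment. */
sample_fcurve_segment(fcu, 10 - filter_width, samples, sample_count);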
View File

@ -263,8 +263,11 @@ static int node_clipboard_paste_exec(bContext *C, wmOperator *op)
const float2 offset = (mouse_location - center) / UI_SCALE_FAC;
for (bNode *new_node : node_map.values()) {
new_node->locx += offset.x;
new_node->locy += offset.y;
/* Skip the offset for parented nodes since the location is in parent space. */
if (new_node->parent == nullptr) {
new_node->locx += offset.x;
new_node->locy += offset.y;
}
}
}

View File

@ -159,6 +159,22 @@ BLI_INLINE int text_pixel_x_to_column(SpaceText *st, const int x)
return (x + (st->runtime.cwidth_px / 2)) / st->runtime.cwidth_px;
}
static void text_select_update_primary_clipboard(const Text *text)
{
if ((WM_capabilities_flag() & WM_CAPABILITY_PRIMARY_CLIPBOARD) == 0) {
return;
}
if (!txt_has_sel(text)) {
return;
}
char *buf = txt_sel_to_buf(text, NULL);
if (buf == NULL) {
return;
}
WM_clipboard_text_set(buf, true);
MEM_freeN(buf);
}
/** \} */
/* -------------------------------------------------------------------- */
@ -954,11 +970,13 @@ void TEXT_OT_paste(wmOperatorType *ot)
ot->flag = OPTYPE_UNDO;
/* properties */
RNA_def_boolean(ot->srna,
"selection",
0,
"Selection",
"Paste text selected elsewhere rather than copied (X11 only)");
PropertyRNA *prop;
prop = RNA_def_boolean(ot->srna,
"selection",
0,
"Selection",
"Paste text selected elsewhere rather than copied (X11/Wayland only)");
RNA_def_property_flag(prop, PROP_SKIP_SAVE);
}
/** \} */
@ -1499,6 +1517,8 @@ static int text_select_all_exec(bContext *C, wmOperator *UNUSED(op))
txt_sel_all(text);
text_update_cursor_moved(C);
text_select_update_primary_clipboard(text);
WM_event_add_notifier(C, NC_TEXT | NA_EDITED, text);
return OPERATOR_FINISHED;
@ -1529,6 +1549,8 @@ static int text_select_line_exec(bContext *C, wmOperator *UNUSED(op))
txt_sel_line(text);
text_update_cursor_moved(C);
text_select_update_primary_clipboard(text);
WM_event_add_notifier(C, NC_TEXT | NA_EDITED, text);
return OPERATOR_FINISHED;
@ -1562,6 +1584,8 @@ static int text_select_word_exec(bContext *C, wmOperator *UNUSED(op))
txt_jump_right(text, true, use_init_step);
text_update_cursor_moved(C);
text_select_update_primary_clipboard(text);
WM_event_add_notifier(C, NC_TEXT | NA_EDITED, text);
return OPERATOR_FINISHED;
@ -2246,6 +2270,10 @@ static int text_move_cursor(bContext *C, int type, bool select)
}
text_update_cursor_moved(C);
if (select) {
text_select_update_primary_clipboard(st->text);
}
WM_event_add_notifier(C, NC_TEXT | ND_CURSOR, text);
return OPERATOR_FINISHED;
@ -3243,17 +3271,11 @@ static void text_cursor_set_apply(bContext *C, wmOperator *op, const wmEvent *ev
static void text_cursor_set_exit(bContext *C, wmOperator *op)
{
SpaceText *st = CTX_wm_space_text(C);
Text *text = st->text;
SetSelection *ssel = op->customdata;
char *buffer;
if (txt_has_sel(text)) {
buffer = txt_sel_to_buf(text, NULL);
WM_clipboard_text_set(buffer, 1);
MEM_freeN(buffer);
}
text_update_cursor_moved(C);
text_select_update_primary_clipboard(st->text);
WM_event_add_notifier(C, NC_TEXT | ND_CURSOR, st->text);
text_cursor_timer_remove(C, ssel);

View File

@ -52,7 +52,7 @@
* would be used for this purpose. The problem with using poll is that once the gizmo is visible
* again there is a visible flicker showing the previous location before cursor motion causes the
* pre selection to be updated. While this is only a glitch, it's distracting.
* The gizmo system it's self could support this use case by tracking which gizmos draw and ensure
* The gizmo system itself could support this use case by tracking which gizmos draw and ensure
* gizmos always run #wmGizmoType.test_select before drawing, however pre-selection is already
* outside the scope of what gizmos are meant to be used for, so keep this workaround localized
* to this gizmo type unless this seems worth supporting for more typical use-cases.

View File

@ -419,16 +419,6 @@ static int transform_modal(bContext *C, wmOperator *op, const wmEvent *event)
exit_code = transformEvent(t, event);
t->context = NULL;
/* XXX, workaround: active needs to be calculated before transforming,
* since we're not reading from 'td->center' in this case. see: #40241 */
if (t->tsnap.source_operation == SCE_SNAP_SOURCE_ACTIVE) {
/* In camera view, tsnap callback is not set
* (see #initSnappingMode() in transform_snap.c, and #40348). */
if (t->tsnap.snap_source_fn && ((t->tsnap.status & SNAP_SOURCE_FOUND) == 0)) {
t->tsnap.snap_source_fn(t);
}
}
transformApply(C, t);
exit_code |= transformEnd(C, t);

View File

@ -947,6 +947,11 @@ static void setSnappingCallback(TransInfo *t)
break;
case SCE_SNAP_SOURCE_ACTIVE:
t->tsnap.snap_source_fn = snap_source_active_fn;
/* XXX, workaround: active needs to be calculated before transforming, otherwise
* `t->tsnap.snap_source` will be calculated with the transformed data since we're not
* reading from 'td->center' in this case. (See: #40241 and #40348). */
snap_source_active_fn(t);
break;
}
}
@ -1230,13 +1235,18 @@ static void snap_source_active_fn(TransInfo *t)
{
/* Only need to calculate once */
if ((t->tsnap.status & SNAP_SOURCE_FOUND) == 0) {
if (calculateCenterActive(t, true, t->tsnap.snap_source)) {
if (t->around == V3D_AROUND_ACTIVE) {
/* Just copy the already calculated active center. */
copy_v3_v3(t->tsnap.snap_source, t->center_global);
TargetSnapOffset(t, nullptr);
t->tsnap.status |= SNAP_SOURCE_FOUND;
}
else if (calculateCenterActive(t, true, t->tsnap.snap_source)) {
TargetSnapOffset(t, nullptr);
t->tsnap.status |= SNAP_SOURCE_FOUND;
}
/* No active, default to median */
else {
/* No active, default to median. */
t->tsnap.source_operation = SCE_SNAP_SOURCE_MEDIAN;
t->tsnap.snap_source_fn = snap_source_median_fn;
snap_source_median_fn(t);

View File

@ -374,7 +374,7 @@ typedef struct LineartData {
bool do_shadow_cast;
bool light_reference_available;
/* Keep an copy of these data so when line art is running it's self-contained. */
/* Keep a copy of these data so that line art is self-contained while running. */
bool cam_is_persp;
/* "Secondary" ones are from viewing camera
* (as opposed to shadow camera), during shadow calculation. */

View File

@ -52,6 +52,7 @@ endif()
set(INC_SYS
${Epoxy_INCLUDE_DIRS}
${IMATH_INCLUDE_DIR}
)
set(SRC
@ -203,6 +204,7 @@ set(VULKAN_SRC
vulkan/vk_command_buffer.cc
vulkan/vk_common.cc
vulkan/vk_context.cc
vulkan/vk_data_conversion.cc
vulkan/vk_debug.cc
vulkan/vk_descriptor_pools.cc
vulkan/vk_descriptor_set.cc
@ -216,6 +218,7 @@ set(VULKAN_SRC
vulkan/vk_pixel_buffer.cc
vulkan/vk_push_constants.cc
vulkan/vk_query.cc
vulkan/vk_resource_tracker.cc
vulkan/vk_shader.cc
vulkan/vk_shader_interface.cc
vulkan/vk_shader_log.cc
@ -231,6 +234,7 @@ set(VULKAN_SRC
vulkan/vk_command_buffer.hh
vulkan/vk_common.hh
vulkan/vk_context.hh
vulkan/vk_data_conversion.hh
vulkan/vk_descriptor_pools.hh
vulkan/vk_descriptor_set.hh
vulkan/vk_drawlist.hh
@ -243,6 +247,7 @@ set(VULKAN_SRC
vulkan/vk_pixel_buffer.hh
vulkan/vk_push_constants.hh
vulkan/vk_query.hh
vulkan/vk_resource_tracker.hh
vulkan/vk_shader.hh
vulkan/vk_shader_interface.hh
vulkan/vk_shader_log.hh

View File

@ -213,7 +213,8 @@ void Texture::detach_from(FrameBuffer *fb)
void Texture::update(eGPUDataFormat format, const void *data)
{
int mip = 0;
int extent[3], offset[3] = {0, 0, 0};
int extent[3] = {1, 1, 1};
int offset[3] = {0, 0, 0};
this->mip_size_get(mip, extent);
this->update_sub(mip, offset, extent, format, data);
}

View File

@ -759,7 +759,8 @@ inline size_t to_bytesize(eGPUTextureFormat tex_format, eGPUDataFormat data_form
}
/* Definitely not complete, edit according to the gl specification. */
inline bool validate_data_format(eGPUTextureFormat tex_format, eGPUDataFormat data_format)
constexpr inline bool validate_data_format(eGPUTextureFormat tex_format,
eGPUDataFormat data_format)
{
switch (tex_format) {
/* Formats texture & render-buffer */

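Making the check `constexpr` is what lets the new texture tests further down validate format pairings at compile time, e.g. (a sketch mirroring the `static_assert` used there):

static_assert(validate_data_format(GPU_RGBA8, GPU_DATA_FLOAT));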
View File

@ -78,6 +78,11 @@ struct Shader {
GPUShader *shader = nullptr;
Vector<CallData> call_datas;
Shader()
{
call_datas.reserve(10);
}
~Shader()
{
if (shader != nullptr) {
@ -117,7 +122,9 @@ struct Shader {
void dispatch()
{
GPU_compute_dispatch(shader, 1, 1, 1);
/* Dispatching 1000000 times to add some stress to the GPU. Without it tests may succeed when
* using too simple shaders. */
GPU_compute_dispatch(shader, 1000, 1000, 1);
}
};
@ -177,11 +184,7 @@ static void test_push_constants_512bytes()
}
GPU_TEST(push_constants_512bytes)
#if 0
/* Schedule multiple simultaneously. */
/* These tests have been disabled for now as this will be solved in a separate PR.
* - `DescriptorSets` may not be altered, when they are in the command queue or being executed.
*/
static void test_push_constants_multiple()
{
do_push_constants_test("gpu_push_constants_test", 10);
@ -205,6 +208,5 @@ static void test_push_constants_multiple_512bytes()
do_push_constants_test("gpu_push_constants_512bytes_test", 10);
}
GPU_TEST(push_constants_multiple_512bytes)
#endif
} // namespace blender::gpu::tests

View File

@ -5,10 +5,23 @@
#include "MEM_guardedalloc.h"
#include "BLI_math_vector.hh"
#include "BLI_vector.hh"
#include "GPU_context.h"
#include "GPU_texture.h"
#include "gpu_texture_private.hh"
/* Not all texture types are supported by all platforms. This define safeguards them until we have
* a working workaround or decide to remove support for those texture types. */
#define RUN_UNSUPPORTED false
/* Skip tests that haven't been developed yet, due to non-standard data types, or because a
* framebuffer is needed to create the texture. */
#define RUN_SRGB_UNIMPLEMENTED false
#define RUN_NON_STANDARD_UNIMPLEMENTED false
#define RUN_COMPONENT_UNIMPLEMENTED false
namespace blender::gpu::tests {
static void test_texture_read()
@ -48,4 +61,709 @@ static void test_texture_read()
}
GPU_TEST(texture_read)
template<typename DataType> static DataType *generate_test_data(size_t data_len)
{
DataType *data = static_cast<DataType *>(MEM_mallocN(data_len * sizeof(DataType), __func__));
for (int i : IndexRange(data_len)) {
if (std::is_same<DataType, float>()) {
data[i] = (DataType)(i % 8) / 8.0f;
}
else {
data[i] = (DataType)(i % 8);
}
}
return data;
}
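/* With the modulo-8 pattern the generated data is, for example, 0.0, 0.125, ..., 0.875 for
* floats and 0, 1, ..., 7 for integer types, repeating every 8 components. */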
template<eGPUTextureFormat DeviceFormat,
eGPUDataFormat HostFormat,
typename DataType,
int Size = 16>
static void texture_create_upload_read()
{
static_assert(!std::is_same<DataType, float>());
static_assert(validate_data_format(DeviceFormat, HostFormat));
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
GPUTexture *texture = GPU_texture_create_2d(
"texture", Size, Size, 1, DeviceFormat, usage, nullptr);
if (texture == nullptr) {
GTEST_SKIP() << "Platform doesn't support texture format [" << STRINGIFY(DeviceFormat) << "]";
}
size_t data_len = Size * Size * to_component_len(DeviceFormat);
DataType *data = static_cast<DataType *>(generate_test_data<DataType>(data_len));
GPU_texture_update(texture, HostFormat, data);
DataType *read_data = static_cast<DataType *>(GPU_texture_read(texture, HostFormat, 0));
bool failed = false;
for (int i : IndexRange(data_len)) {
bool ok = (read_data[i] - data[i]) == 0;
failed |= !ok;
}
EXPECT_FALSE(failed);
MEM_freeN(read_data);
MEM_freeN(data);
GPU_texture_free(texture);
}
template<eGPUTextureFormat DeviceFormat, eGPUDataFormat HostFormat, int Size = 16>
static void texture_create_upload_read_with_bias(float max_allowed_bias)
{
static_assert(validate_data_format(DeviceFormat, HostFormat));
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
GPUTexture *texture = GPU_texture_create_2d(
"texture", Size, Size, 1, DeviceFormat, usage, nullptr);
if (texture == nullptr) {
GTEST_SKIP() << "Platform doesn't support texture format [" << STRINGIFY(DeviceFormat) << "]";
}
size_t data_len = Size * Size * to_component_len(DeviceFormat);
float *data = static_cast<float *>(generate_test_data<float>(data_len));
GPU_texture_update(texture, HostFormat, data);
float *read_data = static_cast<float *>(GPU_texture_read(texture, HostFormat, 0));
float max_used_bias = 0.0f;
for (int i : IndexRange(data_len)) {
float bias = abs(read_data[i] - data[i]);
max_used_bias = max_ff(max_used_bias, bias);
}
EXPECT_LE(max_used_bias, max_allowed_bias);
MEM_freeN(read_data);
MEM_freeN(data);
GPU_texture_free(texture);
}
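/* The `max_allowed_bias` values passed below follow from the storage precision: one 8-bit
* UNORM step is 1 / 255 ~= 0.0039 (hence 0.004), one 16-bit UNORM step is 1 / 65535 ~= 0.000015
* (hence 0.00002), and 32F roundtrips exactly (0.0). The coarse 0.9 for 16F formats presumably
* leaves headroom for platform-dependent half-float conversion. */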
/* Variant of texture_create_upload_read that doesn't test each component, but a whole pixel at
* a time. This is needed to check the R11G11B10 and similar packed types. */
template<eGPUTextureFormat DeviceFormat, eGPUDataFormat HostFormat, int Size = 16>
static void texture_create_upload_read_pixel()
{
using DataType = uint32_t;
static_assert(validate_data_format(DeviceFormat, HostFormat));
eGPUTextureUsage usage = GPU_TEXTURE_USAGE_ATTACHMENT | GPU_TEXTURE_USAGE_HOST_READ;
GPUTexture *texture = GPU_texture_create_2d(
"texture", Size, Size, 1, DeviceFormat, usage, nullptr);
ASSERT_NE(texture, nullptr);
size_t data_len = Size * Size;
DataType *data = static_cast<DataType *>(generate_test_data<DataType>(data_len));
GPU_texture_update(texture, HostFormat, data);
DataType *read_data = static_cast<DataType *>(GPU_texture_read(texture, HostFormat, 0));
bool failed = false;
for (int i : IndexRange(data_len)) {
bool ok = (read_data[i] - data[i]) == 0;
failed |= !ok;
}
EXPECT_FALSE(failed);
MEM_freeN(read_data);
MEM_freeN(data);
GPU_texture_free(texture);
}
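/* For reference: a GPU_DATA_2_10_10_10_REV pixel packs all components into a single `uint32_t`
* following the standard packed-format convention:
*   uint32_t pixel = (a & 0x3u) << 30 | (b & 0x3FFu) << 20 | (g & 0x3FFu) << 10 | (r & 0x3FFu);
* Comparing whole pixels avoids per-component unpacking in the test. */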
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_FLOAT
* \{ */
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8()
{
texture_create_upload_read_with_bias<GPU_RGBA8, GPU_DATA_FLOAT>(0.004f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA16F()
{
texture_create_upload_read_with_bias<GPU_RGBA16F, GPU_DATA_FLOAT>(0.9f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA16F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA16()
{
texture_create_upload_read_with_bias<GPU_RGBA16, GPU_DATA_FLOAT>(0.00002f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA16);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA32F()
{
texture_create_upload_read_with_bias<GPU_RGBA32F, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA32F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RG8()
{
texture_create_upload_read_with_bias<GPU_RG8, GPU_DATA_FLOAT>(0.004f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RG8);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RG16F()
{
texture_create_upload_read_with_bias<GPU_RG16F, GPU_DATA_FLOAT>(0.9f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RG16F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RG16()
{
texture_create_upload_read_with_bias<GPU_RG16, GPU_DATA_FLOAT>(0.00002f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RG16);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RG32F()
{
texture_create_upload_read_with_bias<GPU_RG32F, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RG32F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R8()
{
texture_create_upload_read_with_bias<GPU_R8, GPU_DATA_FLOAT>(0.004f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R8);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R16F()
{
texture_create_upload_read_with_bias<GPU_R16F, GPU_DATA_FLOAT>(0.9f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R16F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R16()
{
texture_create_upload_read_with_bias<GPU_R16, GPU_DATA_FLOAT>(0.00002f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R16);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R32F()
{
texture_create_upload_read_with_bias<GPU_R32F, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R32F);
#if RUN_NON_STANDARD_UNIMPLEMENTED
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB10_A2()
{
texture_create_upload_read_with_bias<GPU_RGB10_A2, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB10_A2);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB10_A2UI()
{
texture_create_upload_read_with_bias<GPU_RGB10_A2UI, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB10_A2UI);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R11F_G11F_B10F()
{
texture_create_upload_read_with_bias<GPU_R11F_G11F_B10F, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R11F_G11F_B10F);
#endif
#if RUN_SRGB_UNIMPLEMENTED
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8()
{
texture_create_upload_read_with_bias<GPU_SRGB8_A8, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8);
#endif
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_SNORM()
{
texture_create_upload_read_with_bias<GPU_RGBA8_SNORM, GPU_DATA_FLOAT>(0.004f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA16_SNORM()
{
texture_create_upload_read_with_bias<GPU_RGBA16_SNORM, GPU_DATA_FLOAT>(0.00002f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA16_SNORM);
#if RUN_UNSUPPORTED
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB8()
{
texture_create_upload_read_with_bias<GPU_RGB8, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB8);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB8_SNORM()
{
texture_create_upload_read_with_bias<GPU_RGB8_SNORM, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB8_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB16F()
{
texture_create_upload_read_with_bias<GPU_RGB16F, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB16F);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB16()
{
texture_create_upload_read_with_bias<GPU_RGB16, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB16);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB16_SNORM()
{
texture_create_upload_read_with_bias<GPU_RGB16_SNORM, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB16_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB32F()
{
texture_create_upload_read_with_bias<GPU_RGB32F, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB32F);
#endif
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RG8_SNORM()
{
texture_create_upload_read_with_bias<GPU_RG8_SNORM, GPU_DATA_FLOAT>(0.004f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RG8_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RG16_SNORM()
{
texture_create_upload_read_with_bias<GPU_RG16_SNORM, GPU_DATA_FLOAT>(0.00002f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RG16_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R8_SNORM()
{
texture_create_upload_read_with_bias<GPU_R8_SNORM, GPU_DATA_FLOAT>(0.004f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R8_SNORM);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_R16_SNORM()
{
texture_create_upload_read_with_bias<GPU_R16_SNORM, GPU_DATA_FLOAT>(0.00002f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_R16_SNORM);
#if RUN_NON_STANDARD_UNIMPLEMENTED
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8_DXT1()
{
texture_create_upload_read_with_bias<GPU_SRGB8_A8_DXT1, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8_DXT1);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8_DXT3()
{
texture_create_upload_read_with_bias<GPU_SRGB8_A8_DXT3, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8_DXT3);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8_DXT5()
{
texture_create_upload_read_with_bias<GPU_SRGB8_A8_DXT5, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8_A8_DXT5);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_DXT1()
{
texture_create_upload_read_with_bias<GPU_RGBA8_DXT1, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_DXT1);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_DXT3()
{
texture_create_upload_read_with_bias<GPU_RGBA8_DXT3, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_DXT3);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_DXT5()
{
texture_create_upload_read_with_bias<GPU_RGBA8_DXT5, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGBA8_DXT5);
#endif
#if RUN_SRGB_UNIMPLEMENTED
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8()
{
texture_create_upload_read_with_bias<GPU_SRGB8, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_SRGB8);
#endif
#if RUN_NON_STANDARD_UNIMPLEMENTED
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB9_E5()
{
texture_create_upload_read_with_bias<GPU_RGB9_E5, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_RGB9_E5);
#endif
#if RUN_UNSUPPORTED
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_DEPTH_COMPONENT32F()
{
texture_create_upload_read_with_bias<GPU_DEPTH_COMPONENT32F, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_DEPTH_COMPONENT32F);
#endif
#if RUN_COMPONENT_UNIMPLEMENTED
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_DEPTH_COMPONENT24()
{
texture_create_upload_read_with_bias<GPU_DEPTH_COMPONENT24, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_DEPTH_COMPONENT24);
static void test_texture_roundtrip__GPU_DATA_FLOAT__GPU_DEPTH_COMPONENT16()
{
texture_create_upload_read_with_bias<GPU_DEPTH_COMPONENT16, GPU_DATA_FLOAT>(0.0f);
}
GPU_TEST(texture_roundtrip__GPU_DATA_FLOAT__GPU_DEPTH_COMPONENT16);
#endif
/* \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_HALF_FLOAT
* \{ */
static void test_texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_RGBA16F()
{
texture_create_upload_read<GPU_RGBA16F, GPU_DATA_HALF_FLOAT, uint16_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_RGBA16F);
static void test_texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_RG16F()
{
texture_create_upload_read<GPU_RG16F, GPU_DATA_HALF_FLOAT, uint16_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_RG16F);
static void test_texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_R16F()
{
texture_create_upload_read<GPU_R16F, GPU_DATA_HALF_FLOAT, uint16_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_R16F);
#if RUN_UNSUPPORTED
static void test_texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_RGB16F()
{
texture_create_upload_read<GPU_RGB16F, GPU_DATA_HALF_FLOAT, uint16_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_HALF_FLOAT__GPU_RGB16F);
#endif
/* \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_INT
* \{ */
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RGBA8I()
{
texture_create_upload_read<GPU_RGBA8I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RGBA8I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RGBA16I()
{
texture_create_upload_read<GPU_RGBA16I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RGBA16I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RGBA32I()
{
texture_create_upload_read<GPU_RGBA32I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RGBA32I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RG8I()
{
texture_create_upload_read<GPU_RG8I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RG8I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RG16I()
{
texture_create_upload_read<GPU_RG16I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RG16I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RG32I()
{
texture_create_upload_read<GPU_RG32I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RG32I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_R8I()
{
texture_create_upload_read<GPU_R8I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_R8I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_R16I()
{
texture_create_upload_read<GPU_R16I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_R16I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_R32I()
{
texture_create_upload_read<GPU_R32I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_R32I);
#if RUN_UNSUPPORTED
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RGB8I()
{
texture_create_upload_read<GPU_RGB8I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RGB8I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RGB16I()
{
texture_create_upload_read<GPU_RGB16I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RGB16I);
static void test_texture_roundtrip__GPU_DATA_INT__GPU_RGB32I()
{
texture_create_upload_read<GPU_RGB32I, GPU_DATA_INT, int32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_INT__GPU_RGB32I);
#endif
/* \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_UINT
* \{ */
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RGBA8UI()
{
texture_create_upload_read<GPU_RGBA8UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RGBA8UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RGBA16UI()
{
texture_create_upload_read<GPU_RGBA16UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RGBA16UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RGBA32UI()
{
texture_create_upload_read<GPU_RGBA32UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RGBA32UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RG8UI()
{
texture_create_upload_read<GPU_RG8UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RG8UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RG16UI()
{
texture_create_upload_read<GPU_RG16UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RG16UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RG32UI()
{
texture_create_upload_read<GPU_RG32UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RG32UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_R8UI()
{
texture_create_upload_read<GPU_R8UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_R8UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_R16UI()
{
texture_create_upload_read<GPU_R16UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_R16UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_R32UI()
{
texture_create_upload_read<GPU_R32UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_R32UI);
#if RUN_UNSUPPORTED
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH32F_STENCIL8()
{
texture_create_upload_read<GPU_DEPTH32F_STENCIL8, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH32F_STENCIL8);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH24_STENCIL8()
{
texture_create_upload_read<GPU_DEPTH24_STENCIL8, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH24_STENCIL8);
#endif
#if RUN_UNSUPPORTED
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RGB8UI()
{
texture_create_upload_read<GPU_RGB8UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RGB8UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RGB16UI()
{
texture_create_upload_read<GPU_RGB16UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RGB16UI);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_RGB32UI()
{
texture_create_upload_read<GPU_RGB32UI, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_RGB32UI);
#endif
#if RUN_COMPONENT_UNIMPLEMENTED
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH_COMPONENT32F()
{
texture_create_upload_read<GPU_DEPTH_COMPONENT32F, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH_COMPONENT32F);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH_COMPONENT24()
{
texture_create_upload_read<GPU_DEPTH_COMPONENT24, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH_COMPONENT24);
static void test_texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH_COMPONENT16()
{
texture_create_upload_read<GPU_DEPTH_COMPONENT16, GPU_DATA_UINT, uint32_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT__GPU_DEPTH_COMPONENT16);
#endif
/* \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_UBYTE
* \{ */
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_RGBA8UI()
{
texture_create_upload_read<GPU_RGBA8UI, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_RGBA8UI);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_RGBA8()
{
texture_create_upload_read<GPU_RGBA8, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_RGBA8);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_RG8UI()
{
texture_create_upload_read<GPU_RG8UI, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_RG8UI);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_RG8()
{
texture_create_upload_read<GPU_RG8, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_RG8);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_R8UI()
{
texture_create_upload_read<GPU_R8UI, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_R8UI);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_R8()
{
texture_create_upload_read<GPU_R8, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_R8);
#if RUN_SRGB_UNIMPLEMENTED
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_SRGB8_A8()
{
texture_create_upload_read<GPU_SRGB8_A8, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_SRGB8_A8);
#endif
#if RUN_UNSUPPORTED
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_RGB8I()
{
texture_create_upload_read<GPU_RGB8I, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_RGB8I);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_RGB8()
{
texture_create_upload_read<GPU_RGB8, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_RGB8);
static void test_texture_roundtrip__GPU_DATA_UBYTE__GPU_SRGB8()
{
texture_create_upload_read<GPU_SRGB8, GPU_DATA_UBYTE, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UBYTE__GPU_SRGB8);
#endif
/* \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_UINT_24_8
* \{ */
#if RUN_UNSUPPORTED
static void test_texture_roundtrip__GPU_DATA_UINT_24_8__GPU_DEPTH32F_STENCIL8()
{
texture_create_upload_read<GPU_DEPTH32F_STENCIL8, GPU_DATA_UINT_24_8, void>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT_24_8__GPU_DEPTH32F_STENCIL8);
static void test_texture_roundtrip__GPU_DATA_UINT_24_8__GPU_DEPTH24_STENCIL8()
{
texture_create_upload_read<GPU_DEPTH24_STENCIL8, GPU_DATA_UINT_24_8, void>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_UINT_24_8__GPU_DEPTH24_STENCIL8);
#endif
/* \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_10_11_11_REV
* \{ */
static void test_texture_roundtrip__GPU_DATA_10_11_11_REV__GPU_R11F_G11F_B10F()
{
texture_create_upload_read<GPU_R11F_G11F_B10F, GPU_DATA_10_11_11_REV, uint8_t>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_10_11_11_REV__GPU_R11F_G11F_B10F);
/* \} */
/* -------------------------------------------------------------------- */
/** \name Roundtrip testing GPU_DATA_2_10_10_10_REV
* \{ */
static void test_texture_roundtrip__GPU_DATA_2_10_10_10_REV__GPU_RGB10_A2()
{
texture_create_upload_read_pixel<GPU_RGB10_A2, GPU_DATA_2_10_10_10_REV>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_2_10_10_10_REV__GPU_RGB10_A2);
static void test_texture_roundtrip__GPU_DATA_2_10_10_10_REV__GPU_RGB10_A2UI()
{
texture_create_upload_read_pixel<GPU_RGB10_A2UI, GPU_DATA_2_10_10_10_REV>();
}
GPU_TEST(texture_roundtrip__GPU_DATA_2_10_10_10_REV__GPU_RGB10_A2UI);
/* \} */
} // namespace blender::gpu::tests

View File

@ -66,13 +66,14 @@ void VKBackend::compute_dispatch(int groups_x_len, int groups_y_len, int groups_
VKShader *shader = static_cast<VKShader *>(context.shader);
VKCommandBuffer &command_buffer = context.command_buffer_get();
VKPipeline &pipeline = shader->pipeline_get();
VKDescriptorSet &descriptor_set = pipeline.descriptor_set_get();
VKDescriptorSetTracker &descriptor_set = pipeline.descriptor_set_get();
VKPushConstants &push_constants = pipeline.push_constants_get();
push_constants.update(context);
descriptor_set.update(context.device_get());
command_buffer.bind(
descriptor_set, shader->vk_pipeline_layout_get(), VK_PIPELINE_BIND_POINT_COMPUTE);
descriptor_set.update(context);
command_buffer.bind(*descriptor_set.active_descriptor_set(),
shader->vk_pipeline_layout_get(),
VK_PIPELINE_BIND_POINT_COMPUTE);
command_buffer.dispatch(groups_x_len, groups_y_len, groups_z_len);
}

View File

@ -6,6 +6,7 @@
*/
#include "vk_buffer.hh"
#include "vk_context.hh"
namespace blender::gpu {

View File

@ -10,9 +10,9 @@
#include "gpu_context_private.hh"
#include "vk_common.hh"
#include "vk_context.hh"
namespace blender::gpu {
class VKContext;
/**
* Class for handling Vulkan buffers (allocation/updating/binding).

View File

@ -32,6 +32,7 @@ void VKCommandBuffer::init(const VkDevice vk_device,
vk_device_ = vk_device;
vk_queue_ = vk_queue;
vk_command_buffer_ = vk_command_buffer;
submission_id_.reset();
if (vk_fence_ == VK_NULL_HANDLE) {
VK_ALLOCATION_CALLBACKS;
@ -102,6 +103,30 @@ void VKCommandBuffer::copy(VKBuffer &dst_buffer,
regions.size(),
regions.data());
}
void VKCommandBuffer::copy(VKTexture &dst_texture,
VKBuffer &src_buffer,
Span<VkBufferImageCopy> regions)
{
vkCmdCopyBufferToImage(vk_command_buffer_,
src_buffer.vk_handle(),
dst_texture.vk_image_handle(),
VK_IMAGE_LAYOUT_GENERAL,
regions.size(),
regions.data());
}
void VKCommandBuffer::clear(VkImage vk_image,
VkImageLayout vk_image_layout,
const VkClearColorValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges)
{
vkCmdClearColorImage(vk_command_buffer_,
vk_image,
vk_image_layout,
&vk_clear_color,
ranges.size(),
ranges.data());
}
void VKCommandBuffer::pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages)
@ -160,6 +185,7 @@ void VKCommandBuffer::submit_encoded_commands()
submit_info.pCommandBuffers = &vk_command_buffer_;
vkQueueSubmit(vk_queue_, 1, &submit_info, vk_fence_);
submission_id_.next();
}
} // namespace blender::gpu

View File

@ -8,6 +8,7 @@
#pragma once
#include "vk_common.hh"
#include "vk_resource_tracker.hh"
#include "BLI_utility_mixins.hh"
@ -27,6 +28,7 @@ class VKCommandBuffer : NonCopyable, NonMovable {
/** Owning handles */
VkFence vk_fence_ = VK_NULL_HANDLE;
VKSubmissionID submission_id_;
public:
virtual ~VKCommandBuffer();
@ -48,9 +50,17 @@ class VKCommandBuffer : NonCopyable, NonMovable {
void dispatch(int groups_x_len, int groups_y_len, int groups_z_len);
/** Copy the contents of a texture MIP level to the dst buffer. */
void copy(VKBuffer &dst_buffer, VKTexture &src_texture, Span<VkBufferImageCopy> regions);
void copy(VKTexture &dst_texture, VKBuffer &src_buffer, Span<VkBufferImageCopy> regions);
void pipeline_barrier(VkPipelineStageFlags source_stages,
VkPipelineStageFlags destination_stages);
void pipeline_barrier(Span<VkImageMemoryBarrier> image_memory_barriers);
/**
* Clear color image resource.
*/
void clear(VkImage vk_image,
VkImageLayout vk_image_layout,
const VkClearColorValue &vk_clear_color,
Span<VkImageSubresourceRange> ranges);
void fill(VKBuffer &buffer, uint32_t data);
/**
@ -59,6 +69,11 @@ class VKCommandBuffer : NonCopyable, NonMovable {
*/
void submit();
const VKSubmissionID &submission_id_get() const
{
return submission_id_;
}
private:
void encode_recorded_commands();
void submit_encoded_commands();

View File

@ -0,0 +1,780 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#include "vk_data_conversion.hh"
#include "Imath/half.h"
namespace blender::gpu {
/* -------------------------------------------------------------------- */
/** \name Conversion types
* \{ */
enum class ConversionType {
/** No conversion needed, result can be directly read back to host memory. */
PASS_THROUGH,
FLOAT_TO_UNORM8,
UNORM8_TO_FLOAT,
FLOAT_TO_SNORM8,
SNORM8_TO_FLOAT,
FLOAT_TO_UNORM16,
UNORM16_TO_FLOAT,
FLOAT_TO_SNORM16,
SNORM16_TO_FLOAT,
UI32_TO_UI16,
UI16_TO_UI32,
UI32_TO_UI8,
UI8_TO_UI32,
I32_TO_I16,
I16_TO_I32,
I32_TO_I8,
I8_TO_I32,
/** Convert device 16F to floats. */
HALF_TO_FLOAT,
FLOAT_TO_HALF,
/**
* The requested conversion isn't supported.
*/
UNSUPPORTED,
};
static ConversionType type_of_conversion_float(eGPUTextureFormat device_format)
{
switch (device_format) {
case GPU_RGBA32F:
case GPU_RG32F:
case GPU_R32F:
case GPU_DEPTH_COMPONENT32F:
return ConversionType::PASS_THROUGH;
case GPU_RGBA16F:
case GPU_RG16F:
case GPU_R16F:
case GPU_RGB16F:
return ConversionType::FLOAT_TO_HALF;
case GPU_RGBA8:
case GPU_RG8:
case GPU_R8:
return ConversionType::FLOAT_TO_UNORM8;
case GPU_RGBA8_SNORM:
case GPU_RGB8_SNORM:
case GPU_RG8_SNORM:
case GPU_R8_SNORM:
return ConversionType::FLOAT_TO_SNORM8;
case GPU_RGBA16:
case GPU_RG16:
case GPU_R16:
return ConversionType::FLOAT_TO_UNORM16;
case GPU_RGBA16_SNORM:
case GPU_RGB16_SNORM:
case GPU_RG16_SNORM:
case GPU_R16_SNORM:
return ConversionType::FLOAT_TO_SNORM16;
case GPU_RGB32F: /* GPU_RGB32F is not supported by vendors. */
case GPU_RGBA8UI:
case GPU_RGBA8I:
case GPU_RGBA16UI:
case GPU_RGBA16I:
case GPU_RGBA32UI:
case GPU_RGBA32I:
case GPU_RG8UI:
case GPU_RG8I:
case GPU_RG16UI:
case GPU_RG16I:
case GPU_RG32UI:
case GPU_RG32I:
case GPU_R8UI:
case GPU_R8I:
case GPU_R16UI:
case GPU_R16I:
case GPU_R32UI:
case GPU_R32I:
case GPU_RGB10_A2:
case GPU_RGB10_A2UI:
case GPU_R11F_G11F_B10F:
case GPU_DEPTH32F_STENCIL8:
case GPU_DEPTH24_STENCIL8:
case GPU_SRGB8_A8:
case GPU_RGB8UI:
case GPU_RGB8I:
case GPU_RGB8:
case GPU_RGB16UI:
case GPU_RGB16I:
case GPU_RGB16:
case GPU_RGB32UI:
case GPU_RGB32I:
case GPU_SRGB8_A8_DXT1:
case GPU_SRGB8_A8_DXT3:
case GPU_SRGB8_A8_DXT5:
case GPU_RGBA8_DXT1:
case GPU_RGBA8_DXT3:
case GPU_RGBA8_DXT5:
case GPU_SRGB8:
case GPU_RGB9_E5:
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
return ConversionType::UNSUPPORTED;
}
return ConversionType::UNSUPPORTED;
}
static ConversionType type_of_conversion_int(eGPUTextureFormat device_format)
{
switch (device_format) {
case GPU_RGBA32I:
case GPU_RG32I:
case GPU_R32I:
return ConversionType::PASS_THROUGH;
case GPU_RGBA16I:
case GPU_RG16I:
case GPU_R16I:
return ConversionType::I32_TO_I16;
case GPU_RGBA8I:
case GPU_RG8I:
case GPU_R8I:
return ConversionType::I32_TO_I8;
case GPU_RGBA8UI:
case GPU_RGBA8:
case GPU_RGBA16UI:
case GPU_RGBA16F:
case GPU_RGBA16:
case GPU_RGBA32UI:
case GPU_RGBA32F:
case GPU_RG8UI:
case GPU_RG8:
case GPU_RG16UI:
case GPU_RG16F:
case GPU_RG32UI:
case GPU_RG32F:
case GPU_RG16:
case GPU_R8UI:
case GPU_R8:
case GPU_R16UI:
case GPU_R16F:
case GPU_R16:
case GPU_R32UI:
case GPU_R32F:
case GPU_RGB10_A2:
case GPU_RGB10_A2UI:
case GPU_R11F_G11F_B10F:
case GPU_DEPTH32F_STENCIL8:
case GPU_DEPTH24_STENCIL8:
case GPU_SRGB8_A8:
case GPU_RGBA8_SNORM:
case GPU_RGBA16_SNORM:
case GPU_RGB8UI:
case GPU_RGB8I:
case GPU_RGB8:
case GPU_RGB8_SNORM:
case GPU_RGB16UI:
case GPU_RGB16I:
case GPU_RGB16F:
case GPU_RGB16:
case GPU_RGB16_SNORM:
case GPU_RGB32UI:
case GPU_RGB32I:
case GPU_RGB32F:
case GPU_RG8_SNORM:
case GPU_RG16_SNORM:
case GPU_R8_SNORM:
case GPU_R16_SNORM:
case GPU_SRGB8_A8_DXT1:
case GPU_SRGB8_A8_DXT3:
case GPU_SRGB8_A8_DXT5:
case GPU_RGBA8_DXT1:
case GPU_RGBA8_DXT3:
case GPU_RGBA8_DXT5:
case GPU_SRGB8:
case GPU_RGB9_E5:
case GPU_DEPTH_COMPONENT32F:
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
return ConversionType::UNSUPPORTED;
}
return ConversionType::UNSUPPORTED;
}
static ConversionType type_of_conversion_uint(eGPUTextureFormat device_format)
{
switch (device_format) {
case GPU_RGBA32UI:
case GPU_RG32UI:
case GPU_R32UI:
return ConversionType::PASS_THROUGH;
case GPU_RGBA16UI:
case GPU_RG16UI:
case GPU_R16UI:
case GPU_RGB16UI:
return ConversionType::UI32_TO_UI16;
case GPU_RGBA8UI:
case GPU_RG8UI:
case GPU_R8UI:
return ConversionType::UI32_TO_UI8;
case GPU_RGBA8I:
case GPU_RGBA8:
case GPU_RGBA16I:
case GPU_RGBA16F:
case GPU_RGBA16:
case GPU_RGBA32I:
case GPU_RGBA32F:
case GPU_RG8I:
case GPU_RG8:
case GPU_RG16I:
case GPU_RG16F:
case GPU_RG16:
case GPU_RG32I:
case GPU_RG32F:
case GPU_R8I:
case GPU_R8:
case GPU_R16I:
case GPU_R16F:
case GPU_R16:
case GPU_R32I:
case GPU_R32F:
case GPU_RGB10_A2:
case GPU_RGB10_A2UI:
case GPU_R11F_G11F_B10F:
case GPU_DEPTH32F_STENCIL8:
case GPU_DEPTH24_STENCIL8:
case GPU_SRGB8_A8:
case GPU_RGBA8_SNORM:
case GPU_RGBA16_SNORM:
case GPU_RGB8UI:
case GPU_RGB8I:
case GPU_RGB8:
case GPU_RGB8_SNORM:
case GPU_RGB16I:
case GPU_RGB16F:
case GPU_RGB16:
case GPU_RGB16_SNORM:
case GPU_RGB32UI:
case GPU_RGB32I:
case GPU_RGB32F:
case GPU_RG8_SNORM:
case GPU_RG16_SNORM:
case GPU_R8_SNORM:
case GPU_R16_SNORM:
case GPU_SRGB8_A8_DXT1:
case GPU_SRGB8_A8_DXT3:
case GPU_SRGB8_A8_DXT5:
case GPU_RGBA8_DXT1:
case GPU_RGBA8_DXT3:
case GPU_RGBA8_DXT5:
case GPU_SRGB8:
case GPU_RGB9_E5:
case GPU_DEPTH_COMPONENT32F:
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
return ConversionType::UNSUPPORTED;
}
return ConversionType::UNSUPPORTED;
}
static ConversionType type_of_conversion_half(eGPUTextureFormat device_format)
{
switch (device_format) {
case GPU_RGBA16F:
case GPU_RG16F:
case GPU_R16F:
return ConversionType::PASS_THROUGH;
case GPU_RGBA8UI:
case GPU_RGBA8I:
case GPU_RGBA8:
case GPU_RGBA16UI:
case GPU_RGBA16I:
case GPU_RGBA16:
case GPU_RGBA32UI:
case GPU_RGBA32I:
case GPU_RGBA32F:
case GPU_RG8UI:
case GPU_RG8I:
case GPU_RG8:
case GPU_RG16UI:
case GPU_RG16I:
case GPU_RG16:
case GPU_RG32UI:
case GPU_RG32I:
case GPU_RG32F:
case GPU_R8UI:
case GPU_R8I:
case GPU_R8:
case GPU_R16UI:
case GPU_R16I:
case GPU_R16:
case GPU_R32UI:
case GPU_R32I:
case GPU_R32F:
case GPU_RGB10_A2:
case GPU_RGB10_A2UI:
case GPU_R11F_G11F_B10F:
case GPU_DEPTH32F_STENCIL8:
case GPU_DEPTH24_STENCIL8:
case GPU_SRGB8_A8:
case GPU_RGBA8_SNORM:
case GPU_RGBA16_SNORM:
case GPU_RGB8UI:
case GPU_RGB8I:
case GPU_RGB8:
case GPU_RGB8_SNORM:
case GPU_RGB16UI:
case GPU_RGB16I:
case GPU_RGB16F:
case GPU_RGB16:
case GPU_RGB16_SNORM:
case GPU_RGB32UI:
case GPU_RGB32I:
case GPU_RGB32F:
case GPU_RG8_SNORM:
case GPU_RG16_SNORM:
case GPU_R8_SNORM:
case GPU_R16_SNORM:
case GPU_SRGB8_A8_DXT1:
case GPU_SRGB8_A8_DXT3:
case GPU_SRGB8_A8_DXT5:
case GPU_RGBA8_DXT1:
case GPU_RGBA8_DXT3:
case GPU_RGBA8_DXT5:
case GPU_SRGB8:
case GPU_RGB9_E5:
case GPU_DEPTH_COMPONENT32F:
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
return ConversionType::UNSUPPORTED;
}
return ConversionType::UNSUPPORTED;
}
static ConversionType type_of_conversion_ubyte(eGPUTextureFormat device_format)
{
switch (device_format) {
case GPU_RGBA8UI:
case GPU_RGBA8:
case GPU_RG8UI:
case GPU_RG8:
case GPU_R8UI:
case GPU_R8:
return ConversionType::PASS_THROUGH;
case GPU_RGBA8I:
case GPU_RGBA16UI:
case GPU_RGBA16I:
case GPU_RGBA16F:
case GPU_RGBA16:
case GPU_RGBA32UI:
case GPU_RGBA32I:
case GPU_RGBA32F:
case GPU_RG8I:
case GPU_RG16UI:
case GPU_RG16I:
case GPU_RG16F:
case GPU_RG16:
case GPU_RG32UI:
case GPU_RG32I:
case GPU_RG32F:
case GPU_R8I:
case GPU_R16UI:
case GPU_R16I:
case GPU_R16F:
case GPU_R16:
case GPU_R32UI:
case GPU_R32I:
case GPU_R32F:
case GPU_RGB10_A2:
case GPU_RGB10_A2UI:
case GPU_R11F_G11F_B10F:
case GPU_DEPTH32F_STENCIL8:
case GPU_DEPTH24_STENCIL8:
case GPU_SRGB8_A8:
case GPU_RGBA8_SNORM:
case GPU_RGBA16_SNORM:
case GPU_RGB8UI:
case GPU_RGB8I:
case GPU_RGB8:
case GPU_RGB8_SNORM:
case GPU_RGB16UI:
case GPU_RGB16I:
case GPU_RGB16F:
case GPU_RGB16:
case GPU_RGB16_SNORM:
case GPU_RGB32UI:
case GPU_RGB32I:
case GPU_RGB32F:
case GPU_RG8_SNORM:
case GPU_RG16_SNORM:
case GPU_R8_SNORM:
case GPU_R16_SNORM:
case GPU_SRGB8_A8_DXT1:
case GPU_SRGB8_A8_DXT3:
case GPU_SRGB8_A8_DXT5:
case GPU_RGBA8_DXT1:
case GPU_RGBA8_DXT3:
case GPU_RGBA8_DXT5:
case GPU_SRGB8:
case GPU_RGB9_E5:
case GPU_DEPTH_COMPONENT32F:
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
return ConversionType::UNSUPPORTED;
}
return ConversionType::UNSUPPORTED;
}
static ConversionType type_of_conversion_r11g11b10(eGPUTextureFormat device_format)
{
if (device_format == GPU_R11F_G11F_B10F) {
return ConversionType::PASS_THROUGH;
}
return ConversionType::UNSUPPORTED;
}
static ConversionType type_of_conversion_r10g10b10a2(eGPUTextureFormat device_format)
{
if (ELEM(device_format, GPU_RGB10_A2, GPU_RGB10_A2UI)) {
return ConversionType::PASS_THROUGH;
}
return ConversionType::UNSUPPORTED;
}
static ConversionType host_to_device(eGPUDataFormat host_format, eGPUTextureFormat device_format)
{
BLI_assert(validate_data_format(device_format, host_format));
switch (host_format) {
case GPU_DATA_FLOAT:
return type_of_conversion_float(device_format);
case GPU_DATA_UINT:
return type_of_conversion_uint(device_format);
case GPU_DATA_INT:
return type_of_conversion_int(device_format);
case GPU_DATA_HALF_FLOAT:
return type_of_conversion_half(device_format);
case GPU_DATA_UBYTE:
return type_of_conversion_ubyte(device_format);
case GPU_DATA_10_11_11_REV:
return type_of_conversion_r11g11b10(device_format);
case GPU_DATA_2_10_10_10_REV:
return type_of_conversion_r10g10b10a2(device_format);
case GPU_DATA_UINT_24_8:
return ConversionType::UNSUPPORTED;
}
return ConversionType::UNSUPPORTED;
}
static ConversionType reversed(ConversionType type)
{
#define CASE_SINGLE(a, b) \
case ConversionType::a##_TO_##b: \
return ConversionType::b##_TO_##a;
#define CASE_PAIR(a, b) \
CASE_SINGLE(a, b) \
CASE_SINGLE(b, a)
switch (type) {
case ConversionType::PASS_THROUGH:
return ConversionType::PASS_THROUGH;
CASE_PAIR(FLOAT, UNORM8)
CASE_PAIR(FLOAT, SNORM8)
CASE_PAIR(FLOAT, UNORM16)
CASE_PAIR(FLOAT, SNORM16)
CASE_PAIR(UI32, UI16)
CASE_PAIR(I32, I16)
CASE_PAIR(UI32, UI8)
CASE_PAIR(I32, I8)
CASE_PAIR(FLOAT, HALF)
case ConversionType::UNSUPPORTED:
return ConversionType::UNSUPPORTED;
}
#undef CASE_PAIR
#undef CASE_SINGLE
return ConversionType::UNSUPPORTED;
}
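/* For example `reversed(ConversionType::FLOAT_TO_UNORM8)` yields
* `ConversionType::UNORM8_TO_FLOAT`, letting the device-to-host path reuse the host-to-device
* tables above. */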
/* \} */
/* -------------------------------------------------------------------- */
/** \name Data Conversion
* \{ */
template<typename InnerType> struct SignedNormalized {
static_assert(std::is_same<InnerType, uint8_t>() || std::is_same<InnerType, uint16_t>());
InnerType value;
static constexpr int32_t scalar()
{
return (1 << (sizeof(InnerType) * 8 - 1));
}
static constexpr int32_t delta()
{
return (1 << (sizeof(InnerType) * 8 - 1)) - 1;
}
static constexpr int32_t max()
{
return ((1 << (sizeof(InnerType) * 8)) - 1);
}
};
template<typename InnerType> struct UnsignedNormalized {
static_assert(std::is_same<InnerType, uint8_t>() || std::is_same<InnerType, uint16_t>());
InnerType value;
static constexpr int32_t scalar()
{
return (1 << (sizeof(InnerType) * 8)) - 1;
}
static constexpr int32_t max()
{
return ((1 << (sizeof(InnerType) * 8)) - 1);
}
};
template<typename InnerType> struct ComponentValue {
InnerType value;
};
using UI8 = ComponentValue<uint8_t>;
using UI16 = ComponentValue<uint16_t>;
using UI32 = ComponentValue<uint32_t>;
using I8 = ComponentValue<int8_t>;
using I16 = ComponentValue<int16_t>;
using I32 = ComponentValue<int32_t>;
using F32 = ComponentValue<float>;
using F16 = ComponentValue<uint16_t>;
template<typename StorageType>
void convert_component(SignedNormalized<StorageType> &dst, const F32 &src)
{
static constexpr int32_t scalar = SignedNormalized<StorageType>::scalar();
static constexpr int32_t delta = SignedNormalized<StorageType>::delta();
static constexpr int32_t max = SignedNormalized<StorageType>::max();
dst.value = (clamp_i((src.value * scalar + delta), 0, max));
}
template<typename StorageType>
void convert_component(F32 &dst, const SignedNormalized<StorageType> &src)
{
static constexpr int32_t scalar = SignedNormalized<StorageType>::scalar();
static constexpr int32_t delta = SignedNormalized<StorageType>::delta();
dst.value = float(int32_t(src.value) - delta) / scalar;
}
template<typename StorageType>
void convert_component(UnsignedNormalized<StorageType> &dst, const F32 &src)
{
static constexpr int32_t scalar = UnsignedNormalized<StorageType>::scalar();
static constexpr int32_t max = scalar;
dst.value = (clamp_i((src.value * scalar), 0, max));
}
template<typename StorageType>
void convert_component(F32 &dst, const UnsignedNormalized<StorageType> &src)
{
static constexpr int32_t scalar = UnsignedNormalized<StorageType>::scalar();
dst.value = float(src.value) / scalar;
}
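/* Worked example for `uint8_t` storage: UnsignedNormalized uses scalar() == 255, so a host
* float of 1.0f is stored as 255 and reads back as 255 / 255.0f == 1.0f. SignedNormalized uses
* scalar() == 128 and delta() == 127, mapping the host range -1.0f..1.0f onto the storage
* range 0..255. */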
/* Copy the contents of src to dst without performing any actual conversion. */
template<typename DestinationType, typename SourceType>
void convert_component(DestinationType &dst, const SourceType &src)
{
static_assert(std::is_same<DestinationType, UI8>() || std::is_same<DestinationType, UI16>() ||
std::is_same<DestinationType, UI32>() || std::is_same<DestinationType, I8>() ||
std::is_same<DestinationType, I16>() || std::is_same<DestinationType, I32>());
static_assert(std::is_same<SourceType, UI8>() || std::is_same<SourceType, UI16>() ||
std::is_same<SourceType, UI32>() || std::is_same<SourceType, I8>() ||
std::is_same<SourceType, I16>() || std::is_same<SourceType, I32>());
static_assert(!std::is_same<DestinationType, SourceType>());
dst.value = src.value;
}
static void convert_component(F16 &dst, const F32 &src)
{
dst.value = imath_float_to_half(src.value);
}
static void convert_component(F32 &dst, const F16 &src)
{
dst.value = imath_half_to_float(src.value);
}
/* \} */
template<typename DestinationType, typename SourceType>
void convert_per_component(MutableSpan<DestinationType> dst, Span<SourceType> src)
{
BLI_assert(src.size() == dst.size());
for (int64_t index : IndexRange(src.size())) {
convert_component(dst[index], src[index]);
}
}
template<typename DestinationType, typename SourceType>
void convert_per_component(void *dst_memory,
const void *src_memory,
size_t buffer_size,
eGPUTextureFormat device_format)
{
size_t total_components = to_component_len(device_format) * buffer_size;
Span<SourceType> src = Span<SourceType>(static_cast<const SourceType *>(src_memory),
total_components);
MutableSpan<DestinationType> dst = MutableSpan<DestinationType>(
static_cast<DestinationType *>(dst_memory), total_components);
convert_per_component<DestinationType, SourceType>(dst, src);
}
static void convert_buffer(void *dst_memory,
const void *src_memory,
size_t buffer_size,
eGPUTextureFormat device_format,
ConversionType type)
{
switch (type) {
case ConversionType::UNSUPPORTED:
return;
case ConversionType::PASS_THROUGH:
memcpy(dst_memory, src_memory, buffer_size * to_bytesize(device_format));
return;
case ConversionType::UI32_TO_UI16:
convert_per_component<UI16, UI32>(dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::UI16_TO_UI32:
convert_per_component<UI32, UI16>(dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::UI32_TO_UI8:
convert_per_component<UI8, UI32>(dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::UI8_TO_UI32:
convert_per_component<UI32, UI8>(dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::I32_TO_I16:
convert_per_component<I16, I32>(dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::I16_TO_I32:
convert_per_component<I32, I16>(dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::I32_TO_I8:
convert_per_component<I8, I32>(dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::I8_TO_I32:
convert_per_component<I32, I8>(dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::FLOAT_TO_SNORM8:
convert_per_component<SignedNormalized<uint8_t>, F32>(
dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::SNORM8_TO_FLOAT:
convert_per_component<F32, SignedNormalized<uint8_t>>(
dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::FLOAT_TO_SNORM16:
convert_per_component<SignedNormalized<uint16_t>, F32>(
dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::SNORM16_TO_FLOAT:
convert_per_component<F32, SignedNormalized<uint16_t>>(
dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::FLOAT_TO_UNORM8:
convert_per_component<UnsignedNormalized<uint8_t>, F32>(
dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::UNORM8_TO_FLOAT:
convert_per_component<F32, UnsignedNormalized<uint8_t>>(
dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::FLOAT_TO_UNORM16:
convert_per_component<UnsignedNormalized<uint16_t>, F32>(
dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::UNORM16_TO_FLOAT:
convert_per_component<F32, UnsignedNormalized<uint16_t>>(
dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::FLOAT_TO_HALF:
convert_per_component<F16, F32>(dst_memory, src_memory, buffer_size, device_format);
break;
case ConversionType::HALF_TO_FLOAT:
convert_per_component<F32, F16>(dst_memory, src_memory, buffer_size, device_format);
break;
}
}
/* -------------------------------------------------------------------- */
/** \name API
* \{ */
void convert_host_to_device(void *dst_buffer,
const void *src_buffer,
size_t buffer_size,
eGPUDataFormat host_format,
eGPUTextureFormat device_format)
{
ConversionType conversion_type = host_to_device(host_format, device_format);
BLI_assert(conversion_type != ConversionType::UNSUPPORTED);
convert_buffer(dst_buffer, src_buffer, buffer_size, device_format, conversion_type);
}
void convert_device_to_host(void *dst_buffer,
const void *src_buffer,
size_t buffer_size,
eGPUDataFormat host_format,
eGPUTextureFormat device_format)
{
ConversionType conversion_type = reversed(host_to_device(host_format, device_format));
BLI_assert(conversion_type != ConversionType::UNSUPPORTED);
convert_buffer(dst_buffer, src_buffer, buffer_size, device_format, conversion_type);
}
/* \} */
} // namespace blender::gpu

View File

@ -0,0 +1,53 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#pragma once
#include "gpu_texture_private.hh"
namespace blender::gpu {
/**
* Convert host buffer to device buffer.
*
* \param dst_buffer: device buffer.
* \param src_buffer: host buffer.
* \param buffer_size: number of pixels to convert from the start of the given buffer.
* \param host_format: format of the host buffer.
* \param device_format: format of the device buffer.
*
* \note Will assert when the host_format/device_format combination isn't valid
* (#validate_data_format) or supported. Some combinations aren't supported in Vulkan due to
* platform incompatibility.
*/
void convert_host_to_device(void *dst_buffer,
const void *src_buffer,
size_t buffer_size,
eGPUDataFormat host_format,
eGPUTextureFormat device_format);
/**
* Convert device buffer to host buffer.
*
* \param dst_buffer: host buffer
* \param src_buffer: device buffer.
* \param buffer_size: number of pixels to convert from the start of the given buffer.
* \param host_format: format of the host buffer.
* \param device_format: format of the device buffer.
*
* \note Will assert when the host_format/device_format combination isn't valid
* (#validate_data_format) or supported. Some combinations aren't supported in Vulkan due to
* platform incompatibility.
*/
void convert_device_to_host(void *dst_buffer,
const void *src_buffer,
size_t buffer_size,
eGPUDataFormat host_format,
eGPUTextureFormat device_format);
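/* Usage sketch (sizes are illustrative, mirroring the texture tests):
*   float host_data[16 * 16 * 4];      // 16x16 RGBA pixels as floats.
*   uint8_t device_data[16 * 16 * 4];  // Device representation of GPU_RGBA8.
*   convert_host_to_device(device_data, host_data, 16 * 16, GPU_DATA_FLOAT, GPU_RGBA8);
*   convert_device_to_host(host_data, device_data, 16 * 16, GPU_DATA_FLOAT, GPU_RGBA8);
*/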
} // namespace blender::gpu

View File

@ -80,7 +80,8 @@ bool VKDescriptorPools::is_last_pool_active()
return active_pool_index_ == pools_.size() - 1;
}
VKDescriptorSet VKDescriptorPools::allocate(const VkDescriptorSetLayout &descriptor_set_layout)
std::unique_ptr<VKDescriptorSet> VKDescriptorPools::allocate(
const VkDescriptorSetLayout &descriptor_set_layout)
{
VkDescriptorSetAllocateInfo allocate_info = {};
VkDescriptorPool pool = active_pool_get();
@ -102,7 +103,7 @@ VKDescriptorSet VKDescriptorPools::allocate(const VkDescriptorSetLayout &descrip
return allocate(descriptor_set_layout);
}
return VKDescriptorSet(pool, vk_descriptor_set);
return std::make_unique<VKDescriptorSet>(pool, vk_descriptor_set);
}
void VKDescriptorPools::free(VKDescriptorSet &descriptor_set)

View File

@ -47,7 +47,7 @@ class VKDescriptorPools {
void init(const VkDevice vk_device);
VKDescriptorSet allocate(const VkDescriptorSetLayout &descriptor_set_layout);
std::unique_ptr<VKDescriptorSet> allocate(const VkDescriptorSetLayout &descriptor_set_layout);
void free(VKDescriptorSet &descriptor_set);
/**

View File

@ -7,6 +7,7 @@
#include "vk_descriptor_set.hh"
#include "vk_index_buffer.hh"
#include "vk_shader.hh"
#include "vk_storage_buffer.hh"
#include "vk_texture.hh"
#include "vk_uniform_buffer.hh"
@ -17,9 +18,7 @@
namespace blender::gpu {
VKDescriptorSet::VKDescriptorSet(VKDescriptorSet &&other)
: vk_descriptor_pool_(other.vk_descriptor_pool_),
vk_descriptor_set_(other.vk_descriptor_set_),
bindings_(std::move(other.bindings_))
: vk_descriptor_pool_(other.vk_descriptor_pool_), vk_descriptor_set_(other.vk_descriptor_set_)
{
other.mark_freed();
}
@ -40,7 +39,8 @@ void VKDescriptorSet::mark_freed()
vk_descriptor_pool_ = VK_NULL_HANDLE;
}
void VKDescriptorSet::bind(VKStorageBuffer &buffer, const Location location)
void VKDescriptorSetTracker::bind(VKStorageBuffer &buffer,
const VKDescriptorSet::Location location)
{
Binding &binding = ensure_location(location);
binding.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
@ -48,7 +48,8 @@ void VKDescriptorSet::bind(VKStorageBuffer &buffer, const Location location)
binding.buffer_size = buffer.size_in_bytes();
}
void VKDescriptorSet::bind_as_ssbo(VKVertexBuffer &buffer, const Location location)
void VKDescriptorSetTracker::bind_as_ssbo(VKVertexBuffer &buffer,
const VKDescriptorSet::Location location)
{
Binding &binding = ensure_location(location);
binding.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
@ -56,7 +57,8 @@ void VKDescriptorSet::bind_as_ssbo(VKVertexBuffer &buffer, const Location locati
binding.buffer_size = buffer.size_used_get();
}
void VKDescriptorSet::bind(VKUniformBuffer &buffer, const Location location)
void VKDescriptorSetTracker::bind(VKUniformBuffer &buffer,
const VKDescriptorSet::Location location)
{
Binding &binding = ensure_location(location);
binding.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
@ -64,7 +66,8 @@ void VKDescriptorSet::bind(VKUniformBuffer &buffer, const Location location)
binding.buffer_size = buffer.size_in_bytes();
}
void VKDescriptorSet::bind_as_ssbo(VKIndexBuffer &buffer, const Location location)
void VKDescriptorSetTracker::bind_as_ssbo(VKIndexBuffer &buffer,
const VKDescriptorSet::Location location)
{
Binding &binding = ensure_location(location);
binding.type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
@ -72,14 +75,16 @@ void VKDescriptorSet::bind_as_ssbo(VKIndexBuffer &buffer, const Location locatio
binding.buffer_size = buffer.size_get();
}
void VKDescriptorSet::image_bind(VKTexture &texture, const Location location)
void VKDescriptorSetTracker::image_bind(VKTexture &texture,
const VKDescriptorSet::Location location)
{
Binding &binding = ensure_location(location);
binding.type = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
binding.vk_image_view = texture.vk_image_view_handle();
}
VKDescriptorSet::Binding &VKDescriptorSet::ensure_location(const Location location)
VKDescriptorSetTracker::Binding &VKDescriptorSetTracker::ensure_location(
const VKDescriptorSet::Location location)
{
for (Binding &binding : bindings_) {
if (binding.location == location) {
@ -93,8 +98,12 @@ VKDescriptorSet::Binding &VKDescriptorSet::ensure_location(const Location locati
return bindings_.last();
}
void VKDescriptorSet::update(VkDevice vk_device)
void VKDescriptorSetTracker::update(VKContext &context)
{
tracked_resource_for(context, !bindings_.is_empty());
std::unique_ptr<VKDescriptorSet> &descriptor_set = active_descriptor_set();
VkDescriptorSet vk_descriptor_set = descriptor_set->vk_handle();
Vector<VkDescriptorBufferInfo> buffer_infos;
Vector<VkWriteDescriptorSet> descriptor_writes;
@ -109,7 +118,7 @@ void VKDescriptorSet::update(VkDevice vk_device)
VkWriteDescriptorSet write_descriptor = {};
write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write_descriptor.dstSet = vk_descriptor_set_;
write_descriptor.dstSet = vk_descriptor_set;
write_descriptor.dstBinding = binding.location;
write_descriptor.descriptorCount = 1;
write_descriptor.descriptorType = binding.type;
@ -129,7 +138,7 @@ void VKDescriptorSet::update(VkDevice vk_device)
VkWriteDescriptorSet write_descriptor = {};
write_descriptor.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
write_descriptor.dstSet = vk_descriptor_set_;
write_descriptor.dstSet = vk_descriptor_set;
write_descriptor.dstBinding = binding.location;
write_descriptor.descriptorCount = 1;
write_descriptor.descriptorType = binding.type;
@ -141,10 +150,16 @@ void VKDescriptorSet::update(VkDevice vk_device)
"Not all changes have been converted to a write descriptor. Check "
"`Binding::is_buffer` and `Binding::is_image`.");
VkDevice vk_device = context.device_get();
vkUpdateDescriptorSets(
vk_device, descriptor_writes.size(), descriptor_writes.data(), 0, nullptr);
bindings_.clear();
}
std::unique_ptr<VKDescriptorSet> VKDescriptorSetTracker::create_resource(VKContext &context)
{
return context.descriptor_pools_get().allocate(layout_);
}
} // namespace blender::gpu

View File

@ -12,7 +12,10 @@
#include "gpu_shader_private.hh"
#include "vk_buffer.hh"
#include "vk_common.hh"
#include "vk_resource_tracker.hh"
#include "vk_uniform_buffer.hh"
namespace blender::gpu {
class VKIndexBuffer;
@ -21,6 +24,7 @@ class VKStorageBuffer;
class VKTexture;
class VKUniformBuffer;
class VKVertexBuffer;
class VKDescriptorSetTracker;
/**
* In Vulkan, shader resources (images and buffers) are grouped in descriptor sets.
@ -31,7 +35,6 @@ class VKVertexBuffer;
* to use 2 descriptor sets per shader: one for each #blender::gpu::shader::Frequency.
*/
class VKDescriptorSet : NonCopyable {
struct Binding;
public:
/**
@ -69,42 +72,13 @@ class VKDescriptorSet : NonCopyable {
return binding;
}
friend struct Binding;
friend struct VKDescriptorSetTracker;
friend class VKShaderInterface;
};
private:
struct Binding {
Location location;
VkDescriptorType type;
VkBuffer vk_buffer = VK_NULL_HANDLE;
VkDeviceSize buffer_size = 0;
VkImageView vk_image_view = VK_NULL_HANDLE;
Binding()
{
location.binding = 0;
}
bool is_buffer() const
{
return ELEM(type, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
}
bool is_image() const
{
return ELEM(type, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
}
};
VkDescriptorPool vk_descriptor_pool_ = VK_NULL_HANDLE;
VkDescriptorSet vk_descriptor_set_ = VK_NULL_HANDLE;
/** A list of bindings that needs to be updated. */
Vector<Binding> bindings_;
public:
VKDescriptorSet() = default;
VKDescriptorSet(VkDescriptorPool vk_descriptor_pool, VkDescriptorSet vk_descriptor_set)
@ -131,22 +105,73 @@ class VKDescriptorSet : NonCopyable {
{
return vk_descriptor_pool_;
}
void mark_freed();
};
void bind_as_ssbo(VKVertexBuffer &buffer, Location location);
void bind_as_ssbo(VKIndexBuffer &buffer, Location location);
void bind(VKStorageBuffer &buffer, Location location);
void bind(VKUniformBuffer &buffer, Location location);
void image_bind(VKTexture &texture, Location location);
class VKDescriptorSetTracker : protected VKResourceTracker<VKDescriptorSet> {
friend class VKDescriptorSet;
public:
struct Binding {
VKDescriptorSet::Location location;
VkDescriptorType type;
VkBuffer vk_buffer = VK_NULL_HANDLE;
VkDeviceSize buffer_size = 0;
VkImageView vk_image_view = VK_NULL_HANDLE;
Binding()
{
location.binding = 0;
}
bool is_buffer() const
{
return ELEM(type, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
}
bool is_image() const
{
return ELEM(type, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE);
}
};
private:
/** A list of bindings that need to be updated. */
Vector<Binding> bindings_;
VkDescriptorSetLayout layout_;
public:
VKDescriptorSetTracker()
{
}
VKDescriptorSetTracker(VkDescriptorSetLayout layout) : layout_(layout)
{
}
void bind_as_ssbo(VKVertexBuffer &buffer, VKDescriptorSet::Location location);
void bind_as_ssbo(VKIndexBuffer &buffer, VKDescriptorSet::Location location);
void bind(VKStorageBuffer &buffer, VKDescriptorSet::Location location);
void bind(VKUniformBuffer &buffer, VKDescriptorSet::Location location);
void image_bind(VKTexture &texture, VKDescriptorSet::Location location);
/**
* Update the descriptor set on the device.
*/
void update(VkDevice vk_device);
void update(VKContext &context);
void mark_freed();
std::unique_ptr<VKDescriptorSet> &active_descriptor_set()
{
return active_resource();
}
protected:
std::unique_ptr<VKDescriptorSet> create_resource(VKContext &context) override;
private:
Binding &ensure_location(Location location);
Binding &ensure_location(VKDescriptorSet::Location location);
};
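/* NOTE: `vk_resource_tracker.hh` is not part of this hunk. Judging from the calls above,
* `VKResourceTracker<Resource>` presumably keeps one resource per command-buffer submission:
* `tracked_resource_for(context, changed)` returns the active resource and calls
* `create_resource(context)` whenever a new submission (or a change) needs a fresh one, so
* resources referenced by in-flight command buffers are never altered. */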
} // namespace blender::gpu

View File

@ -12,10 +12,10 @@
namespace blender::gpu {
VKPipeline::VKPipeline(VkPipeline vk_pipeline,
VKDescriptorSet &&descriptor_set,
VkDescriptorSetLayout vk_descriptor_set_layout,
VKPushConstants &&push_constants)
: vk_pipeline_(vk_pipeline),
descriptor_set_(std::move(descriptor_set)),
descriptor_set_(vk_descriptor_set_layout),
push_constants_(std::move(push_constants))
{
}
@ -56,9 +56,8 @@ VKPipeline VKPipeline::create_compute_pipeline(
return VKPipeline();
}
VKDescriptorSet descriptor_set = context.descriptor_pools_get().allocate(descriptor_set_layout);
VKPushConstants push_constants(&push_constants_layout);
return VKPipeline(vk_pipeline, std::move(descriptor_set), std::move(push_constants));
return VKPipeline(vk_pipeline, descriptor_set_layout, std::move(push_constants));
}
VkPipeline VKPipeline::vk_handle() const

View File

@ -21,7 +21,7 @@ class VKContext;
class VKPipeline : NonCopyable {
VkPipeline vk_pipeline_ = VK_NULL_HANDLE;
VKDescriptorSet descriptor_set_;
VKDescriptorSetTracker descriptor_set_;
VKPushConstants push_constants_;
public:
@ -29,7 +29,7 @@ class VKPipeline : NonCopyable {
virtual ~VKPipeline();
VKPipeline(VkPipeline vk_pipeline,
VKDescriptorSet &&vk_descriptor_set,
VkDescriptorSetLayout vk_descriptor_set_layout,
VKPushConstants &&push_constants);
VKPipeline &operator=(VKPipeline &&other)
{
@ -46,7 +46,7 @@ class VKPipeline : NonCopyable {
VkPipelineLayout &pipeline_layouts,
const VKPushConstants::Layout &push_constants_layout);
VKDescriptorSet &descriptor_set_get()
VKDescriptorSetTracker &descriptor_set_get()
{
return descriptor_set_;
}

View File

@ -7,6 +7,8 @@
#include "vk_pixel_buffer.hh"
#include "vk_context.hh"
namespace blender::gpu {
VKPixelBuffer::VKPixelBuffer(int64_t size) : PixelBuffer(size)

View File

@ -7,6 +7,7 @@
#include "vk_push_constants.hh"
#include "vk_backend.hh"
#include "vk_context.hh"
#include "vk_memory_layout.hh"
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
@ -103,24 +104,12 @@ VKPushConstants::VKPushConstants() = default;
VKPushConstants::VKPushConstants(const Layout *layout) : layout_(layout)
{
data_ = MEM_mallocN(layout->size_in_bytes(), __func__);
switch (layout_->storage_type_get()) {
case StorageType::UNIFORM_BUFFER:
uniform_buffer_ = new VKUniformBuffer(layout_->size_in_bytes(), __func__);
break;
case StorageType::PUSH_CONSTANTS:
case StorageType::NONE:
break;
}
}
VKPushConstants::VKPushConstants(VKPushConstants &&other) : layout_(other.layout_)
{
data_ = other.data_;
other.data_ = nullptr;
uniform_buffer_ = other.uniform_buffer_;
other.uniform_buffer_ = nullptr;
}
VKPushConstants::~VKPushConstants()
@ -129,9 +118,6 @@ VKPushConstants::~VKPushConstants()
MEM_freeN(data_);
data_ = nullptr;
}
delete uniform_buffer_;
uniform_buffer_ = nullptr;
}
VKPushConstants &VKPushConstants::operator=(VKPushConstants &&other)
@ -141,9 +127,6 @@ VKPushConstants &VKPushConstants::operator=(VKPushConstants &&other)
data_ = other.data_;
other.data_ = nullptr;
uniform_buffer_ = other.uniform_buffer_;
other.uniform_buffer_ = nullptr;
return *this;
}
@ -155,7 +138,7 @@ void VKPushConstants::update(VKContext &context)
BLI_assert_msg(&pipeline.push_constants_get() == this,
"Invalid state detected. Push constants doesn't belong to the active shader of "
"the given context.");
VKDescriptorSet &descriptor_set = pipeline.descriptor_set_get();
VKDescriptorSetTracker &descriptor_set = pipeline.descriptor_set_get();
switch (layout_get().storage_type_get()) {
case VKPushConstants::StorageType::NONE:
@ -167,7 +150,7 @@ void VKPushConstants::update(VKContext &context)
case VKPushConstants::StorageType::UNIFORM_BUFFER:
update_uniform_buffer();
descriptor_set.bind(uniform_buffer_get(), layout_get().descriptor_set_location_get());
descriptor_set.bind(*uniform_buffer_get(), layout_get().descriptor_set_location_get());
break;
}
}
@ -175,16 +158,22 @@ void VKPushConstants::update(VKContext &context)
void VKPushConstants::update_uniform_buffer()
{
BLI_assert(layout_->storage_type_get() == StorageType::UNIFORM_BUFFER);
BLI_assert(uniform_buffer_ != nullptr);
BLI_assert(data_ != nullptr);
uniform_buffer_->update(data_);
VKContext &context = *VKContext::get();
std::unique_ptr<VKUniformBuffer> &uniform_buffer = tracked_resource_for(context, is_dirty_);
uniform_buffer->update(data_);
is_dirty_ = false;
}
VKUniformBuffer &VKPushConstants::uniform_buffer_get()
std::unique_ptr<VKUniformBuffer> &VKPushConstants::uniform_buffer_get()
{
BLI_assert(layout_->storage_type_get() == StorageType::UNIFORM_BUFFER);
BLI_assert(uniform_buffer_ != nullptr);
return *uniform_buffer_;
return active_resource();
}
std::unique_ptr<VKUniformBuffer> VKPushConstants::create_resource(VKContext & /*context*/)
{
return std::make_unique<VKUniformBuffer>(layout_->size_in_bytes(), __func__);
}
} // namespace blender::gpu

View File

@ -43,7 +43,7 @@ class VKContext;
* It should also keep track of the submissions in order to reuse the allocated
* data.
*/
class VKPushConstants : NonCopyable {
class VKPushConstants : VKResourceTracker<VKUniformBuffer> {
public:
/** Different methods to store push constants. */
enum class StorageType {
@ -151,7 +151,7 @@ class VKPushConstants : NonCopyable {
private:
const Layout *layout_ = nullptr;
void *data_ = nullptr;
VKUniformBuffer *uniform_buffer_ = nullptr;
bool is_dirty_ = false;
public:
VKPushConstants();
@ -171,6 +171,11 @@ class VKPushConstants : NonCopyable {
return *layout_;
}
/**
* Part of the resource tracking API; called when a new resource is needed.
*/
std::unique_ptr<VKUniformBuffer> create_resource(VKContext &context) override;
/**
* Get the reference to the active data.
*
@ -209,6 +214,7 @@ class VKPushConstants : NonCopyable {
layout_->size_in_bytes(),
"Tried to write outside the push constant allocated memory.");
memcpy(dst, input_data, comp_len * array_size * sizeof(T));
is_dirty_ = true;
return;
}
@ -222,6 +228,7 @@ class VKPushConstants : NonCopyable {
src += comp_len;
dst += 4;
}
is_dirty_ = true;
}
/**
@ -243,7 +250,7 @@ class VKPushConstants : NonCopyable {
*
* Only valid when storage type = StorageType::UNIFORM_BUFFER.
*/
VKUniformBuffer &uniform_buffer_get();
std::unique_ptr<VKUniformBuffer> &uniform_buffer_get();
};
} // namespace blender::gpu

View File

@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#include "vk_resource_tracker.hh"
#include "vk_context.hh"
namespace blender::gpu {
bool VKSubmissionTracker::is_changed(VKContext &context)
{
VKCommandBuffer &command_buffer = context.command_buffer_get();
const VKSubmissionID &current_id = command_buffer.submission_id_get();
if (last_known_id_ != current_id) {
last_known_id_ = current_id;
return true;
}
return false;
}
} // namespace blender::gpu

View File

@ -0,0 +1,177 @@
/* SPDX-License-Identifier: GPL-2.0-or-later
* Copyright 2023 Blender Foundation. All rights reserved. */
/** \file
* \ingroup gpu
*/
#pragma once
#include "BLI_utility_mixins.hh"
#include "BLI_vector.hh"
#include "vk_common.hh"
namespace blender::gpu {
class VKContext;
class VKCommandBuffer;
/**
* In Vulkan, multiple commands can be in flight simultaneously.
*
* These commands can share the same resources, such as descriptor sets
* or push constants. When these resources are updated between commands,
* a new version of them should be created.
*
* When a resource is updated it should check the submission id of the
* command buffer. If it is different, the resource can be reused.
* If the submission id is the same, a new version of the resource must be
* created so it does not interfere with other commands that use it.
*
* VKSubmissionID is the identifier used to keep track of whether a new
* submission is being recorded.
*/
struct VKSubmissionID {
private:
int64_t id_ = -1;
public:
VKSubmissionID() = default;
private:
/**
* Reset the submission id.
*
* This should only be called during initialization of the command buffer,
* as it leads to undesired behavior once resources are already tracking
* the submission id.
*/
void reset()
{
id_ = 0;
}
/**
* Change the submission id.
*
* Called when submitting a command buffer to the queue. This way a resource
* knows that, the next time it is used, it can free the sub-resources used
* by the previous submission.
*/
void next()
{
id_++;
}
public:
const VKSubmissionID &operator=(const VKSubmissionID &other)
{
id_ = other.id_;
return *this;
}
bool operator==(const VKSubmissionID &other)
{
return id_ == other.id_;
}
bool operator!=(const VKSubmissionID &other)
{
return id_ != other.id_;
}
friend class VKCommandBuffer;
};
/**
* Submission tracker keeps track of the last known submission id of the
* command buffer.
*/
class VKSubmissionTracker {
VKSubmissionID last_known_id_;
public:
/**
* Check if the submission_id has changed since the last time it was called
* on this VKSubmissionTracker.
*/
bool is_changed(VKContext &context);
};
/**
* VKResourceTracker keeps track of the versions of a resource created
* across command buffer submissions.
*/
template<typename Resource> class VKResourceTracker : NonCopyable {
VKSubmissionTracker submission_tracker_;
Vector<std::unique_ptr<Resource>> tracked_resources_;
protected:
VKResourceTracker<Resource>() = default;
VKResourceTracker<Resource>(VKResourceTracker<Resource> &&other)
: submission_tracker_(other.submission_tracker_),
tracked_resources_(std::move(other.tracked_resources_))
{
}
VKResourceTracker<Resource> &operator=(VKResourceTracker<Resource> &&other)
{
submission_tracker_ = other.submission_tracker_;
tracked_resources_ = std::move(other.tracked_resources_);
return *this;
}
virtual ~VKResourceTracker()
{
free_tracked_resources();
}
/**
* Get a resource that can be used by the resource tracker.
*
* When a different submission is detected, all previous resources
* will be freed and a new resource will be returned.
*
* When still inside the same submission and the resource needs to be
* updated (`is_dirty=true`), a new resource will be returned. Otherwise
* the previously used resource is returned.
*
* When no resource exists yet, a new one will be created.
*
* The returned resource is owned by this resource tracker and should not
* be stored outside this class, as it might be destroyed when the next
* submission is detected.
*/
std::unique_ptr<Resource> &tracked_resource_for(VKContext &context, const bool is_dirty)
{
if (submission_tracker_.is_changed(context)) {
free_tracked_resources();
tracked_resources_.append(create_resource(context));
}
else if (is_dirty || tracked_resources_.is_empty()) {
tracked_resources_.append(create_resource(context));
}
return active_resource();
}
/**
* Callback to create a new resource. Can be called by the `tracked_resource_for` method.
*/
virtual std::unique_ptr<Resource> create_resource(VKContext &context) = 0;
/**
* Return the active resource of the tracker.
*/
std::unique_ptr<Resource> &active_resource()
{
BLI_assert(!tracked_resources_.is_empty());
return tracked_resources_.last();
}
private:
void free_tracked_resources()
{
tracked_resources_.clear();
}
};
} // namespace blender::gpu
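For illustration, a minimal sketch of a resource class hooking into this tracker, modeled on `VKPushConstants` in this commit (`MyBuffer`, its default constructor, and the upload step are hypothetical):

class MyTrackedBuffer : VKResourceTracker<MyBuffer> {
  bool is_dirty_ = false;

 public:
  void flush(VKContext &context)
  {
    /* Returns the active buffer, or a fresh one when the submission changed
     * or the data was modified since the last flush. */
    std::unique_ptr<MyBuffer> &buffer = tracked_resource_for(context, is_dirty_);
    /* ... upload data into *buffer ... */
    is_dirty_ = false;
  }

 protected:
  std::unique_ptr<MyBuffer> create_resource(VKContext & /*context*/) override
  {
    return std::make_unique<MyBuffer>();
  }
};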

View File

@ -9,10 +9,13 @@
#include "vk_buffer.hh"
#include "vk_context.hh"
#include "vk_data_conversion.hh"
#include "vk_memory.hh"
#include "vk_shader.hh"
#include "vk_shader_interface.hh"
#include "BLI_math_vector.hh"
#include "BKE_global.h"
namespace blender::gpu {
@ -34,8 +37,64 @@ void VKTexture::copy_to(Texture * /*tex*/)
{
}
void VKTexture::clear(eGPUDataFormat /*format*/, const void * /*data*/)
template<typename T> void copy_color(T dst[4], const T *src)
{
dst[0] = src[0];
dst[1] = src[1];
dst[2] = src[2];
dst[3] = src[3];
}
static VkClearColorValue to_vk_clear_color_value(eGPUDataFormat format, const void *data)
{
VkClearColorValue result = {0.0f};
switch (format) {
case GPU_DATA_FLOAT: {
const float *float_data = static_cast<const float *>(data);
copy_color<float>(result.float32, float_data);
break;
}
case GPU_DATA_INT: {
const int32_t *int_data = static_cast<const int32_t *>(data);
copy_color<int32_t>(result.int32, int_data);
break;
}
case GPU_DATA_UINT: {
const uint32_t *uint_data = static_cast<const uint32_t *>(data);
copy_color<uint32_t>(result.uint32, uint_data);
break;
}
case GPU_DATA_HALF_FLOAT:
case GPU_DATA_UBYTE:
case GPU_DATA_UINT_24_8:
case GPU_DATA_10_11_11_REV:
case GPU_DATA_2_10_10_10_REV: {
BLI_assert_unreachable();
break;
}
}
return result;
}
void VKTexture::clear(eGPUDataFormat format, const void *data)
{
if (!is_allocated()) {
allocate();
}
VKContext &context = *VKContext::get();
VKCommandBuffer &command_buffer = context.command_buffer_get();
VkClearColorValue clear_color = to_vk_clear_color_value(format, data);
VkImageSubresourceRange range = {0};
range.aspectMask = to_vk_image_aspect_flag_bits(format_);
range.levelCount = VK_REMAINING_MIP_LEVELS;
range.layerCount = VK_REMAINING_ARRAY_LAYERS;
command_buffer.clear(
vk_image_, VK_IMAGE_LAYOUT_GENERAL, clear_color, Span<VkImageSubresourceRange>(&range, 1));
}
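/* For illustration only (not part of this commit): a hypothetical call site
 * through the public GPU API, which is expected to dispatch to
 * VKTexture::clear() on the Vulkan backend. `texture` is an assumed,
 * already-created GPUTexture handle. */
const float clear_color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
GPU_texture_clear(texture, GPU_DATA_FLOAT, clear_color);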
void VKTexture::swizzle_set(const char /*swizzle_mask*/[4])
@ -79,22 +138,41 @@ void *VKTexture::read(int mip, eGPUDataFormat format)
command_buffer.submit();
void *data = MEM_mallocN(host_memory_size, __func__);
/* TODO: add conversion when data format is different. */
BLI_assert_msg(device_memory_size == host_memory_size,
"Memory data conversions not implemented yet");
staging_buffer.read(data);
convert_device_to_host(data, staging_buffer.mapped_memory_get(), sample_len, format, format_);
return data;
}
void VKTexture::update_sub(int /*mip*/,
int /*offset*/[3],
int /*extent*/[3],
eGPUDataFormat /*format*/,
const void * /*data*/)
void VKTexture::update_sub(
int mip, int offset[3], int extent[3], eGPUDataFormat format, const void *data)
{
if (!is_allocated()) {
allocate();
}
/* Vulkan images cannot be directly mapped to host memory and require a staging buffer. */
VKContext &context = *VKContext::get();
VKBuffer staging_buffer;
size_t sample_len = extent[0] * extent[1] * extent[2];
size_t device_memory_size = sample_len * to_bytesize(format_);
staging_buffer.create(
context, device_memory_size, GPU_USAGE_DEVICE_ONLY, VK_BUFFER_USAGE_TRANSFER_SRC_BIT);
convert_host_to_device(staging_buffer.mapped_memory_get(), data, sample_len, format, format_);
VkBufferImageCopy region = {};
region.imageExtent.width = extent[0];
region.imageExtent.height = extent[1];
region.imageExtent.depth = extent[2];
region.imageOffset.x = offset[0];
region.imageOffset.y = offset[1];
region.imageOffset.z = offset[2];
region.imageSubresource.aspectMask = to_vk_image_aspect_flag_bits(format_);
region.imageSubresource.mipLevel = mip;
region.imageSubresource.layerCount = 1;
VKCommandBuffer &command_buffer = context.command_buffer_get();
command_buffer.copy(*this, staging_buffer, Span<VkBufferImageCopy>(&region, 1));
command_buffer.submit();
}
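/* For illustration only (not part of this commit): a hypothetical call site
 * uploading a 2x2 float region at offset (0, 0) through the public GPU API;
 * `texture` and `pixels` are assumed to already exist. */
GPU_texture_update_sub(texture, GPU_DATA_FLOAT, pixels, 0, 0, 0, 2, 2, 1);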
void VKTexture::update_sub(int /*offset*/[3],
@ -115,6 +193,8 @@ bool VKTexture::init_internal()
/* Initialization can only happen after the usage is known. With the current API it isn't
* known at this moment, so we cannot initialize here. Initialization is postponed until
* the texture is allocated on the device. */
/* TODO: return false when texture format isn't supported. */
return true;
}
@ -152,12 +232,12 @@ bool VKTexture::allocate()
image_info.format = to_vk_format(format_);
image_info.tiling = VK_IMAGE_TILING_LINEAR;
image_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_SAMPLED_BIT |
VK_IMAGE_USAGE_STORAGE_BIT;
image_info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT |
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT;
image_info.samples = VK_SAMPLE_COUNT_1_BIT;
VkResult result;
if (G.debug &= G_DEBUG_GPU) {
if (G.debug & G_DEBUG_GPU) {
VkImageFormatProperties image_format = {};
result = vkGetPhysicalDeviceImageFormatProperties(context.physical_device_get(),
image_info.format,

View File

@ -7,13 +7,15 @@
#pragma once
#include "BLI_utility_mixins.hh"
#include "gpu_uniform_buffer_private.hh"
#include "vk_buffer.hh"
namespace blender::gpu {
class VKUniformBuffer : public UniformBuf {
class VKUniformBuffer : public UniformBuf, NonCopyable {
VKBuffer buffer_;
public:

View File

@ -676,12 +676,7 @@ ImBuf *imb_loadtarga(const uchar *mem, size_t mem_size, int flags, char colorspa
cmap[count] = cp_data;
}
size = 0;
for (int cmap_index = cmap_max - 1; cmap_index > 0; cmap_index >>= 1) {
size++;
}
ibuf->planes = size;
ibuf->planes = tga.mapbits;
if (tga.mapbits != 32) { /* Set alpha bits. */
cmap[0] &= BIG_LONG(0x00ffffffl);
}

View File

@ -332,7 +332,7 @@ static void rna_ColorRamp_update(Main *bmain, Scene *UNUSED(scene), PointerRNA *
WM_main_add_notifier(NC_LINESTYLE, linestyle);
break;
}
/* ColorRamp for particle display is owned by the object (see #54422) */
/* Color Ramp for particle display is owned by the object (see #54422) */
case ID_OB:
case ID_PA: {
ParticleSettings *part = (ParticleSettings *)ptr->owner_id;
@ -979,7 +979,7 @@ static void rna_def_color_ramp_element_api(BlenderRNA *brna, PropertyRNA *cprop)
/* TODO: make these functions generic in `texture.c`. */
func = RNA_def_function(srna, "new", "rna_ColorRampElement_new");
RNA_def_function_ui_description(func, "Add element to ColorRamp");
RNA_def_function_ui_description(func, "Add element to Color Ramp");
RNA_def_function_flag(func, FUNC_USE_REPORTS);
parm = RNA_def_float(
func, "position", 0.0f, 0.0f, 1.0f, "Position", "Position to add element", 0.0f, 1.0f);
@ -989,7 +989,7 @@ static void rna_def_color_ramp_element_api(BlenderRNA *brna, PropertyRNA *cprop)
RNA_def_function_return(func, parm);
func = RNA_def_function(srna, "remove", "rna_ColorRampElement_remove");
RNA_def_function_ui_description(func, "Delete element from ColorRamp");
RNA_def_function_ui_description(func, "Delete element from Color Ramp");
RNA_def_function_flag(func, FUNC_USE_REPORTS);
parm = RNA_def_pointer(func, "element", "ColorRampElement", "", "Element to remove");
RNA_def_parameter_flags(parm, PROP_NEVER_NULL, PARM_REQUIRED | PARM_RNAPTR);
@ -1069,14 +1069,14 @@ static void rna_def_color_ramp(BlenderRNA *brna)
# endif
func = RNA_def_function(srna, "evaluate", "rna_ColorRamp_eval");
RNA_def_function_ui_description(func, "Evaluate ColorRamp");
RNA_def_function_ui_description(func, "Evaluate Color Ramp");
parm = RNA_def_float(func,
"position",
1.0f,
0.0f,
1.0f,
"Position",
"Evaluate ColorRamp at position",
"Evaluate Color Ramp at position",
0.0f,
1.0f);
RNA_def_parameter_flags(parm, 0, PARM_REQUIRED);

View File

@ -26,7 +26,7 @@ DefNode(Node, NODE_REROUTE, 0, "REROUT
DefNode(ShaderNode, SH_NODE_RGB, 0, "RGB", RGB, "RGB", "A color picker")
DefNode(ShaderNode, SH_NODE_VALUE, 0, "VALUE", Value, "Value", "Used to input numerical values to other nodes in the tree")
DefNode(ShaderNode, SH_NODE_MIX_RGB_LEGACY, def_mix_rgb, "MIX_RGB", MixRGB, "MixRGB", "Mix two input colors")
DefNode(ShaderNode, SH_NODE_VALTORGB, def_colorramp, "VALTORGB", ValToRGB, "ColorRamp", "Map values to colors with the use of a gradient")
DefNode(ShaderNode, SH_NODE_VALTORGB, def_colorramp, "VALTORGB", ValToRGB, "Color Ramp", "Map values to colors with the use of a gradient")
DefNode(ShaderNode, SH_NODE_RGBTOBW, 0, "RGBTOBW", RGBToBW, "RGB to BW", "Convert a color's luminance to a grayscale value")
DefNode(ShaderNode, SH_NODE_SHADERTORGB, 0, "SHADERTORGB", ShaderToRGB, "Shader to RGB", "Convert rendering effect (such as light and shadow) to color. Typically used for non-photorealistic rendering, to apply additional effects on the output of BSDFs.\nNote: only supported for Eevee")
DefNode(ShaderNode, SH_NODE_NORMAL, 0, "NORMAL", Normal, "Normal", "Generate a normal vector and a dot product")
@ -128,7 +128,7 @@ DefNode(CompositorNode, CMP_NODE_VIEWER, def_cmp_viewer, "VIEWER
DefNode(CompositorNode, CMP_NODE_RGB, 0, "RGB", RGB, "RGB", "" )
DefNode(CompositorNode, CMP_NODE_VALUE, 0, "VALUE", Value, "Value", "" )
DefNode(CompositorNode, CMP_NODE_MIX_RGB, def_mix_rgb, "MIX_RGB", MixRGB, "Mix", "" )
DefNode(CompositorNode, CMP_NODE_VALTORGB, def_colorramp, "VALTORGB", ValToRGB, "ColorRamp", "" )
DefNode(CompositorNode, CMP_NODE_VALTORGB, def_colorramp, "VALTORGB", ValToRGB, "Color Ramp", "" )
DefNode(CompositorNode, CMP_NODE_RGBTOBW, 0, "RGBTOBW", RGBToBW, "RGB to BW", "" )
DefNode(CompositorNode, CMP_NODE_NORMAL, 0, "NORMAL", Normal, "Normal", "" )
DefNode(CompositorNode, CMP_NODE_CURVE_VEC, def_vector_curve, "CURVE_VEC", CurveVec, "Vector Curves", "" )
@ -231,7 +231,7 @@ DefNode(TextureNode, TEX_NODE_BRICKS, def_tex_bricks, "BRICKS
DefNode(TextureNode, TEX_NODE_MATH, def_math, "MATH", Math, "Math", "" )
DefNode(TextureNode, TEX_NODE_MIX_RGB, def_mix_rgb, "MIX_RGB", MixRGB, "Mix RGB", "" )
DefNode(TextureNode, TEX_NODE_RGBTOBW, 0, "RGBTOBW", RGBToBW, "RGB to BW", "" )
DefNode(TextureNode, TEX_NODE_VALTORGB, def_colorramp, "VALTORGB", ValToRGB, "ColorRamp", "" )
DefNode(TextureNode, TEX_NODE_VALTORGB, def_colorramp, "VALTORGB", ValToRGB, "Color Ramp", "" )
DefNode(TextureNode, TEX_NODE_IMAGE, def_tex_image, "IMAGE", Image, "Image", "" )
DefNode(TextureNode, TEX_NODE_CURVE_RGB, def_rgb_curve, "CURVE_RGB", CurveRGB, "RGB Curves", "" )
DefNode(TextureNode, TEX_NODE_INVERT, 0, "INVERT", Invert, "Invert", "" )

View File

@ -135,7 +135,7 @@ void register_node_type_cmp_valtorgb()
static bNodeType ntype;
cmp_node_type_base(&ntype, CMP_NODE_VALTORGB, "ColorRamp", NODE_CLASS_CONVERTER);
cmp_node_type_base(&ntype, CMP_NODE_VALTORGB, "Color Ramp", NODE_CLASS_CONVERTER);
ntype.declare = file_ns::cmp_node_valtorgb_declare;
node_type_size(&ntype, 240, 200, 320);
ntype.initfunc = file_ns::node_composit_init_valtorgb;

View File

@ -136,7 +136,7 @@ void register_node_type_sh_valtorgb()
static bNodeType ntype;
sh_fn_node_type_base(&ntype, SH_NODE_VALTORGB, "ColorRamp", NODE_CLASS_CONVERTER);
sh_fn_node_type_base(&ntype, SH_NODE_VALTORGB, "Color Ramp", NODE_CLASS_CONVERTER);
ntype.declare = file_ns::sh_node_valtorgb_declare;
ntype.initfunc = file_ns::node_shader_init_valtorgb;
node_type_size_preset(&ntype, NODE_SIZE_LARGE);

View File

@ -47,7 +47,7 @@ void register_node_type_tex_valtorgb(void)
{
static bNodeType ntype;
tex_node_type_base(&ntype, TEX_NODE_VALTORGB, "ColorRamp", NODE_CLASS_CONVERTER);
tex_node_type_base(&ntype, TEX_NODE_VALTORGB, "Color Ramp", NODE_CLASS_CONVERTER);
node_type_socket_templates(&ntype, valtorgb_in, valtorgb_out);
node_type_size_preset(&ntype, NODE_SIZE_LARGE);
ntype.initfunc = valtorgb_init;

View File

@ -526,7 +526,7 @@ static int py_to_array(PyObject *seq,
// totdim = RNA_property_array_dimension(ptr, prop, dim_size); /* UNUSED */
const int flag = RNA_property_flag(prop);
/* Use #ParameterDynAlloc which defines it's own array length. */
/* Use #ParameterDynAlloc which defines its own array length. */
const bool prop_is_param_dyn_alloc = param_data && (flag & PROP_DYNAMIC);
if (validate_array(seq,

View File

@ -129,6 +129,11 @@ typedef enum eWM_CapabilitiesFlag {
WM_CAPABILITY_CURSOR_WARP = (1 << 0),
/** Ability to access window positions & move them. */
WM_CAPABILITY_WINDOW_POSITION = (1 << 1),
/**
* The windowing system supports a separate primary clipboard
* (typically set when interactively selecting text).
*/
WM_CAPABILITY_PRIMARY_CLIPBOARD = (1 << 2),
} eWM_CapabilitiesFlag;
eWM_CapabilitiesFlag WM_capabilities_flag(void);
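A minimal sketch of how callers are expected to consume the new flag (the call site below is hypothetical, not part of this commit):

if (WM_capabilities_flag() & WM_CAPABILITY_PRIMARY_CLIPBOARD) {
  /* Enable paste-from-selection style behavior (hypothetical action). */
}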

View File

@ -129,7 +129,7 @@ wmGizmoGroupTypeRef *WM_gizmogrouptype_append_and_link(wmGizmoMapType *gzmap_typ
*/
static void gizmogrouptype_free(wmGizmoGroupType *gzgt)
{
/* Python gizmo group, allocates it's own string. */
/* Python gizmo group, allocates its own string. */
if (gzgt->rna_ext.srna) {
MEM_freeN((void *)gzgt->idname);
}

View File

@ -106,7 +106,7 @@ void WM_gizmotype_append_ptr(void (*gtfunc)(struct wmGizmoType *, void *), void
void WM_gizmotype_free_ptr(wmGizmoType *gzt)
{
/* Python gizmo, allocates it's own string. */
/* Python gizmo, allocates its own string. */
if (gzt->rna_ext.srna) {
MEM_freeN((void *)gzt->idname);
}

View File

@ -1149,7 +1149,7 @@ static void wm_operator_finished(bContext *C,
}
}
else if (has_undo_step) {
/* An undo step was added but the operator wasn't registered (and won't register it's self),
/* An undo step was added but the operator wasn't registered (and won't register itself),
* therefore a redo panel wouldn't redo this action but the previously registered action,
* causing the "redo" to remove/lose this operator. See: #101743.
* Register check is needed so nested operator calls don't clear the HUD. See: #103587. */

View File

@ -1112,7 +1112,7 @@ void wm_homefile_read_ex(bContext *C,
const bool use_userdef = params_homefile->use_userdef;
bool use_factory_settings = params_homefile->use_factory_settings;
/* Currently this only impacts preferences as it doesn't make much sense to keep the default
* startup open in the case the app-template doesn't happen to define it's own startup.
* startup open in the case the app-template doesn't happen to define its own startup.
* Unlike preferences where we might want to only reset the app-template part of the preferences
* so as not to reset the preferences for all other Blender instances, see: #96427. */
const bool use_factory_settings_app_template_only =
@ -1604,7 +1604,7 @@ static void wm_history_file_update(void)
*
* - An image is saved to the thumbnail cache, sized at #PREVIEW_RENDER_LARGE_HEIGHT.
*
* - A smaller thumbnail is stored in the `.blend` file it's self, sized at #BLEN_THUMB_SIZE.
* - A smaller thumbnail is stored in the `.blend` file itself, sized at #BLEN_THUMB_SIZE.
* The size is kept small to prevent thumbnails bloating the size of `.blend` files.
*
* This thumbnail will be extracted if the file is shared or the local thumbnail cache

View File

@ -187,7 +187,7 @@ static void operatortype_ghash_free_cb(wmOperatorType *ot)
}
if (ot->rna_ext.srna) {
/* A Python operator, allocates it's own string. */
/* A Python operator, allocates its own string. */
MEM_freeN((void *)ot->idname);
}

View File

@ -1834,6 +1834,10 @@ eWM_CapabilitiesFlag WM_capabilities_flag(void)
if (GHOST_SupportsWindowPosition()) {
flag |= WM_CAPABILITY_WINDOW_POSITION;
}
if (GHOST_SupportsPrimaryClipboard()) {
flag |= WM_CAPABILITY_PRIMARY_CLIPBOARD;
}
return flag;
}

View File

@ -20,6 +20,10 @@ endif()
get_property(_test_libs GLOBAL PROPERTY BLENDER_TEST_LIBS)
if(WIN32 OR APPLE)
# Windows and macOS set target_link_options after target creation.
#
# We still need to ensure a dependency between the test libraries and the blender_test
# binary, so that the latter is re-linked when a test library is re-compiled.
list(APPEND TEST_LIBS ${_test_libs})
elseif(UNIX)
list(APPEND TEST_LIBS "-Wl,--whole-archive" ${_test_libs} "-Wl,--no-whole-archive")
else()

View File

@ -234,7 +234,7 @@ class TestPropArrayDynamicAssign(unittest.TestCase):
class TestPropArrayDynamicArg(unittest.TestCase):
"""
Index array, a dynamic array argument which defines it's own length.
Index array, a dynamic array argument which defines its own length.
"""
dims = 8

View File

@ -418,7 +418,7 @@ def main() -> None:
source_paths_include=(".",),
source_paths_exclude=(
# Directories:
# This is an exception, it has it's own CMake files we do not maintain.
# This is an exception, it has its own CMake files we do not maintain.
"./extern/audaspace",
"./extern/quadriflow/3rd/lemon-1.3.1",
),
@ -428,7 +428,7 @@ def main() -> None:
source_paths_include=(".",),
source_paths_exclude=(
# Directories:
# This is an exception, it has it's own CMake files we do not maintain.
# This is an exception, it has its own CMake files we do not maintain.
"./extern",
"./scripts/addons_contrib",
# Just data.

View File

@ -40,7 +40,7 @@
}
##----------------------------------------------------------------------##
# Python Calls aren't so useful unless we're debugging Python it's self
# Python Calls aren't so useful unless we're debugging Python itself
# _PyObject_Free
{