WIP: Basic Blender Project Support (experimental feature) #107655

Draft
Julian Eisel wants to merge 94 commits from blender-projects-basics into main

38 changed files with 1918 additions and 303 deletions
Showing only changes of commit 30b08fbec5

View File

@ -101,34 +101,16 @@ else()
set(LIBPREFIX "lib")
if(APPLE)
# Let's get the current Xcode dir, to support xcode-select
execute_process(
COMMAND xcode-select --print-path
OUTPUT_VARIABLE XCODE_DEV_PATH OUTPUT_STRIP_TRAILING_WHITESPACE
)
execute_process(
COMMAND xcodebuild -version -sdk macosx SDKVersion
OUTPUT_VARIABLE MACOSX_SDK_VERSION OUTPUT_STRIP_TRAILING_WHITESPACE)
if(NOT CMAKE_OSX_ARCHITECTURES)
execute_process(COMMAND uname -m OUTPUT_VARIABLE ARCHITECTURE OUTPUT_STRIP_TRAILING_WHITESPACE)
message(STATUS "Detected native architecture ${ARCHITECTURE}.")
set(CMAKE_OSX_ARCHITECTURES "${ARCHITECTURE}")
endif()
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64")
set(OSX_DEPLOYMENT_TARGET 10.13)
else()
set(OSX_DEPLOYMENT_TARGET 11.00)
endif()
set(OSX_SYSROOT ${XCODE_DEV_PATH}/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk)
# Use same Xcode detection as Blender itself.
include(../cmake/platform/platform_apple_xcode.cmake)
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64")
set(BLENDER_PLATFORM_ARM ON)
endif()
set(PLATFORM_CFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET} -arch ${CMAKE_OSX_ARCHITECTURES}")
set(PLATFORM_CXXFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET} -std=c++11 -stdlib=libc++ -arch ${CMAKE_OSX_ARCHITECTURES}")
set(PLATFORM_LDFLAGS "-isysroot ${OSX_SYSROOT} -mmacosx-version-min=${OSX_DEPLOYMENT_TARGET} -arch ${CMAKE_OSX_ARCHITECTURES}")
set(PLATFORM_CFLAGS "-isysroot ${CMAKE_OSX_SYSROOT} -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET} -arch ${CMAKE_OSX_ARCHITECTURES}")
set(PLATFORM_CXXFLAGS "-isysroot ${CMAKE_OSX_SYSROOT} -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET} -std=c++11 -stdlib=libc++ -arch ${CMAKE_OSX_ARCHITECTURES}")
set(PLATFORM_LDFLAGS "-isysroot ${CMAKE_OSX_SYSROOT} -mmacosx-version-min=${CMAKE_OSX_DEPLOYMENT_TARGET} -arch ${CMAKE_OSX_ARCHITECTURES}")
if("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "x86_64")
set(PLATFORM_BUILD_TARGET --build=x86_64-apple-darwin17.0.0) # OS X 10.13
else()
@ -136,8 +118,8 @@ else()
endif()
set(PLATFORM_CMAKE_FLAGS
-DCMAKE_OSX_ARCHITECTURES:STRING=${CMAKE_OSX_ARCHITECTURES}
-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=${OSX_DEPLOYMENT_TARGET}
-DCMAKE_OSX_SYSROOT:PATH=${OSX_SYSROOT}
-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=${CMAKE_OSX_DEPLOYMENT_TARGET}
-DCMAKE_OSX_SYSROOT:PATH=${CMAKE_OSX_SYSROOT}
)
else()
if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "aarch64")
@ -171,8 +153,8 @@ else()
set(BLENDER_CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O2 -g -DNDEBUG ${PLATFORM_CXXFLAGS}")
set(CONFIGURE_ENV
export MACOSX_DEPLOYMENT_TARGET=${OSX_DEPLOYMENT_TARGET} &&
export MACOSX_SDK_VERSION=${OSX_DEPLOYMENT_TARGET} &&
export MACOSX_DEPLOYMENT_TARGET=${CMAKE_OSX_DEPLOYMENT_TARGET} &&
export MACOSX_SDK_VERSION=${CMAKE_OSX_DEPLOYMENT_TARGET} &&
export CFLAGS=${PLATFORM_CFLAGS} &&
export CXXFLAGS=${PLATFORM_CXXFLAGS} &&
export LDFLAGS=${PLATFORM_LDFLAGS}

View File

@ -1296,6 +1296,7 @@ void PathTrace::set_guiding_params(const GuidingParams &guiding_params, const bo
# if OPENPGL_VERSION_MINOR >= 4
field_args.deterministic = guiding_params.deterministic;
# endif
reinterpret_cast<PGLKDTreeArguments *>(field_args.spatialSturctureArguments)->maxDepth = 16;
openpgl::cpp::Device *guiding_device = static_cast<openpgl::cpp::Device *>(
device_->get_guiding_device());
if (guiding_device) {

View File

@ -76,6 +76,9 @@ ccl_device_forceinline IntegratorShadowState integrator_shadow_path_init(
&kernel_integrator_state.next_shadow_path_index[0], 1);
atomic_fetch_and_add_uint32(&kernel_integrator_state.queue_counter->num_queued[next_kernel], 1);
INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, queued_kernel) = next_kernel;
# ifdef __PATH_GUIDING__
INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, path_segment) = nullptr;
# endif
return shadow_state;
}
@ -181,6 +184,9 @@ ccl_device_forceinline IntegratorShadowState integrator_shadow_path_init(
{
IntegratorShadowState shadow_state = (is_ao) ? &state->ao : &state->shadow;
INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, queued_kernel) = next_kernel;
# ifdef __PATH_GUIDING__
INTEGRATOR_STATE_WRITE(shadow_state, shadow_path, path_segment) = nullptr;
# endif
return shadow_state;
}

View File

@ -196,11 +196,16 @@ bool OIIOImageLoader::load_pixels(const ImageMetaData &metadata,
if (associate_alpha) {
do_associate_alpha = spec.get_int_attribute("oiio:UnassociatedAlpha", 0);
/* Workaround OIIO not detecting TGA file alpha the same as Blender (since #3019).
* We want anything not marked as premultiplied alpha to get associated. */
if (!do_associate_alpha && spec.alpha_channel != -1 &&
strcmp(in->format_name(), "targa") == 0) {
do_associate_alpha = spec.get_int_attribute("targa:alpha_type", -1) != 4;
if (!do_associate_alpha && spec.alpha_channel != -1) {
/* Workaround OIIO not detecting TGA file alpha the same as Blender (since #3019).
* We want anything not marked as premultiplied alpha to get associated. */
if (strcmp(in->format_name(), "targa") == 0) {
do_associate_alpha = spec.get_int_attribute("targa:alpha_type", -1) != 4;
}
/* OIIO DDS reader never sets UnassociatedAlpha attribute. */
if (strcmp(in->format_name(), "dds") == 0) {
do_associate_alpha = true;
}
}
}
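
For context, "associating" alpha means premultiplying the color channels by the alpha channel. A minimal, hedged sketch of the conversion that do_associate_alpha ultimately triggers (the helper below is illustrative, not Cycles' actual code path):

#include <cstddef>

/* Premultiply straight (unassociated) alpha into RGB.
 * Illustrative only -- Cycles performs this conversion internally
 * once `do_associate_alpha` is set. */
static void associate_alpha(float *rgba, const size_t pixel_count)
{
  for (size_t i = 0; i < pixel_count; i++) {
    float *pixel = rgba + i * 4;
    pixel[0] *= pixel[3]; /* R */
    pixel[1] *= pixel[3]; /* G */
    pixel[2] *= pixel[3]; /* B */
  }
}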

View File

@ -526,7 +526,7 @@ typedef struct {
} GHOST_TStringArray;
typedef enum {
GHOST_kNotStarted,
GHOST_kNotStarted = 0,
GHOST_kStarting,
GHOST_kInProgress,
GHOST_kFinishing,

View File

@ -7,18 +7,23 @@
#include "GHOST_WindowManager.h"
#include "GHOST_utildefines.h"
/* Logging, use `ghost.ndof.*` prefix. */
#include "CLG_log.h"
#include <climits>
#include <cmath>
#include <cstdio> /* For error/info reporting. */
#include <cstring> /* For memory functions. */
#ifdef DEBUG_NDOF_MOTION
/* Printable version of each GHOST_TProgress value. */
static const char *progress_string[] = {
"not started", "starting", "in progress", "finishing", "finished"};
#endif
"not started",
"starting",
"in progress",
"finishing",
"finished",
};
#ifdef DEBUG_NDOF_BUTTONS
static const char *ndof_button_names[] = {
/* used internally, never sent */
"NDOF_BUTTON_NONE",
@ -69,8 +74,8 @@ static const char *ndof_button_names[] = {
"NDOF_BUTTON_B",
"NDOF_BUTTON_C",
/* the end */
"NDOF_BUTTON_LAST"};
#endif
"NDOF_BUTTON_LAST",
};
/* Shared by the latest 3Dconnexion hardware
* SpacePilotPro uses all of these
@ -150,6 +155,13 @@ GHOST_NDOFManager::GHOST_NDOFManager(GHOST_System &sys)
memset(m_rotation, 0, sizeof(m_rotation));
}
/* -------------------------------------------------------------------- */
/** \name NDOF Device Setup
* \{ */
static CLG_LogRef LOG_NDOF_DEVICE = {"ghost.ndof.device"};
#define LOG (&LOG_NDOF_DEVICE)
bool GHOST_NDOFManager::setDevice(ushort vendor_id, ushort product_id)
{
/* Call this function until it returns true
@ -260,13 +272,19 @@ bool GHOST_NDOFManager::setDevice(ushort vendor_id, ushort product_id)
m_buttonMask = int(~(UINT_MAX << m_buttonCount));
}
#ifdef DEBUG_NDOF_BUTTONS
printf("ndof: %d buttons -> hex:%X\n", m_buttonCount, m_buttonMask);
#endif
CLOG_INFO(LOG, 2, "%d buttons -> hex:%X", m_buttonCount, (uint)m_buttonMask);
return m_deviceType != NDOF_UnknownDevice;
}
#undef LOG
/** \} */
/* -------------------------------------------------------------------- */
/** \name NDOF Update State
* \{ */
void GHOST_NDOFManager::updateTranslation(const int t[3], uint64_t time)
{
memcpy(m_translation, t, sizeof(m_translation));
@ -281,6 +299,36 @@ void GHOST_NDOFManager::updateRotation(const int r[3], uint64_t time)
m_motionEventPending = true;
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name NDOF Buttons
* \{ */
static CLG_LogRef LOG_NDOF_BUTTONS = {"ghost.ndof.buttons"};
#define LOG (&LOG_NDOF_BUTTONS)
static GHOST_TKey ghost_map_keyboard_from_ndof_button(const NDOF_ButtonT button)
{
switch (button) {
case NDOF_BUTTON_ESC: {
return GHOST_kKeyEsc;
}
case NDOF_BUTTON_ALT: {
return GHOST_kKeyLeftAlt;
}
case NDOF_BUTTON_SHIFT: {
return GHOST_kKeyLeftShift;
}
case NDOF_BUTTON_CTRL: {
return GHOST_kKeyLeftControl;
}
default: {
return GHOST_kKeyUnknown;
}
}
}
void GHOST_NDOFManager::sendButtonEvent(NDOF_ButtonT button,
bool press,
uint64_t time,
@ -295,10 +343,6 @@ void GHOST_NDOFManager::sendButtonEvent(NDOF_ButtonT button,
data->action = press ? GHOST_kPress : GHOST_kRelease;
data->button = button;
#ifdef DEBUG_NDOF_BUTTONS
printf("%s %s\n", ndof_button_names[button], press ? "pressed" : "released");
#endif
m_system.pushEvent(event);
}
@ -310,44 +354,41 @@ void GHOST_NDOFManager::sendKeyEvent(GHOST_TKey key,
GHOST_TEventType type = press ? GHOST_kEventKeyDown : GHOST_kEventKeyUp;
GHOST_EventKey *event = new GHOST_EventKey(time, type, window, key, false);
#ifdef DEBUG_NDOF_BUTTONS
printf("keyboard %s\n", press ? "down" : "up");
#endif
m_system.pushEvent(event);
}
void GHOST_NDOFManager::updateButton(int button_number, bool press, uint64_t time)
{
if (button_number >= m_buttonCount) {
CLOG_INFO(LOG,
2,
"button=%d, press=%d (out of range %d, ignoring!)",
button_number,
(int)press,
m_buttonCount);
return;
}
const NDOF_ButtonT button = m_hidMap[button_number];
if (button == NDOF_BUTTON_NONE) {
CLOG_INFO(
LOG, 2, "button=%d, press=%d (mapped to none, ignoring!)", button_number, (int)press);
return;
}
CLOG_INFO(LOG,
2,
"button=%d, press=%d, name=%s",
button_number,
(int)press,
ndof_button_names[button]);
GHOST_IWindow *window = m_system.getWindowManager()->getActiveWindow();
#ifdef DEBUG_NDOF_BUTTONS
printf("ndof: button %d -> ", button_number);
#endif
NDOF_ButtonT button = (button_number < m_buttonCount) ? m_hidMap[button_number] :
NDOF_BUTTON_NONE;
switch (button) {
case NDOF_BUTTON_NONE:
#ifdef DEBUG_NDOF_BUTTONS
printf("discarded\n");
#endif
break;
case NDOF_BUTTON_ESC:
sendKeyEvent(GHOST_kKeyEsc, press, time, window);
break;
case NDOF_BUTTON_ALT:
sendKeyEvent(GHOST_kKeyLeftAlt, press, time, window);
break;
case NDOF_BUTTON_SHIFT:
sendKeyEvent(GHOST_kKeyLeftShift, press, time, window);
break;
case NDOF_BUTTON_CTRL:
sendKeyEvent(GHOST_kKeyLeftControl, press, time, window);
break;
default:
sendButtonEvent(button, press, time, window);
const GHOST_TKey key = ghost_map_keyboard_from_ndof_button(button);
if (key != GHOST_kKeyUnknown) {
sendKeyEvent(key, press, time, window);
}
else {
sendButtonEvent(button, press, time, window);
}
int mask = 1 << button_number;
@ -375,19 +416,27 @@ void GHOST_NDOFManager::updateButtons(int button_bits, uint64_t time)
}
}
#undef LOG
/** \} */
/* -------------------------------------------------------------------- */
/** \name NDOF Motion
* \{ */
static CLG_LogRef LOG_NDOF_MOTION = {"ghost.ndof.motion"};
#define LOG (&LOG_NDOF_MOTION)
void GHOST_NDOFManager::setDeadZone(float dz)
{
if (dz < 0.0f) {
/* Negative values don't make sense, so clamp at zero. */
dz = 0.0f;
}
else if (dz > 0.5f) {
/* Warn the rogue user/developer, but allow it. */
GHOST_PRINTF("ndof: dead zone of %.2f is rather high...\n", dz);
}
m_deadZone = dz;
GHOST_PRINTF("ndof: dead zone set to %.2f\n", dz);
/* Warn the rogue user/developer about high dead-zone, but allow it. */
CLOG_INFO(LOG, 2, "dead zone set to %.2f%s", dz, (dz > 0.5f) ? " (unexpectedly high)" : "");
}
static bool atHomePosition(GHOST_TEventNDOFMotionData *ndof)
@ -402,11 +451,9 @@ static bool nearHomePosition(GHOST_TEventNDOFMotionData *ndof, float threshold)
if (threshold == 0.0f) {
return atHomePosition(ndof);
}
else {
#define HOME(foo) (fabsf(ndof->foo) < threshold)
return HOME(tx) && HOME(ty) && HOME(tz) && HOME(rx) && HOME(ry) && HOME(rz);
return HOME(tx) && HOME(ty) && HOME(tz) && HOME(rx) && HOME(ry) && HOME(rz);
#undef HOME
}
}
bool GHOST_NDOFManager::sendMotionEvent()
@ -419,7 +466,7 @@ bool GHOST_NDOFManager::sendMotionEvent()
GHOST_IWindow *window = m_system.getWindowManager()->getActiveWindow();
if (window == NULL) {
if (window == nullptr) {
m_motionState = GHOST_kNotStarted; /* Avoid large `dt` times when changing windows. */
return false; /* Delivery will fail, so don't bother sending. */
}
@ -439,7 +486,6 @@ bool GHOST_NDOFManager::sendMotionEvent()
data->rx = scale * m_rotation[0];
data->ry = scale * m_rotation[1];
data->rz = scale * m_rotation[2];
data->dt = 0.001f * (m_motionTime - m_prevMotionTime); /* In seconds. */
m_prevMotionTime = m_motionTime;
@ -449,7 +495,7 @@ bool GHOST_NDOFManager::sendMotionEvent()
* and where that leaves this NDOF manager `(NotStarted, InProgress, Finished)`. */
switch (m_motionState) {
case GHOST_kNotStarted:
case GHOST_kFinished:
case GHOST_kFinished: {
if (weHaveMotion) {
data->progress = GHOST_kStarting;
m_motionState = GHOST_kInProgress;
@ -458,14 +504,13 @@ bool GHOST_NDOFManager::sendMotionEvent()
}
else {
/* Send no event and keep current state. */
#ifdef DEBUG_NDOF_MOTION
printf("ndof motion ignored -- %s\n", progress_string[data->progress]);
#endif
CLOG_INFO(LOG, 2, "motion ignored");
delete event;
return false;
}
break;
case GHOST_kInProgress:
}
case GHOST_kInProgress: {
if (weHaveMotion) {
data->progress = GHOST_kInProgress;
/* Remain 'InProgress'. */
@ -475,33 +520,41 @@ bool GHOST_NDOFManager::sendMotionEvent()
m_motionState = GHOST_kFinished;
}
break;
default:
}
default: {
/* Will always be one of the above. */
break;
}
}
#ifdef DEBUG_NDOF_MOTION
printf("ndof motion sent -- %s\n", progress_string[data->progress]);
/* Show details about this motion event. */
printf(" T=(%d,%d,%d) R=(%d,%d,%d) raw\n",
m_translation[0],
m_translation[1],
m_translation[2],
m_rotation[0],
m_rotation[1],
m_rotation[2]);
printf(" T=(%.2f,%.2f,%.2f) R=(%.2f,%.2f,%.2f) dt=%.3f\n",
data->tx,
data->ty,
data->tz,
data->rx,
data->ry,
data->rz,
data->dt);
#if 1
CLOG_INFO(LOG,
2,
"motion sent, T=(%.2f,%.2f,%.2f), R=(%.2f,%.2f,%.2f) dt=%.3f, status=%s",
data->tx,
data->ty,
data->tz,
data->rx,
data->ry,
data->rz,
data->dt,
progress_string[data->progress]);
#else
/* Raw values, may be useful for debugging. */
CLOG_INFO(LOG,
2,
"motion sent, T=(%d,%d,%d) R=(%d,%d,%d) status=%s",
m_translation[0],
m_translation[1],
m_translation[2],
m_rotation[0],
m_rotation[1],
m_rotation[2],
progress_string[data->progress]);
#endif
m_system.pushEvent(event);
return true;
}
/** \} */
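
A note on the logging idiom this commit introduces: each code section declares its own CLG_LogRef category and temporarily binds it to a local LOG macro, so the CLOG_INFO calls stay short while categories remain filterable at runtime (e.g. via Blender's --log "ghost.ndof.*"). A minimal sketch of the pattern, with an illustrative category name:

#include "CLG_log.h"

static CLG_LogRef LOG_NDOF_EXAMPLE = {"ghost.ndof.example"};
#define LOG (&LOG_NDOF_EXAMPLE)

static void report_value(const int value)
{
  /* Verbosity level 2, matching the calls in this file. */
  CLOG_INFO(LOG, 2, "value=%d", value);
}

/* Undefine so the next section can bind LOG to its own category. */
#undef LOG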

View File

@ -8,9 +8,6 @@
#include "GHOST_System.h"
// #define DEBUG_NDOF_MOTION
// #define DEBUG_NDOF_BUTTONS
typedef enum {
NDOF_UnknownDevice,
@ -33,7 +30,7 @@ typedef enum {
/* NDOF device button event types */
typedef enum {
/* Used internally, never sent. */
NDOF_BUTTON_NONE,
NDOF_BUTTON_NONE = 0,
/* These two are available from any 3Dconnexion device. */
NDOF_BUTTON_MENU,
NDOF_BUTTON_FIT,

View File

@ -182,6 +182,11 @@ void BLO_blendfiledata_free(BlendFileData *bfd);
typedef struct BLODataBlockInfo {
char name[64]; /* MAX_NAME */
struct AssetMetaData *asset_data;
/* Optimization: Tag data-blocks for which we know there is no preview.
* Knowing this can be used to skip the (potentially expensive) preview loading process. If this
* is set to true it means we looked for a preview and couldn't find one. False may mean that
* either a preview was found, or that we didn't look for one in the first place. */
bool no_preview_found;
} BLODataBlockInfo;
/**
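
A hedged sketch of how a caller is expected to consume this tag (the actual consumer is in the file browser changes further down; the function below is hypothetical):

static void preview_request(const struct BLODataBlockInfo *info)
{
  if (info->no_preview_found) {
    /* Known negative result: skip reopening the .blend entirely. */
    return;
  }
  /* ... queue the regular (potentially expensive) preview load ... */
}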

View File

@ -47,7 +47,7 @@ set(SRC
intern/versioning_400.cc
intern/versioning_common.cc
intern/versioning_cycles.c
intern/versioning_defaults.c
intern/versioning_defaults.cc
intern/versioning_dna.c
intern/versioning_legacy.c
intern/versioning_userdef.c

View File

@ -138,11 +138,15 @@ LinkNode *BLO_blendhandle_get_datablock_info(BlendHandle *bh,
BHead *bhead;
int tot = 0;
const int sdna_nr_preview_image = DNA_struct_find_nr(fd->filesdna, "PreviewImage");
for (bhead = blo_bhead_first(fd); bhead; bhead = blo_bhead_next(fd, bhead)) {
if (bhead->code == ENDB) {
break;
}
if (bhead->code == ofblocktype) {
BHead *id_bhead = bhead;
const char *name = blo_bhead_id_name(fd, bhead) + 2;
AssetMetaData *asset_meta_data = blo_bhead_id_asset_data_address(fd, bhead);
@ -165,6 +169,17 @@ LinkNode *BLO_blendhandle_get_datablock_info(BlendHandle *bh,
STRNCPY(info->name, name);
info->asset_data = asset_meta_data;
bool has_preview = false;
/* See if we can find a preview in the data of this ID. */
for (BHead *data_bhead = blo_bhead_next(fd, id_bhead); data_bhead->code == DATA;
data_bhead = blo_bhead_next(fd, data_bhead)) {
if (data_bhead->SDNAnr == sdna_nr_preview_image) {
has_preview = true;
break;
}
}
info->no_preview_found = !has_preview;
BLI_linklist_prepend(&infos, info);
tot++;
}

View File

@ -15,6 +15,7 @@
#include "BLI_listbase.h"
#include "BLI_math.h"
#include "BLI_math_vec_types.hh"
#include "BLI_string.h"
#include "BLI_system.h"
#include "BLI_utildefines.h"
@ -36,6 +37,7 @@
#include "DNA_workspace_types.h"
#include "BKE_appdir.h"
#include "BKE_attribute.hh"
#include "BKE_brush.h"
#include "BKE_colortools.h"
#include "BKE_curveprofile.h"
@ -119,7 +121,7 @@ static void blo_update_defaults_screen(bScreen *screen,
if (area->spacetype == SPACE_IMAGE) {
if (STREQ(workspace_name, "UV Editing")) {
SpaceImage *sima = area->spacedata.first;
SpaceImage *sima = static_cast<SpaceImage *>(area->spacedata.first);
if (sima->mode == SI_MODE_VIEW) {
sima->mode = SI_MODE_UV;
}
@ -127,7 +129,7 @@ static void blo_update_defaults_screen(bScreen *screen,
}
else if (area->spacetype == SPACE_ACTION) {
/* Show markers region, hide channels and collapse summary in timelines. */
SpaceAction *saction = area->spacedata.first;
SpaceAction *saction = static_cast<SpaceAction *>(area->spacedata.first);
saction->flag |= SACTION_SHOW_MARKERS;
if (saction->mode == SACTCONT_TIMELINE) {
saction->ads.flag |= ADS_FLAG_SUMMARY_COLLAPSED;
@ -148,15 +150,15 @@ static void blo_update_defaults_screen(bScreen *screen,
}
}
else if (area->spacetype == SPACE_GRAPH) {
SpaceGraph *sipo = area->spacedata.first;
SpaceGraph *sipo = static_cast<SpaceGraph *>(area->spacedata.first);
sipo->flag |= SIPO_SHOW_MARKERS;
}
else if (area->spacetype == SPACE_NLA) {
SpaceNla *snla = area->spacedata.first;
SpaceNla *snla = static_cast<SpaceNla *>(area->spacedata.first);
snla->flag |= SNLA_SHOW_MARKERS;
}
else if (area->spacetype == SPACE_SEQ) {
SpaceSeq *seq = area->spacedata.first;
SpaceSeq *seq = static_cast<SpaceSeq *>(area->spacedata.first);
seq->flag |= SEQ_SHOW_MARKERS | SEQ_ZOOM_TO_FIT | SEQ_USE_PROXIES | SEQ_SHOW_OVERLAY;
seq->render_size = SEQ_RENDER_SIZE_PROXY_100;
seq->timeline_overlay.flag |= SEQ_TIMELINE_SHOW_STRIP_SOURCE | SEQ_TIMELINE_SHOW_STRIP_NAME |
@ -166,12 +168,12 @@ static void blo_update_defaults_screen(bScreen *screen,
}
else if (area->spacetype == SPACE_TEXT) {
/* Show syntax and line numbers in Script workspace text editor. */
SpaceText *stext = area->spacedata.first;
SpaceText *stext = static_cast<SpaceText *>(area->spacedata.first);
stext->showsyntax = true;
stext->showlinenrs = true;
}
else if (area->spacetype == SPACE_VIEW3D) {
View3D *v3d = area->spacedata.first;
View3D *v3d = static_cast<View3D *>(area->spacedata.first);
/* Screen space cavity by default for faster performance. */
v3d->shading.cavity_type = V3D_SHADING_CAVITY_CURVATURE;
v3d->shading.flag |= V3D_SHADING_SPECULAR_HIGHLIGHT;
@ -195,7 +197,7 @@ static void blo_update_defaults_screen(bScreen *screen,
v3d->overlay.normals_constant_screen_size = 7.0f;
}
else if (area->spacetype == SPACE_CLIP) {
SpaceClip *sclip = area->spacedata.first;
SpaceClip *sclip = static_cast<SpaceClip *>(area->spacedata.first);
sclip->around = V3D_AROUND_CENTER_MEDIAN;
sclip->mask_info.blend_factor = 0.7f;
sclip->mask_info.draw_flag = MASK_DRAWFLAG_SPLINE;
@ -206,7 +208,9 @@ static void blo_update_defaults_screen(bScreen *screen,
const bool hide_image_tool_header = STREQ(workspace_name, "Rendering");
LISTBASE_FOREACH (ScrArea *, area, &screen->areabase) {
LISTBASE_FOREACH (SpaceLink *, sl, &area->spacedata) {
ListBase *regionbase = (sl == area->spacedata.first) ? &area->regionbase : &sl->regionbase;
ListBase *regionbase = (sl == static_cast<SpaceLink *>(area->spacedata.first)) ?
&area->regionbase :
&sl->regionbase;
LISTBASE_FOREACH (ARegion *, region, regionbase) {
if (region->regiontype == RGN_TYPE_TOOL_HEADER) {
@ -226,12 +230,12 @@ static void blo_update_defaults_screen(bScreen *screen,
if (app_template && STREQ(app_template, "2D_Animation")) {
LISTBASE_FOREACH (ScrArea *, area, &screen->areabase) {
if (area->spacetype == SPACE_ACTION) {
SpaceAction *saction = area->spacedata.first;
SpaceAction *saction = static_cast<SpaceAction *>(area->spacedata.first);
/* Enable Sliders. */
saction->flag |= SACTION_SLIDERS;
}
else if (area->spacetype == SPACE_VIEW3D) {
View3D *v3d = area->spacedata.first;
View3D *v3d = static_cast<View3D *>(area->spacedata.first);
/* Set Material Color by default. */
v3d->shading.color_type = V3D_SHADING_MATERIAL_COLOR;
/* Enable Annotations. */
@ -252,7 +256,7 @@ void BLO_update_defaults_workspace(WorkSpace *workspace, const char *app_templat
if (blo_is_builtin_template(app_template)) {
/* Clear all tools to use default options instead, ignore the tool saved in the file. */
while (!BLI_listbase_is_empty(&workspace->tools)) {
BKE_workspace_tool_remove(workspace, workspace->tools.first);
BKE_workspace_tool_remove(workspace, static_cast<bToolRef *>(workspace->tools.first));
}
/* For 2D animation template. */
@ -268,7 +272,7 @@ void BLO_update_defaults_workspace(WorkSpace *workspace, const char *app_templat
LISTBASE_FOREACH (ScrArea *, area, &screen->areabase) {
LISTBASE_FOREACH (ARegion *, region, &area->regionbase) {
if (area->spacetype == SPACE_VIEW3D) {
View3D *v3d = area->spacedata.first;
View3D *v3d = static_cast<View3D *>(area->spacedata.first);
v3d->shading.flag &= ~V3D_SHADING_CAVITY;
copy_v3_fl(v3d->shading.single_color, 1.0f);
STRNCPY(v3d->shading.matcap, "basic_1");
@ -296,7 +300,8 @@ static void blo_update_defaults_scene(Main *bmain, Scene *scene)
}
/* Rename render layers. */
BKE_view_layer_rename(bmain, scene, scene->view_layers.first, "ViewLayer");
BKE_view_layer_rename(
bmain, scene, static_cast<ViewLayer *>(scene->view_layers.first), "ViewLayer");
/* Disable Z pass by default. */
LISTBASE_FOREACH (ViewLayer *, view_layer, &scene->view_layers) {
@ -308,7 +313,7 @@ static void blo_update_defaults_scene(Main *bmain, Scene *scene)
scene->eevee.bloom_clamp = 0.0f;
scene->eevee.motion_blur_shutter = 0.5f;
copy_v3_v3(scene->display.light_direction, (float[3]){M_SQRT1_3, M_SQRT1_3, M_SQRT1_3});
copy_v3_v3(scene->display.light_direction, blender::float3(M_SQRT1_3));
copy_v2_fl2(scene->safe_areas.title, 0.1f, 0.05f);
copy_v2_fl2(scene->safe_areas.action, 0.035f, 0.035f);
@ -344,9 +349,9 @@ static void blo_update_defaults_scene(Main *bmain, Scene *scene)
}
/* Correct default startup UV's. */
Mesh *me = BLI_findstring(&bmain->meshes, "Cube", offsetof(ID, name) + 2);
Mesh *me = static_cast<Mesh *>(BLI_findstring(&bmain->meshes, "Cube", offsetof(ID, name) + 2));
if (me && (me->totloop == 24) && CustomData_has_layer(&me->ldata, CD_MLOOPUV)) {
MLoopUV *mloopuv = CustomData_get_layer(&me->ldata, CD_MLOOPUV);
MLoopUV *mloopuv = static_cast<MLoopUV *>(CustomData_get_layer(&me->ldata, CD_MLOOPUV));
const float uv_values[24][2] = {
{0.625, 0.50}, {0.875, 0.50}, {0.875, 0.75}, {0.625, 0.75}, {0.375, 0.75}, {0.625, 0.75},
{0.625, 1.00}, {0.375, 1.00}, {0.375, 0.00}, {0.625, 0.00}, {0.625, 0.25}, {0.375, 0.25},
@ -373,7 +378,7 @@ static void blo_update_defaults_scene(Main *bmain, Scene *scene)
void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
{
/* For all app templates. */
for (WorkSpace *workspace = bmain->workspaces.first; workspace; workspace = workspace->id.next) {
LISTBASE_FOREACH (WorkSpace *, workspace, &bmain->workspaces) {
BLO_update_defaults_workspace(workspace, app_template);
}
@ -389,7 +394,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
do_versions_rename_id(bmain, ID_BR, "Draw Pen", "Pen");
/* Pen Soft brush. */
brush = (Brush *)do_versions_rename_id(bmain, ID_BR, "Draw Soft", "Pencil Soft");
brush = reinterpret_cast<Brush *>(
do_versions_rename_id(bmain, ID_BR, "Draw Soft", "Pencil Soft"));
if (brush) {
brush->gpencil_settings->icon_id = GP_BRUSH_ICON_PEN;
}
@ -407,7 +413,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
do_versions_rename_id(bmain, ID_BR, "Draw Block", "Marker Chisel");
/* Remove useless Fill Area.001 brush. */
brush = BLI_findstring(&bmain->brushes, "Fill Area.001", offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, "Fill Area.001", offsetof(ID, name) + 2));
if (brush) {
BKE_id_delete(bmain, brush);
}
@ -421,21 +428,24 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
do_versions_rename_id(bmain, ID_MA, "Black Dots", "Dots Stroke");
/* Dots Stroke. */
ma = BLI_findstring(&bmain->materials, "Dots Stroke", offsetof(ID, name) + 2);
ma = static_cast<Material *>(
BLI_findstring(&bmain->materials, "Dots Stroke", offsetof(ID, name) + 2));
if (ma == NULL) {
ma = BKE_gpencil_material_add(bmain, "Dots Stroke");
}
ma->gp_style->mode = GP_MATERIAL_MODE_DOT;
/* Squares Stroke. */
ma = BLI_findstring(&bmain->materials, "Squares Stroke", offsetof(ID, name) + 2);
ma = static_cast<Material *>(
BLI_findstring(&bmain->materials, "Squares Stroke", offsetof(ID, name) + 2));
if (ma == NULL) {
ma = BKE_gpencil_material_add(bmain, "Squares Stroke");
}
ma->gp_style->mode = GP_MATERIAL_MODE_SQUARE;
/* Change Solid Stroke settings. */
ma = BLI_findstring(&bmain->materials, "Solid Stroke", offsetof(ID, name) + 2);
ma = static_cast<Material *>(
BLI_findstring(&bmain->materials, "Solid Stroke", offsetof(ID, name) + 2));
if (ma != NULL) {
ma->gp_style->mix_rgba[3] = 1.0f;
ma->gp_style->texture_offset[0] = -0.5f;
@ -443,7 +453,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
/* Change Solid Fill settings. */
ma = BLI_findstring(&bmain->materials, "Solid Fill", offsetof(ID, name) + 2);
ma = static_cast<Material *>(
BLI_findstring(&bmain->materials, "Solid Fill", offsetof(ID, name) + 2));
if (ma != NULL) {
ma->gp_style->flag &= ~GP_MATERIAL_STROKE_SHOW;
ma->gp_style->mix_rgba[3] = 1.0f;
@ -451,14 +462,15 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
ma->gp_style->mix_factor = 0.5f;
}
Object *ob = BLI_findstring(&bmain->objects, "Stroke", offsetof(ID, name) + 2);
Object *ob = static_cast<Object *>(
BLI_findstring(&bmain->objects, "Stroke", offsetof(ID, name) + 2));
if (ob && ob->type == OB_GPENCIL) {
ob->dtx |= OB_USE_GPENCIL_LIGHTS;
}
}
/* Reset all grease pencil brushes. */
Scene *scene = bmain->scenes.first;
Scene *scene = static_cast<Scene *>(bmain->scenes.first);
BKE_brush_gpencil_paint_presets(bmain, scene->toolsettings, true);
BKE_brush_gpencil_sculpt_presets(bmain, scene->toolsettings, true);
BKE_brush_gpencil_vertex_presets(bmain, scene->toolsettings, true);
@ -511,7 +523,7 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
/* Scenes */
for (Scene *scene = bmain->scenes.first; scene; scene = scene->id.next) {
LISTBASE_FOREACH (Scene *, scene, &bmain->scenes) {
blo_update_defaults_scene(bmain, scene);
if (app_template && STREQ(app_template, "Video_Editing")) {
@ -537,7 +549,7 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
do_versions_rename_id(bmain, ID_LA, "Lamp", "Light");
if (app_template && STREQ(app_template, "2D_Animation")) {
for (Object *object = bmain->objects.first; object; object = object->id.next) {
LISTBASE_FOREACH (Object *, object, &bmain->objects) {
if (object->type == OB_GPENCIL) {
/* Set grease pencil object in drawing mode */
bGPdata *gpd = (bGPdata *)object->data;
@ -548,7 +560,7 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
}
for (Mesh *mesh = bmain->meshes.first; mesh; mesh = mesh->id.next) {
LISTBASE_FOREACH (Mesh *, mesh, &bmain->meshes) {
/* Match default for new meshes. */
mesh->smoothresh = DEG2RADF(30);
/* Match voxel remesher options for all existing meshes in templates. */
@ -565,22 +577,23 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
CustomData_free_layers(&mesh->vdata, CD_PAINT_MASK, mesh->totvert);
CustomData_free_layers(&mesh->ldata, CD_GRID_PAINT_MASK, mesh->totloop);
}
mesh->attributes_for_write().remove(".sculpt_face_set");
}
for (Camera *camera = bmain->cameras.first; camera; camera = camera->id.next) {
LISTBASE_FOREACH (Camera *, camera, &bmain->cameras) {
/* Initialize to a useful value. */
camera->dof.focus_distance = 10.0f;
camera->dof.aperture_fstop = 2.8f;
}
for (Light *light = bmain->lights.first; light; light = light->id.next) {
LISTBASE_FOREACH (Light *, light, &bmain->lights) {
/* Fix lights defaults. */
light->clipsta = 0.05f;
light->att_dist = 40.0f;
}
/* Materials */
for (Material *ma = bmain->materials.first; ma; ma = ma->id.next) {
LISTBASE_FOREACH (Material *, ma, &bmain->materials) {
/* Update default material to be a bit more rough. */
ma->roughness = 0.5f;
@ -588,7 +601,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
LISTBASE_FOREACH (bNode *, node, &ma->nodetree->nodes) {
if (node->type == SH_NODE_BSDF_PRINCIPLED) {
bNodeSocket *roughness_socket = nodeFindSocket(node, SOCK_IN, "Roughness");
bNodeSocketValueFloat *roughness_data = roughness_socket->default_value;
bNodeSocketValueFloat *roughness_data = static_cast<bNodeSocketValueFloat *>(
roughness_socket->default_value);
roughness_data->value = 0.5f;
node->custom2 = SHD_SUBSURFACE_RANDOM_WALK;
BKE_ntree_update_tag_node_property(ma->nodetree, node);
@ -606,13 +620,14 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
/* Enable for UV sculpt (other brush types will be created as needed),
* without this the grab brush will be active but not selectable from the list. */
const char *brush_name = "Grab";
Brush *brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
Brush *brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (brush) {
brush->ob_mode |= OB_MODE_EDIT;
}
}
for (Brush *brush = bmain->brushes.first; brush; brush = brush->id.next) {
LISTBASE_FOREACH (Brush *, brush, &bmain->brushes) {
brush->blur_kernel_radius = 2;
/* Use full strength for all non-sculpt brushes,
@ -632,13 +647,15 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
Brush *brush;
brush_name = "Smear";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (brush) {
brush->spacing = 3.0;
}
brush_name = "Draw Sharp";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -646,7 +663,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
brush_name = "Elastic Deform";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -654,7 +672,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
brush_name = "Pose";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -662,7 +681,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
brush_name = "Multi-plane Scrape";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -670,7 +690,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
brush_name = "Clay Thumb";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -678,7 +699,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
brush_name = "Cloth";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -686,7 +708,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
brush_name = "Slide Relax";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -694,7 +717,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
brush_name = "Paint";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -702,7 +726,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
brush_name = "Smear";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -710,7 +735,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
brush_name = "Boundary";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -718,7 +744,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
brush_name = "Simplify";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -726,7 +753,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
brush_name = "Draw Face Sets";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -734,7 +762,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
brush_name = "Multires Displacement Eraser";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -742,7 +771,8 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
brush_name = "Multires Displacement Smear";
brush = BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2);
brush = static_cast<Brush *>(
BLI_findstring(&bmain->brushes, brush_name, offsetof(ID, name) + 2));
if (!brush) {
brush = BKE_brush_add(bmain, brush_name, OB_MODE_SCULPT);
id_us_min(&brush->id);
@ -750,7 +780,7 @@ void BLO_update_defaults_startup_blend(Main *bmain, const char *app_template)
}
/* Use the same tool icon color in the brush cursor */
for (brush = bmain->brushes.first; brush; brush = brush->id.next) {
LISTBASE_FOREACH (Brush *, brush, &bmain->brushes) {
if (brush->ob_mode & OB_MODE_SCULPT) {
BLI_assert(brush->sculpt_tool != 0);
BKE_brush_sculpt_reset(brush);

View File

@ -108,8 +108,8 @@ void main()
vec3 in_pos0 = vertex_fetch_attribute(base_vertex_id, pos, vec3);
vec3 in_pos1 = vertex_fetch_attribute(base_vertex_id + 1, pos, vec3);
vec4 out_pos0 = ProjectionMatrix * (ViewMatrix * vec4(in_pos0, 1.0));
vec4 out_pos1 = ProjectionMatrix * (ViewMatrix * vec4(in_pos1, 1.0));
vec4 out_pos0 = drw_view.winmat * (drw_view.viewmat * vec4(in_pos0, 1.0));
vec4 out_pos1 = drw_view.winmat * (drw_view.viewmat * vec4(in_pos1, 1.0));
/* Final calculations required for Geometry Shader alternative.
* We need to calculate values for each vertex position to correctly determine the final output
@ -130,28 +130,28 @@ void main()
float line_size = float(lineThickness) * sizePixel;
if (quad_vertex_id == 0) {
view_clipping_distances(out_pos0);
view_clipping_distances(out_pos0.xyz);
interp.color = finalColor_geom[0];
t = edge_dir * (line_size * (is_persp ? out_pos0.w : 1.0));
gl_Position = out_pos0 + vec4(t, 0.0, 0.0);
}
else if (quad_vertex_id == 1 || quad_vertex_id == 3) {
view_clipping_distances(out_pos0);
view_clipping_distances(out_pos0.xyz);
interp.color = finalColor_geom[0];
t = edge_dir * (line_size * (is_persp ? out_pos0.w : 1.0));
gl_Position = out_pos0 - vec4(t, 0.0, 0.0);
}
else if (quad_vertex_id == 2 || quad_vertex_id == 5) {
view_clipping_distances(out_pos1);
view_clipping_distances(out_pos1.xyz);
interp.color = finalColor_geom[1];
t = edge_dir * (line_size * (is_persp ? out_pos1.w : 1.0));
gl_Position = out_pos1 + vec4(t, 0.0, 0.0);
}
else if (quad_vertex_id == 4) {
view_clipping_distances(out_pos1);
view_clipping_distances(out_pos1.xyz);
interp.color = finalColor_geom[1];
t = edge_dir * (line_size * (is_persp ? out_pos1.w : 1.0));

View File

@ -1714,23 +1714,32 @@ static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader)
}
#ifdef DEBUG
int debug_print_location = GPU_shader_get_builtin_ssbo(shader, GPU_STORAGE_BUFFER_DEBUG_PRINT);
if (debug_print_location != -1) {
GPUStorageBuf *buf = drw_debug_gpu_print_buf_get();
drw_shgroup_uniform_create_ex(
shgroup, debug_print_location, DRW_UNIFORM_STORAGE_BLOCK, buf, GPU_SAMPLER_DEFAULT, 0, 1);
/* TODO(Metal): Support Shader debug print.
* This is not currently supported by Metal Backend. */
if (GPU_backend_get_type() != GPU_BACKEND_METAL) {
int debug_print_location = GPU_shader_get_builtin_ssbo(shader, GPU_STORAGE_BUFFER_DEBUG_PRINT);
if (debug_print_location != -1) {
GPUStorageBuf *buf = drw_debug_gpu_print_buf_get();
drw_shgroup_uniform_create_ex(shgroup,
debug_print_location,
DRW_UNIFORM_STORAGE_BLOCK,
buf,
GPU_SAMPLER_DEFAULT,
0,
1);
# ifndef DISABLE_DEBUG_SHADER_PRINT_BARRIER
/* Add a barrier to allow multiple shader writing to the same buffer. */
DRW_shgroup_barrier(shgroup, GPU_BARRIER_SHADER_STORAGE);
/* Add a barrier to allow multiple shader writing to the same buffer. */
DRW_shgroup_barrier(shgroup, GPU_BARRIER_SHADER_STORAGE);
# endif
}
}
int debug_draw_location = GPU_shader_get_builtin_ssbo(shader, GPU_STORAGE_BUFFER_DEBUG_VERTS);
if (debug_draw_location != -1) {
GPUStorageBuf *buf = drw_debug_gpu_draw_buf_get();
drw_shgroup_uniform_create_ex(
shgroup, debug_draw_location, DRW_UNIFORM_STORAGE_BLOCK, buf, GPU_SAMPLER_DEFAULT, 0, 1);
/* NOTE(fclem): No barrier as ordering is not important. */
int debug_draw_location = GPU_shader_get_builtin_ssbo(shader, GPU_STORAGE_BUFFER_DEBUG_VERTS);
if (debug_draw_location != -1) {
GPUStorageBuf *buf = drw_debug_gpu_draw_buf_get();
drw_shgroup_uniform_create_ex(
shgroup, debug_draw_location, DRW_UNIFORM_STORAGE_BLOCK, buf, GPU_SAMPLER_DEFAULT, 0, 1);
/* NOTE(fclem): No barrier as ordering is not important. */
}
}
#endif

View File

@ -117,6 +117,9 @@ struct FileListInternEntry {
* Owning pointer. */
AssetMetaData *imported_asset_data;
/* See #FILE_ENTRY_BLENDERLIB_NO_PREVIEW. */
bool blenderlib_has_no_preview;
/** Defined in BLI_fileops.h */
eFileAttributes attributes;
BLI_stat_t st;
@ -1587,6 +1590,14 @@ static void filelist_cache_previews_push(FileList *filelist, FileDirEntry *entry
return;
}
/* If we know this is an external ID without a preview, skip loading the preview. Can save quite
* some time in heavy files, because otherwise for each missing preview and for each preview
* reload, we'd reopen the .blend to look for the preview. */
if ((entry->typeflag & FILE_TYPE_BLENDERLIB) &&
(entry->flags & FILE_ENTRY_BLENDERLIB_NO_PREVIEW)) {
return;
}
FileListInternEntry *intern_entry = filelist->filelist_intern.filtered[index];
PreviewImage *preview_in_memory = intern_entry->local_data.preview_image;
if (preview_in_memory && !BKE_previewimg_is_finished(preview_in_memory, ICON_SIZE_PREVIEW)) {
@ -2054,6 +2065,9 @@ static FileDirEntry *filelist_file_create_entry(FileList *filelist, const int in
ret->preview_icon_id = BKE_icon_imbuf_create(ibuf);
}
}
if (entry->blenderlib_has_no_preview) {
ret->flags |= FILE_ENTRY_BLENDERLIB_NO_PREVIEW;
}
BLI_addtail(&cache->cached_entries, ret);
return ret;
}
@ -3013,10 +3027,15 @@ static void filelist_readjob_list_lib_add_datablock(ListBase *entries,
entry->relpath = BLI_strdup(datablock_info->name);
}
entry->typeflag |= FILE_TYPE_BLENDERLIB;
if (datablock_info && datablock_info->asset_data) {
entry->typeflag |= FILE_TYPE_ASSET;
/* Moves ownership! */
entry->imported_asset_data = datablock_info->asset_data;
if (datablock_info) {
entry->blenderlib_has_no_preview = datablock_info->no_preview_found;
if (datablock_info->asset_data) {
entry->typeflag |= FILE_TYPE_ASSET;
/* Moves ownership! */
entry->imported_asset_data = datablock_info->asset_data;
}
}
entry->blentype = idcode;
BLI_addtail(entries, entry);
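
The comment in filelist_cache_previews_push above describes classic negative caching: record "looked and found nothing" once, so the expensive step (reopening the .blend) is never repeated per redraw. A standalone sketch of the idiom with illustrative names:

#include <string>
#include <unordered_map>

/* Generic negative cache: remember that a lookup found nothing so the
 * expensive search runs at most once per key. */
struct PreviewNegativeCache {
  std::unordered_map<std::string, bool> no_preview_found;

  bool should_skip(const std::string &key) const
  {
    const auto it = no_preview_found.find(key);
    return it != no_preview_found.end() && it->second;
  }

  void record_miss(const std::string &key)
  {
    no_preview_found[key] = true;
  }
};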

View File

@ -60,8 +60,6 @@
* and being able to set it to zero is handy. */
/* #define USE_NUM_NO_ZERO */
static void initSnapSpatial(TransInfo *t, float r_snap[2]);
bool transdata_check_local_islands(TransInfo *t, short around)
{
if (t->options & (CTX_CURSOR | CTX_TEXTURE_SPACE)) {
@ -1723,7 +1721,7 @@ void saveTransform(bContext *C, TransInfo *t, wmOperator *op)
}
}
static void initSnapSpatial(TransInfo *t, float r_snap[2])
static void initSnapSpatial(TransInfo *t, float r_snap[2], float r_snap_y[2])
{
if (t->spacetype == SPACE_VIEW3D) {
if (t->region->regiondata) {
@ -1737,18 +1735,15 @@ static void initSnapSpatial(TransInfo *t, float r_snap[2])
View2D *v2d = &t->region->v2d;
int grid_size = SI_GRID_STEPS_LEN;
float zoom_factor = ED_space_image_zoom_level(v2d, grid_size);
float grid_steps[SI_GRID_STEPS_LEN];
float grid_steps_x[SI_GRID_STEPS_LEN];
float grid_steps_y[SI_GRID_STEPS_LEN];
ED_space_image_grid_steps(sima, grid_steps, grid_steps_y, grid_size);
ED_space_image_grid_steps(sima, grid_steps_x, grid_steps_y, grid_size);
/* Snapping value based on what type of grid is used (adaptive-subdividing or custom-grid). */
r_snap[0] = ED_space_image_increment_snap_value(grid_size, grid_steps, zoom_factor);
r_snap[0] = ED_space_image_increment_snap_value(grid_size, grid_steps_x, zoom_factor);
r_snap[1] = r_snap[0] / 2.0f;
/* TODO: Implement snapping for custom grid sizes with `grid_steps[0] != grid_steps_y[0]`.
* r_snap_y[0] = ED_space_image_increment_snap_value(grid_size, grid_steps_y, zoom_factor);
* r_snap_y[1] = r_snap_y[0] / 2.0f;
*/
r_snap_y[0] = ED_space_image_increment_snap_value(grid_size, grid_steps_y, zoom_factor);
r_snap_y[1] = r_snap_y[0] / 2.0f;
}
else if (t->spacetype == SPACE_CLIP) {
r_snap[0] = 0.125f;
@ -1903,7 +1898,7 @@ bool initTransform(bContext *C, TransInfo *t, wmOperator *op, const wmEvent *eve
initSnapping(t, op); /* Initialize snapping data AFTER mode flags */
initSnapSpatial(t, t->snap_spatial);
initSnapSpatial(t, t->snap_spatial_x, t->snap_spatial_y);
/* EVIL! posemode code can switch translation to rotate when 1 bone is selected.
* will be removed (ton) */

View File

@ -555,7 +555,9 @@ typedef struct TransInfo {
/** Snapping Gears. */
float snap[2];
/** Spatial snapping gears(even when rotating, scaling... etc). */
float snap_spatial[2];
float snap_spatial_x[2];
/** Spatial snapping in the Y coordinate, for non-uniform grid in UV Editor. */
float snap_spatial_y[2];
/** Mouse side of the current frame, 'L', 'R' or 'B' */
char frame_side;

View File

@ -590,7 +590,7 @@ void initTranslation(TransInfo *t)
t->num.flag = 0;
t->num.idx_max = t->idx_max;
copy_v2_v2(t->snap, t->snap_spatial);
copy_v2_v2(t->snap, t->snap_spatial_x);
copy_v3_fl(t->num.val_inc, t->snap[0]);
t->num.unit_sys = t->scene->unit.system;

View File

@ -519,10 +519,12 @@ void applyGridAbsolute(TransInfo *t)
return;
}
float grid_size = (t->modifiers & MOD_PRECISION) ? t->snap_spatial[1] : t->snap_spatial[0];
float grid_size_x = (t->modifiers & MOD_PRECISION) ? t->snap_spatial_x[1] : t->snap_spatial_x[0];
float grid_size_y = (t->modifiers & MOD_PRECISION) ? t->snap_spatial_y[1] : t->snap_spatial_y[0];
float grid_size_z = grid_size_x;
/* early exit on unusable grid size */
if (grid_size == 0.0f) {
/* Early exit on unusable grid size. */
if (grid_size_x == 0.0f || grid_size_y == 0.0f || grid_size_z == 0.0f) {
return;
}
@ -548,11 +550,9 @@ void applyGridAbsolute(TransInfo *t)
copy_v3_v3(iloc, td->ob->obmat[3]);
}
mul_v3_v3fl(loc, iloc, 1.0f / grid_size);
loc[0] = roundf(loc[0]);
loc[1] = roundf(loc[1]);
loc[2] = roundf(loc[2]);
mul_v3_fl(loc, grid_size);
loc[0] = roundf(iloc[0] / grid_size_x) * grid_size_x;
loc[1] = roundf(iloc[1] / grid_size_y) * grid_size_y;
loc[2] = roundf(iloc[2] / grid_size_z) * grid_size_z;
sub_v3_v3v3(tvec, loc, iloc);
mul_m3_v3(td->smtx, tvec);
@ -1654,8 +1654,12 @@ bool snapNodesTransform(
/** \name snap Grid
* \{ */
static void snap_grid_apply(
TransInfo *t, const int max_index, const float grid_dist, const float loc[3], float r_out[3])
static void snap_grid_apply(TransInfo *t,
const int max_index,
const float grid_dist_x,
const float grid_dist_y,
const float loc[3],
float r_out[3])
{
BLI_assert(max_index <= 2);
snap_target_grid_ensure(t);
@ -1672,7 +1676,7 @@ static void snap_grid_apply(
}
for (int i = 0; i <= max_index; i++) {
const float iter_fac = grid_dist * asp[i];
const float iter_fac = ((i == 1) ? grid_dist_y : grid_dist_x) * asp[i];
r_out[i] = iter_fac * roundf((in[i] + center_global[i]) / iter_fac) - center_global[i];
}
}
@ -1697,14 +1701,15 @@ bool transform_snap_grid(TransInfo *t, float *val)
return false;
}
float grid_dist = (t->modifiers & MOD_PRECISION) ? t->snap[1] : t->snap[0];
float grid_dist_x = (t->modifiers & MOD_PRECISION) ? t->snap_spatial_x[1] : t->snap_spatial_x[0];
float grid_dist_y = (t->modifiers & MOD_PRECISION) ? t->snap_spatial_y[1] : t->snap_spatial_y[0];
/* Bail out early if there is no need to snap. */
if (grid_dist == 0.0f) {
if (grid_dist_x == 0.0f || grid_dist_y == 0.0f) {
return false;
}
snap_grid_apply(t, t->idx_max, grid_dist, val, val);
snap_grid_apply(t, t->idx_max, grid_dist_x, grid_dist_y, val, val);
t->tsnap.snapElem = SCE_SNAP_MODE_GRID;
return true;
}
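
The underlying snapping arithmetic is plain rounding to the nearest grid multiple, now applied per axis so the UV editor's non-uniform grid can snap X and Y independently. A minimal standalone sketch of the operation:

#include <cmath>

/* Snap one coordinate to the nearest multiple of its grid size. */
static float snap_to_grid(const float value, const float grid_size)
{
  return roundf(value / grid_size) * grid_size;
}

/* Example: with grid_size_x = 0.25 and grid_size_y = 0.5,
 * the point (0.30, 0.30) snaps to (0.25, 0.50). */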

View File

@ -859,6 +859,7 @@ static AllMeshesInfo preprocess_meshes(const GeometrySet &geometry_set,
}
}
}
info.create_material_index_attribute |= info.materials.size() > 1;
info.realize_info.reinitialize(info.order.size());
for (const int mesh_index : info.realize_info.index_range()) {
MeshRealizeInfo &mesh_info = info.realize_info[mesh_index];

View File

@ -186,9 +186,11 @@ set(OPENGL_SRC
set(METAL_SRC
metal/mtl_backend.mm
metal/mtl_batch.mm
metal/mtl_command_buffer.mm
metal/mtl_context.mm
metal/mtl_debug.mm
metal/mtl_drawlist.mm
metal/mtl_framebuffer.mm
metal/mtl_immediate.mm
metal/mtl_index_buffer.mm

View File

@ -431,15 +431,16 @@ inline bool validate_data_format(eGPUTextureFormat tex_format, eGPUDataFormat da
case GPU_DEPTH_COMPONENT24:
case GPU_DEPTH_COMPONENT16:
case GPU_DEPTH_COMPONENT32F:
return data_format == GPU_DATA_FLOAT;
return ELEM(data_format, GPU_DATA_FLOAT, GPU_DATA_UINT);
case GPU_DEPTH24_STENCIL8:
case GPU_DEPTH32F_STENCIL8:
return data_format == GPU_DATA_UINT_24_8;
return ELEM(data_format, GPU_DATA_UINT_24_8, GPU_DATA_UINT);
case GPU_R8UI:
case GPU_R16UI:
case GPU_RG16UI:
case GPU_R32UI:
return data_format == GPU_DATA_UINT;
case GPU_R32I:
case GPU_RG16I:
case GPU_R16I:
return data_format == GPU_DATA_INT;
@ -453,6 +454,8 @@ inline bool validate_data_format(eGPUTextureFormat tex_format, eGPUDataFormat da
return ELEM(data_format, GPU_DATA_2_10_10_10_REV, GPU_DATA_FLOAT);
case GPU_R11F_G11F_B10F:
return ELEM(data_format, GPU_DATA_10_11_11_REV, GPU_DATA_FLOAT);
case GPU_RGBA16F:
return ELEM(data_format, GPU_DATA_HALF_FLOAT, GPU_DATA_FLOAT);
default:
return data_format == GPU_DATA_FLOAT;
}
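
For readers unfamiliar with Blender's ELEM macro: ELEM(x, a, b) is simply a membership test, so these changes widen the set of accepted data formats rather than replacing it. Conceptual expansion (illustrative helper):

/* What `ELEM(data_format, GPU_DATA_FLOAT, GPU_DATA_UINT)` expands to,
 * conceptually -- depth formats now also accept GPU_DATA_UINT so they
 * can be cleared/read as raw integer data on backends that need it. */
static bool depth_data_format_ok(const eGPUDataFormat data_format)
{
  return data_format == GPU_DATA_FLOAT || data_format == GPU_DATA_UINT;
}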

View File

@ -147,6 +147,10 @@ static void gpu_viewport_textures_create(GPUViewport *viewport)
if (viewport->depth_tx == NULL) {
viewport->depth_tx = GPU_texture_create_2d(
"dtxl_depth", UNPACK2(size), 1, GPU_DEPTH24_STENCIL8, NULL);
if (GPU_clear_viewport_workaround()) {
static int depth_clear = 0;
GPU_texture_clear(viewport->depth_tx, GPU_DATA_UINT_24_8, &depth_clear);
}
}
if (!viewport->depth_tx || !viewport->color_render_tx[0] || !viewport->color_overlay_tx[0]) {

View File

@ -47,13 +47,11 @@ Context *MTLBackend::context_alloc(void *ghost_window, void *ghost_context)
Batch *MTLBackend::batch_alloc()
{
/* TODO(Metal): Full MTLBatch implementation. */
return new MTLBatch();
};
DrawList *MTLBackend::drawlist_alloc(int list_length)
{
/* TODO(Metal): Full MTLDrawList implementation. */
return new MTLDrawList(list_length);
};
@ -420,6 +418,7 @@ void MTLBackend::capabilities_init(MTLContext *ctx)
GCaps.depth_blitting_workaround = false;
GCaps.use_main_context_workaround = false;
GCaps.broken_amd_driver = false;
GCaps.clear_viewport_workaround = true;
/* Metal related workarounds. */
/* Minimum per-vertex stride is 4 bytes in Metal.

View File

@ -10,31 +10,126 @@
#pragma once
#include "MEM_guardedalloc.h"
#include "gpu_batch_private.hh"
#include "mtl_index_buffer.hh"
#include "mtl_primitive.hh"
#include "mtl_shader.hh"
#include "mtl_vertex_buffer.hh"
namespace blender {
namespace gpu {
namespace blender::gpu {
class MTLContext;
class MTLShaderInterface;
#define GPU_VAO_STATIC_LEN 64
struct VertexBufferID {
uint32_t id : 16;
uint32_t is_instance : 15;
uint32_t used : 1;
};
/* Pass-through MTLBatch. TODO(Metal): Implement. */
class MTLBatch : public Batch {
public:
void draw(int v_first, int v_count, int i_first, int i_count) override
{
}
/* Vertex Bind-state Caching for a given shader interface used with the Batch. */
struct VertexDescriptorShaderInterfacePair {
MTLVertexDescriptor vertex_descriptor{};
const ShaderInterface *interface = nullptr;
uint16_t attr_mask{};
int num_buffers{};
VertexBufferID bufferIds[GPU_BATCH_VBO_MAX_LEN] = {};
/* Cache life index compares a cache entry with the active MTLBatch state.
* This is initially set to the cache life index of MTLBatch. If the batch has been modified,
* this index is incremented to cheaply invalidate existing cache entries. */
uint32_t cache_life_index = 0;
};
class MTLVertexDescriptorCache {
private:
MTLBatch *batch_;
VertexDescriptorShaderInterfacePair cache_[GPU_VAO_STATIC_LEN] = {};
MTLContext *cache_context_ = nullptr;
uint32_t cache_life_index_ = 0;
public:
MTLVertexDescriptorCache(MTLBatch *batch) : batch_(batch){};
VertexDescriptorShaderInterfacePair *find(const ShaderInterface *interface);
bool insert(VertexDescriptorShaderInterfacePair &data);
private:
void vertex_descriptor_cache_init(MTLContext *ctx);
void vertex_descriptor_cache_clear();
void vertex_descriptor_cache_ensure();
};
private:
MTLShader *active_shader_ = nullptr;
bool shader_in_use_ = false;
MTLVertexDescriptorCache vao_cache = {this};
/* Topology emulation. */
gpu::MTLBuffer *emulated_topology_buffer_ = nullptr;
GPUPrimType emulated_topology_type_;
uint32_t topology_buffer_input_v_count_ = 0;
uint32_t topology_buffer_output_v_count_ = 0;
public:
MTLBatch(){};
~MTLBatch(){};
void draw(int v_first, int v_count, int i_first, int i_count) override;
void draw_indirect(GPUStorageBuf *indirect_buf, intptr_t offset) override
{
/* TODO(Metal): Support indirect draw commands. */
}
void multi_draw_indirect(GPUStorageBuf *indirect_buf,
int count,
intptr_t offset,
intptr_t stride) override
{
/* TODO(Metal): Support indirect draw commands. */
}
/* Returns an initialized RenderCommandEncoder for drawing if all is good.
* Otherwise, nil. */
id<MTLRenderCommandEncoder> bind(uint v_first, uint v_count, uint i_first, uint i_count);
void unbind();
/* Convenience getters. */
MTLIndexBuf *elem_() const
{
return static_cast<MTLIndexBuf *>(unwrap(elem));
}
MTLVertBuf *verts_(const int index) const
{
return static_cast<MTLVertBuf *>(unwrap(verts[index]));
}
MTLVertBuf *inst_(const int index) const
{
return static_cast<MTLVertBuf *>(unwrap(inst[index]));
}
MTLShader *active_shader_get() const
{
return active_shader_;
}
private:
void shader_bind();
void draw_advanced(int v_first, int v_count, int i_first, int i_count);
int prepare_vertex_binding(MTLVertBuf *verts,
MTLRenderPipelineStateDescriptor &desc,
const MTLShaderInterface *interface,
uint16_t &attr_mask,
bool instanced);
id<MTLBuffer> get_emulated_topology_buffer(GPUPrimType &in_out_prim_type, uint32_t &v_count);
void prepare_vertex_descriptor_and_bindings(
MTLVertBuf **buffers, int &num_buffers, int v_first, int v_count, int i_first, int i_count);
MEM_CXX_CLASS_ALLOC_FUNCS("MTLBatch");
};
} // namespace gpu
} // namespace blender
} // namespace blender::gpu
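
The cache_life_index scheme documented above is a generation counter: bumping one integer invalidates every entry without touching the array. A minimal standalone sketch of the idiom (illustrative types, not the real MTLVertexDescriptorCache):

#include <cstdint>

struct Entry {
  const void *key = nullptr;
  uint32_t life_index = 0;
};

struct GenerationCache {
  Entry entries[64] = {};
  uint32_t life_index = 0;

  /* O(1) invalidation of all entries: stale entries simply fail
   * the life_index comparison on lookup. */
  void clear()
  {
    life_index++;
  }

  Entry *find(const void *key)
  {
    for (Entry &e : entries) {
      if (e.key == key && e.life_index == life_index) {
        return &e;
      }
    }
    return nullptr;
  }
};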

View File

@ -0,0 +1,995 @@
/** \file
* \ingroup gpu
*
* Metal implementation of GPUBatch.
*/
#include "BLI_assert.h"
#include "BLI_span.hh"
#include "BKE_global.h"
#include "GPU_common.h"
#include "gpu_batch_private.hh"
#include "gpu_shader_private.hh"
#include "mtl_batch.hh"
#include "mtl_context.hh"
#include "mtl_debug.hh"
#include "mtl_index_buffer.hh"
#include "mtl_shader.hh"
#include "mtl_vertex_buffer.hh"
#include <string>
namespace blender::gpu {
/* -------------------------------------------------------------------- */
/** \name Creation & Deletion
* \{ */
void MTLBatch::draw(int v_first, int v_count, int i_first, int i_count)
{
if (this->flag & GPU_BATCH_INVALID) {
this->shader_in_use_ = false;
}
this->draw_advanced(v_first, v_count, i_first, i_count);
}
void MTLBatch::shader_bind()
{
if (active_shader_ && active_shader_->is_valid()) {
active_shader_->bind();
shader_in_use_ = true;
}
}
void MTLBatch::MTLVertexDescriptorCache::vertex_descriptor_cache_init(MTLContext *ctx)
{
BLI_assert(ctx != nullptr);
this->vertex_descriptor_cache_clear();
cache_context_ = ctx;
}
void MTLBatch::MTLVertexDescriptorCache::vertex_descriptor_cache_clear()
{
cache_life_index_++;
cache_context_ = nullptr;
}
void MTLBatch::MTLVertexDescriptorCache::vertex_descriptor_cache_ensure()
{
if (this->cache_context_ != nullptr) {
/* Invalidate vertex descriptor bindings cache if batch has changed. */
if (batch_->flag & GPU_BATCH_DIRTY) {
batch_->flag &= ~GPU_BATCH_DIRTY;
this->vertex_descriptor_cache_clear();
}
}
/* Initialise cache if not ready. */
if (cache_context_ == nullptr) {
this->vertex_descriptor_cache_init(MTLContext::get());
}
}
MTLBatch::VertexDescriptorShaderInterfacePair *MTLBatch::MTLVertexDescriptorCache::find(
const ShaderInterface *interface)
{
this->vertex_descriptor_cache_ensure();
for (int i = 0; i < GPU_VAO_STATIC_LEN; ++i) {
if (cache_[i].interface == interface && cache_[i].cache_life_index == cache_life_index_) {
return &cache_[i];
}
}
return nullptr;
}
bool MTLBatch::MTLVertexDescriptorCache::insert(
MTLBatch::VertexDescriptorShaderInterfacePair &data)
{
vertex_descriptor_cache_ensure();
for (int i = 0; i < GPU_VAO_STATIC_LEN; ++i) {
if (cache_[i].interface == nullptr || cache_[i].cache_life_index != cache_life_index_) {
cache_[i] = data;
cache_[i].cache_life_index = cache_life_index_;
return true;
}
}
return false;
}
int MTLBatch::prepare_vertex_binding(MTLVertBuf *verts,
MTLRenderPipelineStateDescriptor &desc,
const MTLShaderInterface *interface,
uint16_t &attr_mask,
bool instanced)
{
const GPUVertFormat *format = &verts->format;
/* Whether the current vertex buffer has been added to the buffer layout descriptor. */
bool buffer_added = false;
/* Per-vertex stride of current vertex buffer. */
int buffer_stride = format->stride;
/* Buffer binding index of the vertex buffer once added to the buffer layout descriptor. */
int buffer_index = -1;
int attribute_offset = 0;
if (!active_shader_->get_uses_ssbo_vertex_fetch()) {
BLI_assert(
buffer_stride >= 4 &&
"In Metal, Vertex buffer stride should be 4. SSBO Vertex fetch is not affected by this");
}
/* Iterate over GPUVertBuf vertex format and find attributes matching those in the active
* shader's interface. */
for (uint32_t a_idx = 0; a_idx < format->attr_len; a_idx++) {
const GPUVertAttr *a = &format->attrs[a_idx];
if (format->deinterleaved) {
attribute_offset += ((a_idx == 0) ? 0 : format->attrs[a_idx - 1].size) * verts->vertex_len;
buffer_stride = a->size;
}
else {
attribute_offset = a->offset;
}
/* Find attribute with the matching name. Attributes may have multiple compatible
* name aliases. */
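/* Aliases are registered on the vertex format via GPU_vertformat_alias_add(); as an
* illustrative sketch, the same position data could be matched under both "pos" and a
* hypothetical secondary name such as "P". */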
for (uint32_t n_idx = 0; n_idx < a->name_len; n_idx++) {
const char *name = GPU_vertformat_attr_name_get(format, a, n_idx);
const ShaderInput *input = interface->attr_get(name);
if (input == nullptr || input->location == -1) {
/* Vertex/instance buffers provided may contain attribute data for attributes which are not
* needed by this particular shader. The shader only needs binding information for the
* attributes present in its shader interface. */
MTL_LOG_WARNING(
"MTLBatch: Could not find attribute with name '%s' (defined in active vertex format) "
"in the shader interface for shader '%s'\n",
name,
interface->get_name());
continue;
}
/* Fetch metal attribute information. */
const MTLShaderInputAttribute &mtl_attr = interface->get_attribute(input->location);
BLI_assert(mtl_attr.location >= 0);
/* Verify that the attribute location from the shader interface
* matches the attribute location returned. */
BLI_assert(mtl_attr.location == input->location);
/* Check if attribute is already present in the given slot. */
if ((~attr_mask) & (1 << mtl_attr.location)) {
MTL_LOG_INFO(
" -- [Batch] Skipping attribute with input location %d (As one is already bound)\n",
mtl_attr.location);
}
else {
/* Update attribute used-slot mask. */
attr_mask &= ~(1 << mtl_attr.location);
/* Add buffer layout entry in descriptor if it has not yet been added
* for current vertex buffer. */
if (!buffer_added) {
buffer_index = desc.vertex_descriptor.num_vert_buffers;
desc.vertex_descriptor.buffer_layouts[buffer_index].step_function =
(instanced) ? MTLVertexStepFunctionPerInstance : MTLVertexStepFunctionPerVertex;
desc.vertex_descriptor.buffer_layouts[buffer_index].step_rate = 1;
desc.vertex_descriptor.buffer_layouts[buffer_index].stride = buffer_stride;
desc.vertex_descriptor.num_vert_buffers++;
buffer_added = true;
MTL_LOG_INFO(" -- [Batch] Adding source %s buffer (Index: %d, Stride: %d)\n",
(instanced) ? "instance" : "vertex",
buffer_index,
buffer_stride);
}
else {
/* Ensure stride is correct for de-interleaved attributes. */
desc.vertex_descriptor.buffer_layouts[buffer_index].stride = buffer_stride;
}
/* Handle Matrix/Array vertex attribute types.
* Metal does not natively support these as attribute types, so we handle these cases
* by stacking together compatible types (e.g. 4xVec4 for Mat4) and combining
* the data in the shader.
* The generated Metal shader will contain a generated input binding, which reads
* in individual attributes and merges them into the desired type after vertex
* assembly. e.g. a Mat4 (Float4x4) will generate 4 Float4 attributes. */
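/* Illustrative sketch (attribute name hypothetical): a per-instance model matrix added as
* GPU_vertformat_attr_add(format, "inst_mat", GPU_COMP_F32, 16, GPU_FETCH_FLOAT)
* occupies 4 consecutive attribute locations, each described below as a
* MTLVertexFormatFloat4 at byte offsets 0, 16, 32 and 48 from the attribute's base offset. */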
if (a->comp_len == 16 || a->comp_len == 12 || a->comp_len == 8) {
BLI_assert_msg(
a->comp_len == 16,
"only mat4 attributes currently supported -- Not ready to handle other long "
"component length attributes yet");
/* SSBO Vertex Fetch Attribute safety checks. */
if (active_shader_->get_uses_ssbo_vertex_fetch()) {
/* When using SSBO vertex fetch, we do not need to expose split attributes,
* as a matrix can be read directly as a whole block of contiguous data. */
MTLSSBOAttribute ssbo_attr(mtl_attr.index,
buffer_index,
attribute_offset,
buffer_stride,
GPU_SHADER_ATTR_TYPE_MAT4,
instanced);
active_shader_->ssbo_vertex_fetch_bind_attribute(ssbo_attr);
desc.vertex_descriptor.ssbo_attributes[desc.vertex_descriptor.num_ssbo_attributes] =
ssbo_attr;
desc.vertex_descriptor.num_ssbo_attributes++;
}
else {
/* Handle Mat4 attributes. */
if (a->comp_len == 16) {
/* Debug safety checks. */
BLI_assert_msg(mtl_attr.matrix_element_count == 4,
"mat4 type expected but there are fewer components");
BLI_assert_msg(mtl_attr.size == 16, "Expecting subtype 'vec4' with 16 bytes");
BLI_assert_msg(
mtl_attr.format == MTLVertexFormatFloat4,
"Per-attribute vertex format MUST be float4 for an input type of 'mat4'");
/* We have found the 'ROOT' attribute. A mat4 contains 4 consecutive float4 attribute
* locations we must map to. */
for (int i = 0; i < a->comp_len / 4; i++) {
desc.vertex_descriptor.attributes[mtl_attr.location + i].format =
MTLVertexFormatFloat4;
/* Data is consecutive in the buffer for the whole matrix, each float4 will shift
* the offset by 16 bytes. */
desc.vertex_descriptor.attributes[mtl_attr.location + i].offset =
attribute_offset + i * 16;
/* All source data for a matrix is in the same singular buffer. */
desc.vertex_descriptor.attributes[mtl_attr.location + i].buffer_index =
buffer_index;
/* Update total attribute count. */
desc.vertex_descriptor.num_attributes = max_ii(
mtl_attr.location + i + 1, desc.vertex_descriptor.num_attributes);
MTL_LOG_INFO("-- Sub-Attrib Location: %d, offset: %d, buffer index: %d\n",
mtl_attr.location + i,
attribute_offset + i * 16,
buffer_index);
}
MTL_LOG_INFO(
"Float4x4 attribute type added for '%s' at attribute locations: %d to %d\n",
name,
mtl_attr.location,
mtl_attr.location + 3);
}
/* Ensure we are not exceeding the attribute limit. */
BLI_assert(desc.vertex_descriptor.num_attributes <= MTL_MAX_VERTEX_INPUT_ATTRIBUTES);
}
}
else {
/* Handle any required format conversions.
* NOTE(Metal): If there is a mismatch between the format of an attribute
* in the shader interface and the specified format in the VertexBuffer's VertexFormat,
* we need to perform a format conversion.
*
* The Metal API can perform certain conversions internally during vertex assembly:
* - Type Normalization, e.g. short2 to float2 normalized between 0.0 and 1.0.
* - Type Truncation, e.g. Float4 to Float2.
* - Type Expansion, e.g. Float3 to Float4 (assigning 0,0,0,1 to the empty
* elements).
*
* Certain conversions cannot be performed, however, and in these cases we need to
* instruct the shader to generate a specialised version with a conversion routine upon
* attribute read.
* - This handles cases such as conversion between types e.g. Integer to float without
* normalization.
*
* For more information on the supported and unsupported conversions, see:
* https://developer.apple.com/documentation/metal/mtlvertexattributedescriptor/1516081-format?language=objc
*/
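/* Illustrative sketch: a 2-component GPU_COMP_U16 attribute using
* GPU_FETCH_INT_TO_FLOAT_UNIT can be mapped to MTLVertexFormatUShort2Normalized and
* converted during vertex assembly, whereas plain GPU_FETCH_INT_TO_FLOAT has no
* equivalent Metal format and must fall through to the shader-side conversion below. */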
MTLVertexFormat converted_format;
bool can_use_internal_conversion = mtl_convert_vertex_format(
mtl_attr.format,
(GPUVertCompType)a->comp_type,
a->comp_len,
(GPUVertFetchMode)a->fetch_mode,
&converted_format);
bool is_floating_point_format = (a->comp_type == GPU_COMP_F32);
if (can_use_internal_conversion) {
desc.vertex_descriptor.attributes[mtl_attr.location].format = converted_format;
desc.vertex_descriptor.attributes[mtl_attr.location].format_conversion_mode =
is_floating_point_format ? (GPUVertFetchMode)GPU_FETCH_FLOAT :
(GPUVertFetchMode)GPU_FETCH_INT;
BLI_assert(converted_format != MTLVertexFormatInvalid);
}
else {
/* The internal implicit conversion is not supported.
* In this case, we need to handle conversion inside the shader.
* This is handled using `format_conversion_mode`.
* `format_conversion_mode` is assigned the blender-specified fetch mode (GPU_FETCH_*).
* This then controls how a given attribute is interpreted. The data will be read
* as specified and then converted appropriately to the correct form.
*
* e.g. if `GPU_FETCH_INT_TO_FLOAT` is specified, the specialised read-routine
* in the shader will read the data as an int, and cast this to floating point
* representation. (Rather than reading the source data as float).
*
* NOTE: Even if full conversion is not supported, we may still partially perform an
* implicit conversion where possible, such as vector truncation or expansion. */
MTLVertexFormat converted_format;
bool can_convert = mtl_vertex_format_resize(
mtl_attr.format, a->comp_len, &converted_format);
desc.vertex_descriptor.attributes[mtl_attr.location].format = can_convert ?
converted_format :
mtl_attr.format;
desc.vertex_descriptor.attributes[mtl_attr.location].format_conversion_mode =
(GPUVertFetchMode)a->fetch_mode;
BLI_assert(desc.vertex_descriptor.attributes[mtl_attr.location].format !=
MTLVertexFormatInvalid);
}
desc.vertex_descriptor.attributes[mtl_attr.location].offset = attribute_offset;
desc.vertex_descriptor.attributes[mtl_attr.location].buffer_index = buffer_index;
desc.vertex_descriptor.num_attributes = ((mtl_attr.location + 1) >
desc.vertex_descriptor.num_attributes) ?
(mtl_attr.location + 1) :
desc.vertex_descriptor.num_attributes;
/* SSBO Vertex Fetch attribute bind. */
if (active_shader_->get_uses_ssbo_vertex_fetch()) {
BLI_assert_msg(desc.vertex_descriptor.attributes[mtl_attr.location].format ==
mtl_attr.format,
"SSBO Vertex Fetch does not support attribute conversion.");
MTLSSBOAttribute ssbo_attr(
mtl_attr.index,
buffer_index,
attribute_offset,
buffer_stride,
MTLShader::ssbo_vertex_type_to_attr_type(
desc.vertex_descriptor.attributes[mtl_attr.location].format),
instanced);
active_shader_->ssbo_vertex_fetch_bind_attribute(ssbo_attr);
desc.vertex_descriptor.ssbo_attributes[desc.vertex_descriptor.num_ssbo_attributes] =
ssbo_attr;
desc.vertex_descriptor.num_ssbo_attributes++;
}
/* NOTE: We are setting num_attributes to be up to the maximum found index; because of
* this, it is possible that we may skip over certain attributes if they were not in the
* source GPUVertFormat. */
MTL_LOG_INFO(
" -- Batch Attribute(%d): ORIG Shader Format: %d, ORIG Vert format: %d, Vert "
"components: %d, Fetch Mode %d --> FINAL FORMAT: %d\n",
mtl_attr.location,
(int)mtl_attr.format,
(int)a->comp_type,
(int)a->comp_len,
(int)a->fetch_mode,
(int)desc.vertex_descriptor.attributes[mtl_attr.location].format);
MTL_LOG_INFO(
" -- [Batch] matching %s attribute '%s' (Attribute Index: %d, Buffer index: %d, "
"offset: %d)\n",
(instanced) ? "instance" : "vertex",
name,
mtl_attr.location,
buffer_index,
attribute_offset);
}
}
}
}
if (buffer_added) {
return buffer_index;
}
return -1;
}
id<MTLRenderCommandEncoder> MTLBatch::bind(uint v_first, uint v_count, uint i_first, uint i_count)
{
/* Setup draw call and render pipeline state here. Called by every draw, but setup here so that
* MTLDrawList only needs to perform setup a single time. */
BLI_assert(this);
/* Fetch Metal device. */
MTLContext *ctx = MTLContext::get();
if (!ctx) {
BLI_assert_msg(false, "No context available for rendering.");
return nil;
}
/* Verify Shader. */
active_shader_ = (shader) ? static_cast<MTLShader *>(unwrap(shader)) : nullptr;
if (active_shader_ == nullptr || !active_shader_->is_valid()) {
/* Skip drawing if there is no valid Metal shader.
* This will occur if the path through which the shader is prepared
* is invalid (e.g. Python without create-info), or, the source shader uses a geometry pass. */
BLI_assert_msg(false, "No valid Metal shader!");
return nil;
}
/* Check if using SSBO Fetch Mode.
* This is an alternative drawing mode to geometry shaders, wherein vertex buffers
* are bound as readable (random-access) GPU buffers and certain descriptor properties
* are passed using Shader uniforms. */
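/* e.g. a shader opting in via the USE_SSBO_VERTEX_FETCH pragma declares its output
* primitive type and vertex count per input primitive, which draw_advanced() uses to
* derive the size of the expanded draw call. */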
bool uses_ssbo_fetch = active_shader_->get_uses_ssbo_vertex_fetch();
/* Prepare Vertex Descriptor and extract VertexBuffers to bind. */
MTLVertBuf *buffers[GPU_BATCH_VBO_MAX_LEN] = {nullptr};
int num_buffers = 0;
/* Ensure Index Buffer is ready. */
MTLIndexBuf *mtl_elem = static_cast<MTLIndexBuf *>(reinterpret_cast<IndexBuf *>(this->elem));
if (mtl_elem != NULL) {
mtl_elem->upload_data();
}
/* Populate vertex descriptor with attribute binding information.
* The vertex descriptor and buffer layout descriptors describe
* how vertex data from bound vertex buffers maps to the
* shader's input.
* A unique vertex descriptor will result in a new PipelineStateObject
* being generated for the currently bound shader. */
prepare_vertex_descriptor_and_bindings(buffers, num_buffers, v_first, v_count, i_first, i_count);
/* Prepare Vertex Buffers - Run before RenderCommandEncoder in case BlitCommandEncoder buffer
* data operations are required. */
for (int i = 0; i < num_buffers; i++) {
MTLVertBuf *buf_at_index = buffers[i];
if (buf_at_index == NULL) {
BLI_assert_msg(
false,
"Total buffer count does not match highest buffer index, could be gaps in bindings");
continue;
}
MTLVertBuf *mtlvbo = static_cast<MTLVertBuf *>(reinterpret_cast<VertBuf *>(buf_at_index));
mtlvbo->bind();
}
/* Ensure render pass is active and fetch active RenderCommandEncoder. */
id<MTLRenderCommandEncoder> rec = ctx->ensure_begin_render_pass();
/* Fetch RenderPassState to enable resource binding for active pass. */
MTLRenderPassState &rps = ctx->main_command_buffer.get_render_pass_state();
/* Debug Check: Ensure Framebuffer instance is not dirty. */
BLI_assert(!ctx->main_command_buffer.get_active_framebuffer()->get_dirty());
/* Bind Shader. */
this->shader_bind();
/* GPU debug markers. */
if (G.debug & G_DEBUG_GPU) {
[rec pushDebugGroup:[NSString stringWithFormat:@"batch_bind%@(shader: %s)",
this->elem ? @"(indexed)" : @"",
active_shader_->get_interface()->get_name()]];
[rec insertDebugSignpost:[NSString
stringWithFormat:@"batch_bind%@(shader: %s)",
this->elem ? @"(indexed)" : @"",
active_shader_->get_interface()->get_name()]];
}
/* Ensure Context Render Pipeline State is fully setup and ready to execute the draw. */
MTLPrimitiveType mtl_prim_type = gpu_prim_type_to_metal(this->prim_type);
if (!ctx->ensure_render_pipeline_state(mtl_prim_type)) {
printf("FAILED TO ENSURE RENDER PIPELINE STATE");
BLI_assert(false);
if (G.debug & G_DEBUG_GPU) {
[rec popDebugGroup];
}
return nil;
}
/*** Bind Vertex Buffers and Index Buffers **/
/* SSBO Vertex Fetch Buffer bindings. */
if (uses_ssbo_fetch) {
/* SSBO Vertex Fetch - Bind Index Buffer to appropriate slot -- if used. */
id<MTLBuffer> idx_buffer = nil;
GPUPrimType final_prim_type = this->prim_type;
if (mtl_elem != nullptr) {
/* Fetch index buffer. This function can situationally return an optimised
* index buffer of a different primitive type. If this is the case, `final_prim_type`
* and `v_count` will be updated with the new format.
* NOTE: For indexed rendering, v_count represents the number of indices. */
idx_buffer = mtl_elem->get_index_buffer(final_prim_type, v_count);
BLI_assert(idx_buffer != nil);
/* Update uniforms for SSBO-vertex-fetch-mode indexed rendering to flag usage. */
int &uniform_ssbo_index_mode_u16 = active_shader_->uni_ssbo_uses_index_mode_u16;
BLI_assert(uniform_ssbo_index_mode_u16 != -1);
int uses_index_mode_u16 = (mtl_elem->index_type_ == GPU_INDEX_U16) ? 1 : 0;
active_shader_->uniform_int(uniform_ssbo_index_mode_u16, 1, 1, &uses_index_mode_u16);
}
else {
idx_buffer = ctx->get_null_buffer();
}
rps.bind_vertex_buffer(idx_buffer, 0, MTL_SSBO_VERTEX_FETCH_IBO_INDEX);
/* Ensure all attributes are set */
active_shader_->ssbo_vertex_fetch_bind_attributes_end(rec);
/* Bind NULL Buffers for unused vertex data slots. */
id<MTLBuffer> null_buffer = ctx->get_null_buffer();
BLI_assert(null_buffer != nil);
for (int i = num_buffers; i < MTL_SSBO_VERTEX_FETCH_MAX_VBOS; i++) {
if (rps.cached_vertex_buffer_bindings[i].metal_buffer == nil) {
rps.bind_vertex_buffer(null_buffer, 0, i);
}
}
/* Flag whether Indexed rendering is used or not. */
int &uniform_ssbo_use_indexed = active_shader_->uni_ssbo_uses_indexed_rendering;
BLI_assert(uniform_ssbo_use_indexed != -1);
int uses_indexed_rendering = (mtl_elem != NULL) ? 1 : 0;
active_shader_->uniform_int(uniform_ssbo_use_indexed, 1, 1, &uses_indexed_rendering);
/* Set SSBO-fetch-mode status uniforms. */
BLI_assert(active_shader_->uni_ssbo_input_prim_type_loc != -1);
BLI_assert(active_shader_->uni_ssbo_input_vert_count_loc != -1);
GPU_shader_uniform_vector_int(reinterpret_cast<GPUShader *>(wrap(active_shader_)),
active_shader_->uni_ssbo_input_prim_type_loc,
1,
1,
(const int *)(&final_prim_type));
GPU_shader_uniform_vector_int(reinterpret_cast<GPUShader *>(wrap(active_shader_)),
active_shader_->uni_ssbo_input_vert_count_loc,
1,
1,
(const int *)(&v_count));
}
/* Bind Vertex Buffers. */
for (int i = 0; i < num_buffers; i++) {
MTLVertBuf *buf_at_index = buffers[i];
if (buf_at_index == NULL) {
BLI_assert_msg(
false,
"Total buffer count does not match highest buffer index, could be gaps in bindings");
continue;
}
/* Buffer handle. */
MTLVertBuf *mtlvbo = static_cast<MTLVertBuf *>(reinterpret_cast<VertBuf *>(buf_at_index));
mtlvbo->flag_used();
/* Fetch buffer from MTLVertexBuffer and bind. */
id<MTLBuffer> mtl_buffer = mtlvbo->get_metal_buffer();
BLI_assert(mtl_buffer != nil);
rps.bind_vertex_buffer(mtl_buffer, 0, i);
}
if (G.debug & G_DEBUG_GPU) {
[rec popDebugGroup];
}
/* Return Render Command Encoder used with setup. */
return rec;
}
void MTLBatch::unbind()
{
}
void MTLBatch::prepare_vertex_descriptor_and_bindings(
MTLVertBuf **buffers, int &num_buffers, int v_first, int v_count, int i_first, int i_count)
{
/* Here we populate the MTLContext vertex descriptor and resolve which buffers need to be bound.
*/
MTLStateManager *state_manager = static_cast<MTLStateManager *>(
MTLContext::get()->state_manager);
MTLRenderPipelineStateDescriptor &desc = state_manager->get_pipeline_descriptor();
const MTLShaderInterface *interface = active_shader_->get_interface();
uint16_t attr_mask = interface->get_enabled_attribute_mask();
/* Reset vertex descriptor to default state. */
desc.reset_vertex_descriptor();
/* Fetch Vertex and Instance Buffers. */
Span<MTLVertBuf *> mtl_verts(reinterpret_cast<MTLVertBuf **>(this->verts), GPU_BATCH_VBO_MAX_LEN);
Span<MTLVertBuf *> mtl_inst(reinterpret_cast<MTLVertBuf **>(this->inst), GPU_BATCH_INST_VBO_MAX_LEN);
/* SSBO Vertex fetch also passes vertex descriptor information into the shader. */
if (active_shader_->get_uses_ssbo_vertex_fetch()) {
active_shader_->ssbo_vertex_fetch_bind_attributes_begin();
}
/* Resolve Metal vertex buffer bindings. */
/* Vertex Descriptors
* ------------------
* Vertex Descriptors are required to generate a pipeline state, based on the current Batch's
* buffer bindings. These bindings are a unique matching, depending on what input attributes a
* batch has in its buffers, and those which are supported by the shader interface.
* We iterate through the buffers and resolve which attributes satisfy the requirements of the
* currently bound shader. We cache this data for a given Batch<->ShaderInterface pairing in a
* VAO cache to avoid the need to recalculate this data. */
bool buffer_is_instanced[GPU_BATCH_VBO_MAX_LEN] = {false};
VertexDescriptorShaderInterfacePair *descriptor = this->vao_cache.find(interface);
if (descriptor) {
desc.vertex_descriptor = descriptor->vertex_descriptor;
attr_mask = descriptor->attr_mask;
num_buffers = descriptor->num_buffers;
for (int bid = 0; bid < GPU_BATCH_VBO_MAX_LEN; ++bid) {
if (descriptor->bufferIds[bid].used) {
if (descriptor->bufferIds[bid].is_instance) {
buffers[bid] = mtl_inst[descriptor->bufferIds[bid].id];
buffer_is_instanced[bid] = true;
}
else {
buffers[bid] = mtl_verts[descriptor->bufferIds[bid].id];
buffer_is_instanced[bid] = false;
}
}
}
/* Use cached ssbo attribute binding data. */
if (active_shader_->get_uses_ssbo_vertex_fetch()) {
BLI_assert(desc.vertex_descriptor.uses_ssbo_vertex_fetch);
for (int attr_id = 0; attr_id < desc.vertex_descriptor.num_ssbo_attributes; attr_id++) {
active_shader_->ssbo_vertex_fetch_bind_attribute(
desc.vertex_descriptor.ssbo_attributes[attr_id]);
}
}
}
else {
VertexDescriptorShaderInterfacePair pair{};
pair.interface = interface;
for (int i = 0; i < GPU_BATCH_VBO_MAX_LEN; ++i) {
pair.bufferIds[i].id = -1;
pair.bufferIds[i].is_instance = 0;
pair.bufferIds[i].used = 0;
}
/* NOTE: Attribute extraction order from buffers is the reverse of OpenGL's, as we flag once an
* attribute is found, rather than pre-setting the mask. */
/* Extract Instance attributes (These take highest priority). */
for (int v = 0; v < GPU_BATCH_INST_VBO_MAX_LEN; v++) {
if (mtl_inst[v]) {
MTL_LOG_INFO(" -- [Batch] Checking bindings for bound instance buffer %p\n", mtl_inst[v]);
int buffer_ind = this->prepare_vertex_binding(
mtl_inst[v], desc, interface, attr_mask, true);
if (buffer_ind >= 0) {
buffers[buffer_ind] = mtl_inst[v];
buffer_is_instanced[buffer_ind] = true;
pair.bufferIds[buffer_ind].id = v;
pair.bufferIds[buffer_ind].used = 1;
pair.bufferIds[buffer_ind].is_instance = 1;
num_buffers = ((buffer_ind + 1) > num_buffers) ? (buffer_ind + 1) : num_buffers;
}
}
}
/* Extract Vertex attributes (First-bound vertex buffer takes priority). */
for (int v = 0; v < GPU_BATCH_VBO_MAX_LEN; v++) {
if (mtl_verts[v] != NULL) {
MTL_LOG_INFO(" -- [Batch] Checking bindings for bound vertex buffer %p\n", mtl_verts[v]);
int buffer_ind = this->prepare_vertex_binding(
mtl_verts[v], desc, interface, attr_mask, false);
if (buffer_ind >= 0) {
buffers[buffer_ind] = mtl_verts[v];
buffer_is_instanced[buffer_ind] = false;
pair.bufferIds[buffer_ind].id = v;
pair.bufferIds[buffer_ind].used = 1;
pair.bufferIds[buffer_ind].is_instance = 0;
num_buffers = ((buffer_ind + 1) > num_buffers) ? (buffer_ind + 1) : num_buffers;
}
}
}
/* Add to VertexDescriptor cache */
desc.vertex_descriptor.uses_ssbo_vertex_fetch = active_shader_->get_uses_ssbo_vertex_fetch();
pair.attr_mask = attr_mask;
pair.vertex_descriptor = desc.vertex_descriptor;
pair.num_buffers = num_buffers;
if (!this->vao_cache.insert(pair)) {
printf(
"[Performance Warning] cache is full (Size: %d), vertex descriptor will not be cached\n",
GPU_VAO_STATIC_LEN);
}
}
/* DEBUG: verify if our attribute bindings have been fully provided as expected. */
#if MTL_DEBUG_SHADER_ATTRIBUTES == 1
if (attr_mask != 0) {
for (uint16_t mask = 1, a = 0; a < 16; a++, mask <<= 1) {
if (attr_mask & mask) {
/* Fallback for setting default attributes, for missed slots. Attributes flagged with
* 'MTLVertexFormatInvalid' in the vertex descriptor are bound to a NULL buffer during PSO
* creation. */
MTL_LOG_WARNING("MTLBatch: Missing expected attribute '%s' at index '%d' for shader: %s\n",
this->active_shader->interface->attributes[a].name,
a,
interface->name);
/* Ensure any assigned attribute has not been given an invalid format. This should not
* occur and may be the result of an unsupported attribute type conversion. */
BLI_assert(desc.attributes[a].format == MTLVertexFormatInvalid);
}
}
}
#endif
}
void MTLBatch::draw_advanced(int v_first, int v_count, int i_first, int i_count)
{
#if TRUST_NO_ONE
BLI_assert(v_count > 0 && i_count > 0);
#endif
/* Setup RenderPipelineState for batch. */
MTLContext *ctx = reinterpret_cast<MTLContext *>(GPU_context_active_get());
id<MTLRenderCommandEncoder> rec = this->bind(v_first, v_count, i_first, i_count);
if (rec == nil) {
return;
}
/* Fetch IndexBuffer and resolve primitive type. */
MTLIndexBuf *mtl_elem = static_cast<MTLIndexBuf *>(reinterpret_cast<IndexBuf *>(this->elem));
MTLPrimitiveType mtl_prim_type = gpu_prim_type_to_metal(this->prim_type);
/* Render using SSBO Vertex Fetch. */
if (active_shader_->get_uses_ssbo_vertex_fetch()) {
/* Submit draw call with modified vertex count, which reflects vertices per primitive defined
* in the USE_SSBO_VERTEX_FETCH pragma. */
int num_input_primitives = gpu_get_prim_count_from_type(v_count, this->prim_type);
int output_num_verts = num_input_primitives *
active_shader_->get_ssbo_vertex_fetch_output_num_verts();
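/* Worked example (illustrative numbers): drawing GPU_PRIM_LINES with v_count = 8 gives
* num_input_primitives = 4; a shader emitting 6 output vertices per input primitive
* then yields output_num_verts = 24. */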
BLI_assert_msg(
mtl_vertex_count_fits_primitive_type(
output_num_verts, active_shader_->get_ssbo_vertex_fetch_output_prim_type()),
"Output Vertex count is not compatible with the requested output vertex primitive type");
[rec drawPrimitives:active_shader_->get_ssbo_vertex_fetch_output_prim_type()
vertexStart:0
vertexCount:output_num_verts
instanceCount:i_count
baseInstance:i_first];
ctx->main_command_buffer.register_draw_counters(output_num_verts * i_count);
}
/* Perform regular draw. */
else if (mtl_elem == NULL) {
/* Primitive Type topology emulation. */
if (mtl_needs_topology_emulation(this->prim_type)) {
/* Generate index buffer for primitive types requiring emulation. */
GPUPrimType emulated_prim_type = this->prim_type;
uint32_t emulated_v_count = v_count;
id<MTLBuffer> generated_index_buffer = this->get_emulated_topology_buffer(emulated_prim_type,
emulated_v_count);
BLI_assert(generated_index_buffer != nil);
MTLPrimitiveType emulated_mtl_prim_type = gpu_prim_type_to_metal(emulated_prim_type);
/* Temp: Disable culling for emulated primitive types.
* TODO(Metal): Support face winding in topology buffer. */
[rec setCullMode:MTLCullModeNone];
if (generated_index_buffer != nil) {
BLI_assert(emulated_mtl_prim_type == MTLPrimitiveTypeTriangle ||
emulated_mtl_prim_type == MTLPrimitiveTypeLine);
if (emulated_mtl_prim_type == MTLPrimitiveTypeTriangle) {
BLI_assert(emulated_v_count % 3 == 0);
}
if (emulated_mtl_prim_type == MTLPrimitiveTypeLine) {
BLI_assert(emulated_v_count % 2 == 0);
}
/* Set depth stencil state (requires knowledge of primitive type). */
ctx->ensure_depth_stencil_state(emulated_mtl_prim_type);
[rec drawIndexedPrimitives:emulated_mtl_prim_type
indexCount:emulated_v_count
indexType:MTLIndexTypeUInt32
indexBuffer:generated_index_buffer
indexBufferOffset:0
instanceCount:i_count
baseVertex:v_first
baseInstance:i_first];
}
else {
printf("[Note] Cannot draw batch -- Emulated Topology mode: %u not yet supported\n",
this->prim_type);
}
}
else {
/* Set depth stencil state (requires knowledge of primitive type). */
ctx->ensure_depth_stencil_state(mtl_prim_type);
/* Issue draw call. */
[rec drawPrimitives:mtl_prim_type
vertexStart:v_first
vertexCount:v_count
instanceCount:i_count
baseInstance:i_first];
}
ctx->main_command_buffer.register_draw_counters(v_count * i_count);
}
/* Perform indexed draw. */
else {
MTLIndexType index_type = MTLIndexBuf::gpu_index_type_to_metal(mtl_elem->index_type_);
uint32_t base_index = mtl_elem->index_base_;
uint32_t index_size = (mtl_elem->index_type_ == GPU_INDEX_U16) ? 2 : 4;
uint32_t v_first_ofs = ((v_first + mtl_elem->index_start_) * index_size);
BLI_assert_msg((v_first_ofs % index_size) == 0,
"Index offset is not 2/4-byte aligned as per METAL spec");
/* Fetch index buffer. May return an index buffer of a differing format,
* if index buffer optimisation is used. In these cases, final_prim_type and
* index_count get updated with the new properties. */
GPUPrimType final_prim_type = this->prim_type;
uint index_count = v_count;
id<MTLBuffer> index_buffer = mtl_elem->get_index_buffer(final_prim_type, index_count);
mtl_prim_type = gpu_prim_type_to_metal(final_prim_type);
BLI_assert(index_buffer != nil);
if (index_buffer != nil) {
/* Set depth stencil state (requires knowledge of primitive type). */
ctx->ensure_depth_stencil_state(mtl_prim_type);
/* Issue draw call. */
[rec drawIndexedPrimitives:mtl_prim_type
indexCount:index_count
indexType:index_type
indexBuffer:index_buffer
indexBufferOffset:v_first_ofs
instanceCount:i_count
baseVertex:base_index
baseInstance:i_first];
ctx->main_command_buffer.register_draw_counters(index_count * i_count);
}
else {
BLI_assert_msg(false, "Index buffer does not have backing Metal buffer");
}
}
/* End of draw. */
this->unbind();
}
/** \} */
/* -------------------------------------------------------------------- */
/** \name Topology emulation and optimization
* \{ */
id<MTLBuffer> MTLBatch::get_emulated_topology_buffer(GPUPrimType &in_out_prim_type,
uint32_t &in_out_v_count)
{
BLI_assert(in_out_v_count > 0);
/* Determine emulated primitive types. */
GPUPrimType input_prim_type = in_out_prim_type;
uint32_t v_count = in_out_v_count;
GPUPrimType output_prim_type;
switch (input_prim_type) {
case GPU_PRIM_POINTS:
case GPU_PRIM_LINES:
case GPU_PRIM_TRIS:
BLI_assert_msg(false, "Optimal primitive types should not reach here.");
return nil;
break;
case GPU_PRIM_LINES_ADJ:
case GPU_PRIM_TRIS_ADJ:
BLI_assert_msg(false, "Adjacency primitive types should not reach here.");
return nil;
break;
case GPU_PRIM_LINE_STRIP:
case GPU_PRIM_LINE_LOOP:
case GPU_PRIM_LINE_STRIP_ADJ:
output_prim_type = GPU_PRIM_LINES;
break;
case GPU_PRIM_TRI_STRIP:
case GPU_PRIM_TRI_FAN:
output_prim_type = GPU_PRIM_TRIS;
break;
default:
BLI_assert_msg(false, "Invalid primitive type.");
return nil;
}
/* Check if topology buffer exists and is valid. */
if (this->emulated_topology_buffer_ != nullptr &&
(emulated_topology_type_ != input_prim_type || topology_buffer_input_v_count_ != v_count)) {
/* Release existing topology buffer. */
emulated_topology_buffer_->free();
emulated_topology_buffer_ = nullptr;
}
/* Generate new topology index buffer. */
if (this->emulated_topology_buffer_ == nullptr) {
/* Calculate IB len. */
uint32_t output_prim_count = 0;
switch (input_prim_type) {
case GPU_PRIM_LINE_STRIP:
case GPU_PRIM_LINE_STRIP_ADJ:
output_prim_count = v_count - 1;
break;
case GPU_PRIM_LINE_LOOP:
output_prim_count = v_count;
break;
case GPU_PRIM_TRI_STRIP:
case GPU_PRIM_TRI_FAN:
output_prim_count = v_count - 2;
break;
default:
BLI_assert_msg(false, "Cannot generate optimized topology buffer for other types.");
break;
}
uint32_t output_IB_elems = output_prim_count * ((output_prim_type == GPU_PRIM_TRIS) ? 3 : 2);
/* Allocate buffer. */
uint32_t buffer_bytes = output_IB_elems * 4;
BLI_assert(buffer_bytes > 0);
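/* Worked example (illustrative): emulating GPU_PRIM_TRI_FAN with v_count = 6 gives
* output_prim_count = 4 triangles, so output_IB_elems = 12 indices and
* buffer_bytes = 48 with 32-bit indices. */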
this->emulated_topology_buffer_ = MTLContext::get_global_memory_manager().allocate(
buffer_bytes, true);
/* Populate. */
uint32_t *data = (uint32_t *)this->emulated_topology_buffer_->get_host_ptr();
BLI_assert(data != nullptr);
/* TODO(Metal): Support inverse winding modes. */
bool winding_clockwise = false;
UNUSED_VARS(winding_clockwise);
switch (input_prim_type) {
/* Line Loop. */
case GPU_PRIM_LINE_LOOP: {
int line = 0;
for (line = 0; line < output_prim_count - 1; line++) {
/* Two indices per emitted line primitive. */
data[line * 2 + 0] = line + 0;
data[line * 2 + 1] = line + 1;
}
/* Closing line. */
data[line * 2 + 0] = line + 0;
data[line * 2 + 1] = 0;
} break;
/* Triangle Fan. */
case GPU_PRIM_TRI_FAN: {
for (int triangle = 0; triangle < output_prim_count; triangle++) {
data[triangle * 3 + 0] = 0; /* Always 0 */
data[triangle * 3 + 1] = triangle + 1;
data[triangle * 3 + 2] = triangle + 2;
}
} break;
default:
BLI_assert_msg(false, "Other primitive types do not require emulation.");
return nil;
}
/* Flush. */
this->emulated_topology_buffer_->flush();
/* Assign members relating to current cached IB. */
topology_buffer_input_v_count_ = v_count;
topology_buffer_output_v_count_ = output_IB_elems;
emulated_topology_type_ = input_prim_type;
}
/* Return. */
in_out_v_count = topology_buffer_output_v_count_;
in_out_prim_type = output_prim_type;
return (emulated_topology_buffer_) ? emulated_topology_buffer_->get_metal_buffer() : nil;
}
/** \} */
} // namespace blender::gpu

View File

@ -995,19 +995,21 @@ bool MTLContext::ensure_uniform_buffer_bindings(
if (ubo.buffer_index >= 0) {
const uint32_t buffer_index = ubo.buffer_index;
/* Uniform Buffer index offset by 1 as the first shader buffer binding slot is reserved for
* the uniform PushConstantBlock. */
const uint32_t buffer_index = ubo.buffer_index + 1;
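/* e.g. a UBO reported at ubo.buffer_index 0 is bound to Metal buffer slot [[buffer(1)]],
* as [[buffer(0)]] is reserved for the PushConstantBlock. */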
int ubo_offset = 0;
id<MTLBuffer> ubo_buffer = nil;
int ubo_size = 0;
bool bind_dummy_buffer = false;
if (this->pipeline_state.ubo_bindings[buffer_index].bound) {
if (this->pipeline_state.ubo_bindings[ubo_index].bound) {
/* Fetch UBO global-binding properties from slot. */
ubo_offset = 0;
ubo_buffer = this->pipeline_state.ubo_bindings[buffer_index].ubo->get_metal_buffer(
ubo_buffer = this->pipeline_state.ubo_bindings[ubo_index].ubo->get_metal_buffer(
&ubo_offset);
ubo_size = this->pipeline_state.ubo_bindings[buffer_index].ubo->get_size();
ubo_size = this->pipeline_state.ubo_bindings[ubo_index].ubo->get_size();
/* Use dummy zero buffer if no buffer assigned -- this is an optimization to avoid
* allocating zero buffers. */

View File

@ -9,34 +9,50 @@
#pragma once
#include "BLI_sys_types.h"
#include "GPU_batch.h"
#include "MEM_guardedalloc.h"
#include "gpu_drawlist_private.hh"
namespace blender {
namespace gpu {
#include "mtl_batch.hh"
#include "mtl_context.hh"
namespace blender::gpu {
/**
* TODO(Metal): MTLDrawList Implementation. Included as temporary stub.
*/
/**
* Implementation of Multi Draw Indirect using Metal.
*/
class MTLDrawList : public DrawList {
public:
MTLDrawList(int length)
{
}
~MTLDrawList()
{
}
void append(GPUBatch *batch, int i_first, int i_count) override
{
}
void submit() override
{
}
private:
/** Batch for which we are recording commands. */
MTLBatch *batch_;
/** Mapped memory bounds. */
void *data_;
/** Length of the mapped buffer (in bytes). */
size_t data_size_;
/** Current offset inside the mapped buffer (in bytes). */
size_t command_offset_;
/** Current number of commands recorded inside the mapped buffer. */
uint32_t command_len_;
/** Is UINT_MAX if not drawing indexed geometry. Cached to avoid dereferencing batch_. */
uint32_t base_index_;
/** Cached to avoid dereferencing batch_. */
uint32_t v_first_, v_count_;
/** Length of the whole buffer (in bytes). */
uint32_t buffer_size_;
public:
MTLDrawList(int length);
~MTLDrawList();
void append(GPUBatch *batch, int i_first, int i_count) override;
void submit() override;
private:
void init();
MEM_CXX_CLASS_ALLOC_FUNCS("MTLDrawList");
};
} // namespace gpu
} // namespace blender
} // namespace blender::gpu

View File

@ -0,0 +1,282 @@
/** \file
* \ingroup gpu
*
* Implementation of Multi Draw Indirect using Metal.
* Falls back to emitting individual draw calls, as host-side MDI is not supported.
*/
#include "BLI_assert.h"
#include "GPU_batch.h"
#include "mtl_common.hh"
#include "mtl_drawlist.hh"
#include "mtl_primitive.hh"
using namespace blender::gpu;
namespace blender::gpu {
/* Indirect draw call structure for reference. */
/* MTLDrawPrimitivesIndirectArguments --
* https://developer.apple.com/documentation/metal/mtldrawprimitivesindirectarguments?language=objc
*/
/* struct MTLDrawPrimitivesIndirectArguments {
* uint32_t vertexCount;
* uint32_t instanceCount;
* uint32_t vertexStart;
* uint32_t baseInstance;
};*/
/* MTLDrawIndexedPrimitivesIndirectArguments --
* https://developer.apple.com/documentation/metal/mtldrawindexedprimitivesindirectarguments?language=objc
*/
/* struct MTLDrawIndexedPrimitivesIndirectArguments {
* uint32_t indexCount;
* uint32_t instanceCount;
* uint32_t indexStart;
* uint32_t baseVertex;
* uint32_t baseInstance;
};*/
#define MDI_ENABLED (buffer_size_ != 0)
#define MDI_DISABLED (buffer_size_ == 0)
#define MDI_INDEXED (base_index_ != UINT_MAX)
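/* Sketch of the macro semantics: MDI_INDEXED becomes true once append() binds a batch
* with an index buffer, as base_index_ is then assigned from el->index_base_ rather than
* being left at UINT_MAX. */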
MTLDrawList::MTLDrawList(int length)
{
BLI_assert(length > 0);
batch_ = nullptr;
command_len_ = 0;
base_index_ = 0;
command_offset_ = 0;
data_size_ = 0;
buffer_size_ = sizeof(MTLDrawIndexedPrimitivesIndirectArguments) * length;
data_ = (void *)MEM_mallocN(buffer_size_, __func__);
}
MTLDrawList::~MTLDrawList()
{
if (data_) {
MEM_freeN(data_);
data_ = nullptr;
}
}
void MTLDrawList::init()
{
MTLContext *ctx = reinterpret_cast<MTLContext *>(GPU_context_active_get());
BLI_assert(ctx);
BLI_assert(MDI_ENABLED);
BLI_assert(data_ == nullptr);
UNUSED_VARS_NDEBUG(ctx);
batch_ = nullptr;
command_len_ = 0;
/* Allocate the indirect-command staging buffer; init() is only reached when `data_` is null. */
data_ = (void *)MEM_mallocN(buffer_size_, __func__);
BLI_assert(data_);
command_offset_ = 0;
}
void MTLDrawList::append(GPUBatch *gpu_batch, int i_first, int i_count)
{
/* Fallback when MultiDrawIndirect is not supported/enabled. */
MTLShader *shader = static_cast<MTLShader *>(unwrap(gpu_batch->shader));
bool requires_ssbo = (shader->get_uses_ssbo_vertex_fetch());
bool requires_emulation = mtl_needs_topology_emulation(gpu_batch->prim_type);
if (MDI_DISABLED || requires_ssbo || requires_emulation) {
GPU_batch_draw_advanced(gpu_batch, 0, 0, i_first, i_count);
return;
}
if (data_ == nullptr) {
this->init();
}
BLI_assert(data_);
MTLBatch *mtl_batch = static_cast<MTLBatch *>(gpu_batch);
BLI_assert(mtl_batch);
if (mtl_batch != batch_) {
/* Submit existing calls. */
this->submit();
/* Begin new batch. */
batch_ = mtl_batch;
/* Cached for faster access. */
MTLIndexBuf *el = batch_->elem_();
base_index_ = el ? el->index_base_ : UINT_MAX;
v_first_ = el ? el->index_start_ : 0;
v_count_ = el ? el->index_len_ : batch_->verts_(0)->vertex_len;
}
if (v_count_ == 0) {
/* Nothing to draw. */
return;
}
if (MDI_INDEXED) {
MTLDrawIndexedPrimitivesIndirectArguments *cmd =
reinterpret_cast<MTLDrawIndexedPrimitivesIndirectArguments *>((char *)data_ +
command_offset_);
cmd->indexStart = v_first_;
cmd->indexCount = v_count_;
cmd->instanceCount = i_count;
cmd->baseVertex = base_index_;
cmd->baseInstance = i_first;
}
else {
MTLDrawPrimitivesIndirectArguments *cmd =
reinterpret_cast<MTLDrawPrimitivesIndirectArguments *>((char *)data_ + command_offset_);
cmd->vertexStart = v_first_;
cmd->vertexCount = v_count_;
cmd->instanceCount = i_count;
cmd->baseInstance = i_first;
}
size_t command_size = MDI_INDEXED ? sizeof(MTLDrawIndexedPrimitivesIndirectArguments) :
sizeof(MTLDrawPrimitivesIndirectArguments);
command_offset_ += command_size;
command_len_++;
/* Check if we can fit at least one other command. */
if (command_offset_ + command_size > buffer_size_) {
this->submit();
}
return;
}
void MTLDrawList::submit()
{
/* Metal does not support MDI from the host side, but we still benefit from only executing the
* batch bind a single time, rather than per-draw.
* NOTE(Metal): Consider using MTLIndirectCommandBuffer to achieve similar behaviour. */
if (command_len_ == 0) {
return;
}
/* Something's wrong if we get here without MDI support. */
BLI_assert(MDI_ENABLED);
BLI_assert(data_);
/* Host-side MDI is currently unsupported on Metal. */
bool can_use_MDI = false;
/* Verify context. */
MTLContext *ctx = reinterpret_cast<MTLContext *>(GPU_context_active_get());
BLI_assert(ctx);
/* Execute indirect draw calls. */
MTLShader *shader = static_cast<MTLShader *>(unwrap(batch_->shader));
bool SSBO_MODE = (shader->get_uses_ssbo_vertex_fetch());
if (SSBO_MODE) {
can_use_MDI = false;
BLI_assert(false);
return;
}
/* Heuristic to determine whether using indirect drawing is more efficient. */
size_t command_size = MDI_INDEXED ? sizeof(MTLDrawIndexedPrimitivesIndirectArguments) :
sizeof(MTLDrawPrimitivesIndirectArguments);
const bool is_finishing_a_buffer = (command_offset_ + command_size > buffer_size_);
can_use_MDI = can_use_MDI && (is_finishing_a_buffer || command_len_ > 2);
/* Bind Batch to setup render pipeline state. */
id<MTLRenderCommandEncoder> rec = batch_->bind(0, 0, 0, 0);
if (!rec) {
BLI_assert_msg(false, "A RenderCommandEncoder should always be available!\n");
return;
}
/* Common properties. */
MTLPrimitiveType mtl_prim_type = gpu_prim_type_to_metal(batch_->prim_type);
/* Execute multidraw indirect. */
if (can_use_MDI && false) {
/* Metal doesn't support MDI -- singular indirect draw calls are supported,
* but multi-draw is not.
* TODO(Metal): Consider using IndirectCommandBuffers to provide similar
* behaviour. */
}
else {
/* Execute draws manually. */
if (MDI_INDEXED) {
MTLDrawIndexedPrimitivesIndirectArguments *cmd =
(MTLDrawIndexedPrimitivesIndirectArguments *)data_;
MTLIndexBuf *mtl_elem = static_cast<MTLIndexBuf *>(
reinterpret_cast<IndexBuf *>(batch_->elem));
BLI_assert(mtl_elem);
MTLIndexType index_type = MTLIndexBuf::gpu_index_type_to_metal(mtl_elem->index_type_);
uint32_t index_size = (mtl_elem->index_type_ == GPU_INDEX_U16) ? 2 : 4;
uint32_t v_first_ofs = (mtl_elem->index_start_ * index_size);
uint32_t index_count = cmd->indexCount;
/* Fetch index buffer. May return an index buffer of a differing format,
* if index buffer optimisation is used. In these cases, mtl_prim_type and
* index_count get updated with the new properties. */
GPUPrimType final_prim_type = batch_->prim_type;
id<MTLBuffer> index_buffer = mtl_elem->get_index_buffer(final_prim_type, index_count);
BLI_assert(index_buffer != nil);
/* Final primitive type. */
mtl_prim_type = gpu_prim_type_to_metal(final_prim_type);
if (index_buffer != nil) {
/* Set depth stencil state (requires knowledge of primitive type). */
ctx->ensure_depth_stencil_state(mtl_prim_type);
for (int i = 0; i < command_len_; i++, cmd++) {
[rec drawIndexedPrimitives:mtl_prim_type
indexCount:index_count
indexType:index_type
indexBuffer:index_buffer
indexBufferOffset:v_first_ofs
instanceCount:cmd->instanceCount
baseVertex:cmd->baseVertex
baseInstance:cmd->baseInstance];
ctx->main_command_buffer.register_draw_counters(cmd->indexCount * cmd->instanceCount);
}
}
else {
BLI_assert_msg(false, "Index buffer does not have backing Metal buffer");
}
}
else {
MTLDrawPrimitivesIndirectArguments *cmd = (MTLDrawPrimitivesIndirectArguments *)data_;
/* Verify if topology emulation is required. */
if (mtl_needs_topology_emulation(batch_->prim_type)) {
BLI_assert_msg(false, "topology emulation cases should use fallback.");
}
else {
/* Set depth stencil state (requires knowledge of primitive type). */
ctx->ensure_depth_stencil_state(mtl_prim_type);
for (int i = 0; i < command_len_; i++, cmd++) {
[rec drawPrimitives:mtl_prim_type
vertexStart:cmd->vertexStart
vertexCount:cmd->vertexCount
instanceCount:cmd->instanceCount
baseInstance:cmd->baseInstance];
ctx->main_command_buffer.register_draw_counters(cmd->vertexCount * cmd->instanceCount);
}
}
}
}
/* Unbind batch. */
batch_->unbind();
/* Reset command offsets. */
command_len_ = 0;
command_offset_ = 0;
/* Avoid keeping reference to the batch. */
batch_ = nullptr;
}
} // namespace blender::gpu

View File

@ -99,6 +99,9 @@ void MTLImmediate::end()
MTLRenderPipelineStateDescriptor &desc = state_manager->get_pipeline_descriptor();
const MTLShaderInterface *interface = active_mtl_shader->get_interface();
/* Reset vertex descriptor to default state. */
desc.reset_vertex_descriptor();
desc.vertex_descriptor.num_attributes = interface->get_total_attributes();
desc.vertex_descriptor.num_vert_buffers = 1;

View File

@ -243,6 +243,19 @@ struct MTLRenderPipelineStateDescriptor {
return hash;
}
/* Reset the Vertex Descriptor to default. */
void reset_vertex_descriptor()
{
vertex_descriptor.num_attributes = 0;
vertex_descriptor.num_vert_buffers = 0;
for (int i = 0; i < GPU_VERT_ATTR_MAX_LEN; i++) {
vertex_descriptor.attributes[i].format = MTLVertexFormatInvalid;
vertex_descriptor.attributes[i].offset = 0;
}
vertex_descriptor.uses_ssbo_vertex_fetch = false;
vertex_descriptor.num_ssbo_attributes = 0;
}
};
} // namespace blender::gpu

View File

@ -117,9 +117,7 @@ uint32_t MTLShaderInterface::add_uniform_block(uint32_t name_offset,
MTLShaderUniformBlock &uni_block = ubos_[total_uniform_blocks_];
uni_block.name_offset = name_offset;
/* We offset the buffer binding index by one, as the first slot is reserved for push constant
* data. */
uni_block.buffer_index = buffer_index + 1;
uni_block.buffer_index = buffer_index;
uni_block.size = size;
uni_block.current_offset = 0;
uni_block.stage_mask = ShaderStage::BOTH;
@ -297,8 +295,10 @@ void MTLShaderInterface::prepare_common_shader_inputs()
current_input->name_hash = BLI_hash_string(this->get_name_at_offset(shd_ubo.name_offset));
/* Location refers to the index in the ubos_ array. */
current_input->location = ubo_index;
/* Final binding location refers to the buffer binding index within the shader (Relative to
* MTL_uniform_buffer_base_index). */
/* Binding location refers to the UBO bind slot in
* #MTLContextGlobalShaderPipelineState::ubo_bindings. The buffer bind index [[buffer(N)]]
* within the shader will apply an offset for bound vertex buffers and the default uniform
* PushConstantBlock. */
current_input->binding = shd_ubo.buffer_index;
current_input++;
}

View File

@ -51,9 +51,9 @@ struct TextureUpdateRoutineSpecialisation {
uint64_t hash() const
{
blender::DefaultHash<std::string> string_hasher;
return uint64_t(string_hasher(
return (uint64_t)string_hasher(
this->input_data_type + this->output_data_type +
std::to_string((this->component_count_input << 8) + this->component_count_output)));
std::to_string((this->component_count_input << 8) + this->component_count_output));
}
};

View File

@ -337,20 +337,6 @@ void gpu::MTLTexture::blit(gpu::MTLTexture *dst,
GPU_batch_draw(quad);
/* TMP draw with IMM TODO(Metal): Remove this once GPUBatch is supported. */
GPUVertFormat *imm_format = immVertexFormat();
uint pos = GPU_vertformat_attr_add(imm_format, "pos", GPU_COMP_F32, 2, GPU_FETCH_FLOAT);
immBindShader(shader);
immBegin(GPU_PRIM_TRI_STRIP, 4);
immVertex2f(pos, 1, 0);
immVertex2f(pos, 0, 0);
immVertex2f(pos, 1, 1);
immVertex2f(pos, 0, 1);
immEnd();
immUnbindProgram();
/**********************/
/* restoring old pipeline state. */
GPU_depth_mask(depth_write_prev);
GPU_stencil_write_mask_set(stencil_mask_prev);
@ -1472,10 +1458,82 @@ bool gpu::MTLTexture::init_internal()
bool gpu::MTLTexture::init_internal(GPUVertBuf *vbo)
{
/* GPU_DEPTH24_STENCIL8 is not a valid format for buffer-backed textures, and is not
* supported on Apple Silicon; remap to GPU_DEPTH32F_STENCIL8. */
BLI_assert_msg(this->format_ != GPU_DEPTH24_STENCIL8,
"Apple silicon does not support GPU_DEPTH24_S8");
if (this->format_ == GPU_DEPTH24_STENCIL8) {
/* Apple Silicon requires GPU_DEPTH32F_STENCIL8 instead of GPU_DEPTH24_STENCIL8. */
this->format_ = GPU_DEPTH32F_STENCIL8;
}
MTLPixelFormat mtl_format = gpu_texture_format_to_metal(this->format_);
mtl_max_mips_ = 1;
mipmaps_ = 0;
this->mip_range_set(0, 0);
/* Create texture from GPUVertBuf's buffer. */
MTLVertBuf *mtl_vbo = static_cast<MTLVertBuf *>(unwrap(vbo));
mtl_vbo->bind();
mtl_vbo->flag_used();
/* Get Metal Buffer. */
id<MTLBuffer> source_buffer = mtl_vbo->get_metal_buffer();
BLI_assert(source_buffer);
/* Verify size. */
if (w_ <= 0) {
MTL_LOG_WARNING("Allocating texture buffer of width 0!\n");
w_ = 1;
}
/* Verify Texture and vertex buffer alignment. */
int bytes_per_pixel = get_mtl_format_bytesize(mtl_format);
int bytes_per_row = bytes_per_pixel * w_;
MTLContext *mtl_ctx = MTLContext::get();
uint32_t align_requirement = static_cast<uint32_t>(
[mtl_ctx->device minimumLinearTextureAlignmentForPixelFormat:mtl_format]);
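/* Worked example (illustrative values): a 16-byte-per-pixel format with w_ = 100 gives
* bytes_per_row = 1600; if the device reports a 256-byte alignment requirement, the
* bytesPerRow passed to newTextureWithDescriptor below rounds up to 1792. */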
/* Verify per-vertex size aligns with texture size. */
const GPUVertFormat *format = GPU_vertbuf_get_format(vbo);
BLI_assert(bytes_per_pixel == format->stride &&
"Pixel format stride MUST match the texture format stride -- These being different "
"is likely caused by Metal's VBO padding to a minimum of 4-bytes per-vertex");
UNUSED_VARS_NDEBUG(format);
/* Create texture descriptor. */
BLI_assert(type_ == GPU_TEXTURE_BUFFER);
texture_descriptor_ = [[MTLTextureDescriptor alloc] init];
texture_descriptor_.pixelFormat = mtl_format;
texture_descriptor_.textureType = MTLTextureTypeTextureBuffer;
texture_descriptor_.width = w_;
texture_descriptor_.height = 1;
texture_descriptor_.depth = 1;
texture_descriptor_.arrayLength = 1;
texture_descriptor_.mipmapLevelCount = mtl_max_mips_;
texture_descriptor_.usage =
MTLTextureUsageShaderRead | MTLTextureUsageShaderWrite |
MTLTextureUsagePixelFormatView; /* TODO(Metal): Optimise usage flags. */
texture_descriptor_.storageMode = [source_buffer storageMode];
texture_descriptor_.sampleCount = 1;
texture_descriptor_.cpuCacheMode = [source_buffer cpuCacheMode];
texture_descriptor_.hazardTrackingMode = [source_buffer hazardTrackingMode];
texture_ = [source_buffer
newTextureWithDescriptor:texture_descriptor_
offset:0
bytesPerRow:ceil_to_multiple_u(bytes_per_row, align_requirement)];
aligned_w_ = bytes_per_row / bytes_per_pixel;
BLI_assert(texture_);
texture_.label = [NSString stringWithUTF8String:this->get_name()];
is_baked_ = true;
is_dirty_ = false;
resource_mode_ = MTL_TEXTURE_MODE_VBO;
/* Track Status. */
vert_buffer_ = mtl_vbo;
vert_buffer_mtl_ = source_buffer;
/* Cleanup. */
[texture_descriptor_ release];
texture_descriptor_ = nullptr;
return true;
}
@ -1522,7 +1580,6 @@ bool gpu::MTLTexture::texture_is_baked()
/* Prepare texture parameters after initialization, but before baking. */
void gpu::MTLTexture::prepare_internal()
{
/* Derive implicit usage flags for Depth/Stencil attachments. */
if (format_flag_ & GPU_FORMAT_DEPTH || format_flag_ & GPU_FORMAT_STENCIL) {
gpu_image_usage_flags_ |= GPU_TEXTURE_USAGE_ATTACHMENT;
@ -1687,7 +1744,7 @@ void gpu::MTLTexture::ensure_baked()
/* Determine Resource Mode. */
resource_mode_ = MTL_TEXTURE_MODE_DEFAULT;
/* Create texture. */
/* Standard texture allocation. */
texture_ = [ctx->device newTextureWithDescriptor:texture_descriptor_];
[texture_descriptor_ release];

View File

@ -1171,6 +1171,10 @@ enum {
FILE_ENTRY_NAME_FREE = 1 << 1,
/* The preview for this entry is being loaded on another thread. */
FILE_ENTRY_PREVIEW_LOADING = 1 << 2,
/** For #FILE_TYPE_BLENDERLIB only: Denotes that the ID is known to not have a preview (none was
* found in the .blend). Stored so we don't keep trying to find non-existent previews every time
* we reload previews. When dealing with heavy files this can have quite an impact. */
FILE_ENTRY_BLENDERLIB_NO_PREVIEW = 1 << 3,
};
/** \} */

View File

@ -431,7 +431,11 @@ static const EnumPropertyItem rna_enum_shading_color_type_items[] = {
{V3D_SHADING_OBJECT_COLOR, "OBJECT", 0, "Object", "Show object color"},
{V3D_SHADING_RANDOM_COLOR, "RANDOM", 0, "Random", "Show random object color"},
{V3D_SHADING_VERTEX_COLOR, "VERTEX", 0, "Attribute", "Show active color attribute"},
{V3D_SHADING_TEXTURE_COLOR, "TEXTURE", 0, "Texture", "Show texture"},
{V3D_SHADING_TEXTURE_COLOR,
"TEXTURE",
0,
"Texture",
"Show the texture from the active image texture node using the active UV map coordinates"},
{0, NULL, 0, NULL, NULL},
};

View File

@ -379,19 +379,22 @@ static void node_geo_exec(GeoNodeExecParams params)
invalid_uv_count);
/* Then also deform edit curve information for use in sculpt mode. */
const CurvesGeometry &curves_orig = CurvesGeometry::wrap(edit_hints->curves_id_orig.geometry);
deform_curves(curves_orig,
*surface_mesh_orig,
*surface_mesh_eval,
surface_uv_coords,
reverse_uv_sampler_orig,
reverse_uv_sampler_eval,
corner_normals_orig,
corner_normals_eval,
rest_positions,
transforms.surface_to_curves,
edit_hint_positions,
edit_hint_rotations,
invalid_uv_count);
const Span<float2> surface_uv_coords_orig = curves_orig.surface_uv_coords();
if (!surface_uv_coords_orig.is_empty()) {
deform_curves(curves_orig,
*surface_mesh_orig,
*surface_mesh_eval,
surface_uv_coords_orig,
reverse_uv_sampler_orig,
reverse_uv_sampler_eval,
corner_normals_orig,
corner_normals_eval,
rest_positions,
transforms.surface_to_curves,
edit_hint_positions,
edit_hint_rotations,
invalid_uv_count);
}
}
curves.tag_positions_changed();

View File

@ -93,6 +93,10 @@ class CornersOfVertInput final : public bke::MeshFieldInput {
}
const Span<int> corners = vert_to_loop_map[vert_i];
if (corners.is_empty()) {
corner_of_vertex[selection_i] = 0;
continue;
}
/* Retrieve the connected corner indices as 64 bit integers for #materialize_compressed. */
corner_indices.reinitialize(corners.size());

View File

@ -93,6 +93,10 @@ class EdgesOfVertInput final : public bke::MeshFieldInput {
}
const Span<int> edges = vert_to_edge_map[vert_i];
if (edges.is_empty()) {
edge_of_vertex[selection_i] = 0;
continue;
}
/* Retrieve the connected edge indices as 64 bit integers for #materialize_compressed. */
edge_indices.reinitialize(edges.size());