UI: Asset Shelf (Experimental Feature) #104831

Closed
Julian Eisel wants to merge 399 commits from asset-shelf into main

When changing the target branch, be careful to rebase the branch in your fork to match. See documentation.
11 changed files with 59 additions and 73 deletions
Showing only changes of commit 9837c846f7 - Show all commits

View File

@@ -109,9 +109,6 @@ class MESH_UL_vgroups(UIList):
layout.label(text="", icon_value=icon)
class MESH_UL_shape_keys(UIList):
def draw_item(self, _context, layout, _data, item, icon, active_data, _active_propname, index):
# assert(isinstance(item, bpy.types.ShapeKey))

View File

@@ -733,7 +733,7 @@ enum {
typedef struct CustomDataTransferLayerMap {
struct CustomDataTransferLayerMap *next, *prev;
eCustomDataType data_type;
int data_type;
int mix_mode;
float mix_factor;
/** If non-NULL, array of weights, one for each dest item, replaces mix_factor. */

View File

@@ -180,7 +180,6 @@ struct Mesh *BKE_mesh_new_nomain_from_curve_displist(const struct Object *ob,
bool BKE_mesh_attribute_required(const char *name);
float (*BKE_mesh_orco_verts_get(struct Object *ob))[3];
void BKE_mesh_orco_verts_transform(struct Mesh *me, float (*orco)[3], int totvert, int invert);

View File

@@ -4865,7 +4865,7 @@ static void customdata_data_transfer_interp_generic(const CustomDataTransferLaye
* more than 0.5 of weight. */
int best_src_idx = 0;
const eCustomDataType data_type = laymap->data_type;
const int data_type = laymap->data_type;
const int mix_mode = laymap->mix_mode;
size_t data_size;
@@ -4883,7 +4883,7 @@ static void customdata_data_transfer_interp_generic(const CustomDataTransferLaye
data_size = laymap->data_size;
}
else {
const LayerTypeInfo *type_info = layerType_getInfo(data_type);
const LayerTypeInfo *type_info = layerType_getInfo(eCustomDataType(data_type));
data_size = size_t(type_info->size);
interp_cd = type_info->interp;
@@ -4952,7 +4952,7 @@ static void customdata_data_transfer_interp_generic(const CustomDataTransferLaye
}
}
else if (!(int(data_type) & CD_FAKE)) {
CustomData_data_mix_value(data_type, tmp_dst, data_dst, mix_mode, mix_factor);
CustomData_data_mix_value(eCustomDataType(data_type), tmp_dst, data_dst, mix_mode, mix_factor);
}
/* Else we can do nothing by default, needs custom interp func!
* Note this is here only for sake of consistency, not expected to be used much actually? */
@@ -4975,7 +4975,8 @@ void customdata_data_transfer_interp_normal_normals(const CustomDataTransferLaye
BLI_assert(weights != nullptr);
BLI_assert(count > 0);
const eCustomDataType data_type = laymap->data_type;
const eCustomDataType data_type = eCustomDataType(laymap->data_type);
BLI_assert(data_type == CD_NORMAL);
const int mix_mode = laymap->mix_mode;
SpaceTransform *space_transform = static_cast<SpaceTransform *>(laymap->interp_data);
@@ -4985,8 +4986,6 @@ void customdata_data_transfer_interp_normal_normals(const CustomDataTransferLaye
float tmp_dst[3];
BLI_assert(data_type == CD_NORMAL);
if (!sources) {
/* Not supported here, abort. */
return;
@@ -5007,7 +5006,7 @@ void CustomData_data_transfer(const MeshPairRemap *me_remap,
MeshPairRemapItem *mapit = me_remap->items;
const int totelem = me_remap->items_num;
const eCustomDataType data_type = laymap->data_type;
const int data_type = laymap->data_type;
const void *data_src = laymap->data_src;
void *data_dst = laymap->data_dst;
@@ -5036,7 +5035,7 @@ void CustomData_data_transfer(const MeshPairRemap *me_remap,
data_offset = laymap->data_offset;
}
else {
const LayerTypeInfo *type_info = layerType_getInfo(data_type);
const LayerTypeInfo *type_info = layerType_getInfo(eCustomDataType(data_type));
/* NOTE: we can use 'fake' CDLayers for crease :/. */
data_size = size_t(type_info->size);

View File

@@ -628,7 +628,7 @@ void BKE_mesh_remap_calc_verts_from_mesh(const int mode,
if (mesh_remap_bvhtree_query_nearest(
&treedata, &nearest, tmp_co, max_dist_sq, &hit_dist)) {
const int poly_index = looptri_polys[rayhit.index];
const int poly_index = looptri_polys[nearest.index];
if (mode == MREMAP_MODE_VERT_POLY_NEAREST) {
int index;
@@ -886,7 +886,7 @@ void BKE_mesh_remap_calc_edges_from_mesh(const int mode,
if (mesh_remap_bvhtree_query_nearest(&treedata, &nearest, tmp_co, max_dist_sq, &hit_dist))
{
const int poly_index = looptri_polys[rayhit.index];
const int poly_index = looptri_polys[nearest.index];
const blender::IndexRange poly_src = polys_src[poly_index];
const int *corner_edge_src = &corner_edges_src[poly_src.start()];
int nloops = int(poly_src.size());

View File

@@ -5005,6 +5005,8 @@ static void particlesystem_modifiersForeachIDLink(void *user_data,
void BKE_particlesystem_id_loop(ParticleSystem *psys, ParticleSystemIDFunc func, void *userdata)
{
ParticleTarget *pt;
LibraryForeachIDData *foreachid_data = userdata;
const int foreachid_data_flags = BKE_lib_query_foreachid_process_flags_get(foreachid_data);
func(psys, (ID **)&psys->part, userdata, IDWALK_CB_USER | IDWALK_CB_NEVER_NULL);
func(psys, (ID **)&psys->target_ob, userdata, IDWALK_CB_NOP);
@@ -5024,14 +5026,19 @@ void BKE_particlesystem_id_loop(ParticleSystem *psys, ParticleSystemIDFunc func,
func(psys, (ID **)&pt->ob, userdata, IDWALK_CB_NOP);
}
/* Even though psys->part should never be NULL, this can happen as an exception during deletion.
* See ID_REMAP_SKIP/FORCE/FLAG_NEVER_NULL_USAGE in BKE_library_remap. */
if (psys->part && psys->part->phystype == PART_PHYS_BOIDS) {
/* In case `psys->part` is NULL (See ID_REMAP_SKIP/FORCE/FLAG_NEVER_NULL_USAGE in
* #BKE_library_remap), or accessing it is forbidden, always handle particles for potential boids
* data. Unfortunate, but for now there is no other proper way to do this. */
if (!(psys->part && (foreachid_data_flags & IDWALK_NO_ORIG_POINTERS_ACCESS) == 0) ||
psys->part->phystype == PART_PHYS_BOIDS)
{
ParticleData *pa;
int p;
for (p = 0, pa = psys->particles; p < psys->totpart; p++, pa++) {
func(psys, (ID **)&pa->boid->ground, userdata, IDWALK_CB_NOP);
if (pa->boid != NULL) {
func(psys, (ID **)&pa->boid->ground, userdata, IDWALK_CB_NOP);
}
}
}
}

View File

@@ -394,8 +394,7 @@ Schedule compute_schedule(const Context &context, const DerivedNodeTree &tree)
int insertion_position = 0;
for (int i = 0; i < sorted_dependency_nodes.size(); i++) {
if (needed_buffers.lookup(doutput.node()) >
needed_buffers.lookup(sorted_dependency_nodes[i]))
{
needed_buffers.lookup(sorted_dependency_nodes[i])) {
insertion_position++;
}
else {

View File

@@ -10,7 +10,19 @@ GPU_SHADER_CREATE_INFO(compositor_read_pass_shared)
.sampler(0, ImageType::FLOAT_2D, "input_tx")
.compute_source("compositor_read_pass.glsl");
GPU_SHADER_CREATE_INFO(compositor_read_pass)
GPU_SHADER_CREATE_INFO(compositor_read_pass_float)
.additional_info("compositor_read_pass_shared")
.image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
.define("READ_EXPRESSION(pass_color)", "vec4(pass_color.r, vec3(0.0))")
.do_static_compilation(true);
GPU_SHADER_CREATE_INFO(compositor_read_pass_vector)
.additional_info("compositor_read_pass_shared")
.image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
.define("READ_EXPRESSION(pass_color)", "pass_color")
.do_static_compilation(true);
GPU_SHADER_CREATE_INFO(compositor_read_pass_color)
.additional_info("compositor_read_pass_shared")
.image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
.define("READ_EXPRESSION(pass_color)", "pass_color")

View File

@@ -1056,7 +1056,6 @@ void ED_view3d_check_mats_rv3d(struct RegionView3D *rv3d);
struct RV3DMatrixStore *ED_view3d_mats_rv3d_backup(struct RegionView3D *rv3d);
void ED_view3d_mats_rv3d_restore(struct RegionView3D *rv3d, struct RV3DMatrixStore *rv3dmat);
struct RenderEngineType *ED_view3d_engine_type(const struct Scene *scene, int drawtype);
bool ED_view3d_context_activate(struct bContext *C);

View File

@@ -226,8 +226,6 @@ void ED_operatortypes_object(void)
WM_operatortype_append(OBJECT_OT_vertex_weight_normalize_active_vertex);
WM_operatortype_append(OBJECT_OT_vertex_weight_copy);
WM_operatortype_append(TRANSFORM_OT_vertex_warp);
WM_operatortype_append(OBJECT_OT_move_to_collection);

View File

@@ -825,36 +825,47 @@ class RenderLayerOperation : public NodeOperation {
void execute() override
{
const int view_layer = bnode().custom1;
GPUTexture *pass_texture = context().get_input_texture(view_layer, RE_PASSNAME_COMBINED);
GPUTexture *combined_texture = context().get_input_texture(view_layer, RE_PASSNAME_COMBINED);
execute_image(pass_texture);
execute_alpha(pass_texture);
execute_pass("Image", combined_texture, "compositor_read_pass_color");
execute_pass("Alpha", combined_texture, "compositor_read_pass_alpha");
/* Other output passes are not supported for now, so allocate them as invalid. */
for (const bNodeSocket *output : this->node()->output_sockets()) {
if (!STR_ELEM(output->identifier, "Image", "Alpha")) {
Result &unsupported_result = get_result(output->identifier);
if (unsupported_result.should_compute()) {
unsupported_result.allocate_invalid();
context().set_info_message("Viewport compositor setup not fully supported");
}
if (STR_ELEM(output->identifier, "Image", "Alpha")) {
continue;
}
GPUTexture *pass_texture = context().get_input_texture(view_layer, output->identifier);
if (output->type == SOCK_FLOAT) {
execute_pass(output->identifier, pass_texture, "compositor_read_pass_float");
}
else if (output->type == SOCK_VECTOR) {
execute_pass(output->identifier, pass_texture, "compositor_read_pass_vector");
}
else if (output->type == SOCK_RGBA) {
execute_pass(output->identifier, pass_texture, "compositor_read_pass_color");
}
else {
BLI_assert_unreachable();
}
}
}
void execute_image(GPUTexture *pass_texture)
void execute_pass(const char *pass_name, GPUTexture *pass_texture, const char *shader_name)
{
Result &image_result = get_result("Image");
Result &image_result = get_result(pass_name);
if (!image_result.should_compute()) {
return;
}
if (pass_texture == nullptr) {
/* Pass not rendered (yet). */
/* Pass not rendered yet, or not supported by viewport. */
image_result.allocate_invalid();
context().set_info_message("Viewport compositor setup not fully supported");
return;
}
GPUShader *shader = shader_manager().get("compositor_read_pass");
GPUShader *shader = shader_manager().get(shader_name);
GPU_shader_bind(shader);
/* The compositing space might be limited to a subset of the pass texture, so only read that
@@ -876,41 +887,6 @@ class RenderLayerOperation : public NodeOperation {
GPU_texture_unbind(pass_texture);
image_result.unbind_as_image();
}
void execute_alpha(GPUTexture *pass_texture)
{
Result &alpha_result = get_result("Alpha");
if (!alpha_result.should_compute()) {
return;
}
if (pass_texture == nullptr) {
/* Pass not rendered (yet). */
alpha_result.allocate_invalid();
return;
}
GPUShader *shader = shader_manager().get("compositor_read_pass_alpha");
GPU_shader_bind(shader);
/* The compositing space might be limited to a subset of the pass texture, so only read that
* compositing region into an appropriately sized texture. */
const rcti compositing_region = context().get_compositing_region();
const int2 lower_bound = int2(compositing_region.xmin, compositing_region.ymin);
GPU_shader_uniform_2iv(shader, "compositing_region_lower_bound", lower_bound);
const int input_unit = GPU_shader_get_sampler_binding(shader, "input_tx");
GPU_texture_bind(pass_texture, input_unit);
const int2 compositing_region_size = context().get_compositing_region_size();
alpha_result.allocate_texture(Domain(compositing_region_size));
alpha_result.bind_as_image(shader, "output_img");
compute_dispatch_threads_at_least(shader, compositing_region_size);
GPU_shader_unbind();
GPU_texture_unbind(pass_texture);
alpha_result.unbind_as_image();
}
};
static NodeOperation *get_compositor_operation(Context &context, DNode node)