forked from blender/blender
Fix_105606_MetalTextureUploadRegression #1
@@ -567,6 +567,10 @@ void MetalDevice::compile_and_load(int device_id, MetalPipelineType pso_type)
   thread_scoped_lock lock(existing_devices_mutex);
   if (MetalDevice *instance = get_device_by_ID(device_id, lock)) {
     if (mtlLibrary) {
+      if (error && [error localizedDescription]) {
+        VLOG_WARNING << "MSL compilation messages: " << [[error localizedDescription] UTF8String];
+      }
+
       instance->mtlLibrary[pso_type] = mtlLibrary;

       starttime = time_dt();
@@ -715,7 +715,7 @@ void MetalKernelPipeline::compile()
      }
    }
  };
-  if (computePipelineStateDescriptor.linkedFunctions) {
+  if (linked_functions) {
    addComputePipelineFunctionsWithDescriptor();
  }

@@ -748,7 +748,7 @@ void MetalKernelPipeline::compile()
    }

    /* Add pipeline into the new archive (unless we did it earlier). */
-    if (pipeline && !computePipelineStateDescriptor.linkedFunctions) {
+    if (pipeline && !linked_functions) {
      addComputePipelineFunctionsWithDescriptor();
    }
  }
@@ -38,7 +38,7 @@ ccl_device_inline void gpu_parallel_sort_bucket_pass(const uint num_states,
                                                      ccl_gpu_shared int *buckets,
                                                      const ushort local_id,
                                                      const ushort local_size,
-                                                     const ushort grid_id)
+                                                     const uint grid_id)
 {
   /* Zero the bucket sizes. */
   if (local_id < max_shaders) {
@@ -89,7 +89,7 @@ ccl_device_inline void gpu_parallel_sort_write_pass(const uint num_states,
                                                     ccl_gpu_shared int *local_offset,
                                                     const ushort local_id,
                                                     const ushort local_size,
-                                                    const ushort grid_id)
+                                                    const uint grid_id)
 {
   /* Calculate each partition's global offset from the prefix sum of the active state counts per
    * partition. */
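
Note on the `ushort` to `uint` widening above (and in the kernel entry-point macros that follow): a 16-bit grid id wraps once a dispatch uses more than 65535 threadgroups, so two different partitions would compute the same offsets. A minimal, standalone C++ illustration of the wrap-around, with hypothetical values rather than Blender code:

```cpp
#include <cstdint>
#include <iostream>

int main()
{
  /* Hypothetical dispatch that uses more threadgroups than a 16-bit id can index. */
  const uint32_t num_groups = 70000;
  const uint32_t last_group = num_groups - 1;

  /* A ushort grid id wraps modulo 65536, so group 69999 aliases group 4463 and
   * would write its partition offsets on top of another partition's. */
  const uint16_t grid_id_16 = static_cast<uint16_t>(last_group);
  const uint32_t grid_id_32 = last_group;

  std::cout << grid_id_16 << " (wrapped) vs " << grid_id_32 << " (correct)\n";
  return 0;
}
```
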
@@ -109,7 +109,7 @@ struct kernel_gpu_##name \
       const uint metal_global_id, \
       const ushort metal_local_id, \
       const ushort metal_local_size, \
-      const ushort metal_grid_id, \
+      const uint metal_grid_id, \
       uint simdgroup_size, \
       uint simd_lane_index, \
       uint simd_group_index, \
@@ -122,7 +122,7 @@ kernel void cycles_metal_##name(device const kernel_gpu_##name *params_struct, \
     const uint metal_global_id [[thread_position_in_grid]], \
     const ushort metal_local_id [[thread_position_in_threadgroup]], \
     const ushort metal_local_size [[threads_per_threadgroup]], \
-    const ushort metal_grid_id [[threadgroup_position_in_grid]], \
+    const uint metal_grid_id [[threadgroup_position_in_grid]], \
     uint simdgroup_size [[threads_per_simdgroup]], \
     uint simd_lane_index [[thread_index_in_simdgroup]], \
     uint simd_group_index [[simdgroup_index_in_threadgroup]], \
@@ -135,7 +135,7 @@ void kernel_gpu_##name::run(thread MetalKernelContext& context, \
     const uint metal_global_id, \
     const ushort metal_local_id, \
     const ushort metal_local_size, \
-    const ushort metal_grid_id, \
+    const uint metal_grid_id, \
     uint simdgroup_size, \
     uint simd_lane_index, \
     uint simd_group_index, \
@@ -149,7 +149,7 @@ ccl_device_inline void integrate_distant_lights(KernelGlobals kg,
          ((ls.shader & SHADER_EXCLUDE_TRANSMIT) && (path_flag & PATH_RAY_TRANSMIT)) ||
          ((ls.shader & SHADER_EXCLUDE_CAMERA) && (path_flag & PATH_RAY_CAMERA)) ||
          ((ls.shader & SHADER_EXCLUDE_SCATTER) && (path_flag & PATH_RAY_VOLUME_SCATTER)))
-        return;
+        continue;
     }
 #endif

@@ -159,7 +159,7 @@ ccl_device_inline void integrate_distant_lights(KernelGlobals kg,
        * generate a firefly for small lights since it is improbable. */
       const ccl_global KernelLight *klight = &kernel_data_fetch(lights, lamp);
       if (klight->use_caustics)
-        return;
+        continue;
     }
 #endif /* __MNEE__ */

@@ -169,7 +169,7 @@ ccl_device_inline void integrate_distant_lights(KernelGlobals kg,
     ccl_private ShaderData *emission_sd = AS_SHADER_DATA(&emission_sd_storage);
     Spectrum light_eval = light_sample_shader_eval(kg, state, emission_sd, &ls, ray_time);
     if (is_zero(light_eval)) {
-      return;
+      continue;
     }

     /* MIS weighting. */
@@ -252,7 +252,7 @@ class GRAPH_MT_key(Menu):

         layout.separator()
         layout.operator_menu_enum("graph.keyframe_insert", "type")
-        layout.operator_menu_enum("graph.fmodifier_add", "type")
+        layout.operator_menu_enum("graph.fmodifier_add", "type").only_active = False
         layout.operator("graph.sound_bake")

         layout.separator()
@@ -378,6 +378,8 @@ typedef struct BMesh {
    * This allows save invalidation of a #BMesh when it's freed,
    * so the Python object will report it as having been removed,
    * instead of crashing on invalid memory access.
+   *
+   * Doesn't hold a #PyObject reference, cleared when the last object is de-referenced.
    */
   void *py_handle;
 } BMesh;
@@ -8,7 +8,7 @@ void node_composite_luminance_matte(vec4 color,
                                     out float matte)
 {
   float luminance = get_luminance(color.rgb, luminance_coefficients);
-  float alpha = clamp(0.0, 1.0, (luminance - low) / (high - low));
+  float alpha = clamp((luminance - low) / (high - low), 0.0, 1.0);
   matte = min(alpha, color.a);
   result = color * matte;
 }
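
For reference, GLSL's `clamp()` takes `(value, min, max)`; the old call passed the luminance ratio as the third argument, so the constant `0.0` was being clamped instead of the ratio. A tiny standalone C++ analogue of the corrected call (illustrative only, not the shader itself):

```cpp
#include <algorithm>
#include <iostream>

int main()
{
  /* Hypothetical matte parameters. */
  const float low = 0.2f, high = 0.8f, luminance = 0.95f;
  const float ratio = (luminance - low) / (high - low); /* 1.25, out of range */

  /* Correct argument order: the value to clamp comes first, then min, then max. */
  const float alpha = std::clamp(ratio, 0.0f, 1.0f);

  std::cout << alpha << "\n"; /* prints 1 */
  return 0;
}
```
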
@@ -101,6 +101,7 @@ SSR_INTERFACE
 # if defined(USE_BARYCENTRICS) && !defined(HAIR_SHADER)
 vec3 barycentric_distances_get()
 {
+# if defined(GPU_OPENGL)
   /* NOTE: No need to undo perspective divide since it is not applied yet. */
   vec3 pos0 = (ProjectionMatrixInverse * gpu_position_at_vertex(0)).xyz;
   vec3 pos1 = (ProjectionMatrixInverse * gpu_position_at_vertex(1)).xyz;
@@ -119,6 +120,17 @@ vec3 barycentric_distances_get()
   d = dot(d10, edge21);
   dists.z = sqrt(dot(edge21, edge21) - d * d);
   return dists.xyz;
+# elif defined(GPU_METAL)
+  /* Calculate Barycentric distances from available parameters in Metal. */
+  float3 wp_delta = (length(dfdx(worldPosition.xyz)) + length(dfdy(worldPosition.xyz)));
+  float3 bc_delta = (length(dfdx(gpu_BaryCoord)) + length(dfdy(gpu_BaryCoord)));
+  float3 rate_of_change = wp_delta / bc_delta;
+  vec3 dists;
+  dists.x = length(rate_of_change * (1.0 - gpu_BaryCoord.x));
+  dists.y = length(rate_of_change * (1.0 - gpu_BaryCoord.y));
+  dists.z = length(rate_of_change * (1.0 - gpu_BaryCoord.z));
+  return dists.xyz;
+# endif
 }
 # endif

@@ -1715,6 +1715,10 @@ static int gpencil_strokes_paste_exec(bContext *C, wmOperator *op)
    */

   for (bGPDframe *gpf = init_gpf; gpf; gpf = gpf->next) {
+    /* Active frame is copied later, so don't need duplicate the stroke here. */
+    if (gpl->actframe == gpf) {
+      continue;
+    }
     if (gpf->flag & GP_FRAME_SELECT) {
       if (gpf) {
         /* Create new stroke */
@@ -1411,7 +1411,8 @@ static float gpencil_sculpt_rotation_eval_get(tGP_BrushEditData *gso,

   const GP_SpaceConversion *gsc = &gso->gsc;
   bGPDstroke *gps_orig = (gps_eval->runtime.gps_orig) ? gps_eval->runtime.gps_orig : gps_eval;
-  bGPDspoint *pt_orig = &gps_orig->points[pt_eval->runtime.idx_orig];
+  bGPDspoint *pt_orig = (pt_eval->runtime.pt_orig) ? &gps_orig->points[pt_eval->runtime.idx_orig] :
+                                                     pt_eval;
   bGPDspoint *pt_prev_eval = NULL;
   bGPDspoint *pt_orig_prev = NULL;
   if (idx_eval != 0) {
@@ -2039,24 +2039,16 @@ static bool gpencil_generic_stroke_select(bContext *C,
   /* init space conversion stuff */
   gpencil_point_conversion_init(C, &gsc);

+  /* Use only object transform matrix because all layer transformations are already included
+   * in the evaluated stroke. */
+  Depsgraph *depsgraph = CTX_data_ensure_evaluated_depsgraph(C);
+  Object *ob_eval = depsgraph != NULL ? DEG_get_evaluated_object(depsgraph, ob) : ob;
+  float select_mat[4][4];
+  copy_m4_m4(select_mat, ob_eval->object_to_world);
+
   /* deselect all strokes first? */
-  if (SEL_OP_USE_PRE_DESELECT(sel_op) || GPENCIL_PAINT_MODE(gpd)) {
-    /* Set selection index to 0. */
-    gpd->select_last_index = 0;
-
-    CTX_DATA_BEGIN (C, bGPDstroke *, gps, editable_gpencil_strokes) {
-      bGPDspoint *pt;
-      int i;
-
-      for (i = 0, pt = gps->points; i < gps->totpoints; i++, pt++) {
-        pt->flag &= ~GP_SPOINT_SELECT;
-      }
-
-      gps->flag &= ~GP_STROKE_SELECT;
-      BKE_gpencil_stroke_select_index_reset(gps);
-    }
-    CTX_DATA_END;
-
+  if (SEL_OP_USE_PRE_DESELECT(sel_op)) {
+    deselect_all_selected(C);
     changed = true;
   }

@@ -2071,9 +2063,9 @@ static bool gpencil_generic_stroke_select(bContext *C,
     for (i = 0, pt = gps->points; i < gps->totpoints; i++, pt++) {
       bGPDspoint *pt_active = (pt->runtime.pt_orig) ? pt->runtime.pt_orig : pt;

-      /* Convert point coords to screen-space. */
-      const bool is_inside = is_inside_fn(
-          gsc.region, gpstroke_iter.diff_mat, &pt_active->x, user_data);
+      /* Convert point coords to screen-space. Needs to use the evaluated point
+       * to consider modifiers. */
+      const bool is_inside = is_inside_fn(gsc.region, select_mat, &pt->x, user_data);
       if (strokemode == false) {
         const bool is_select = (pt_active->flag & GP_SPOINT_SELECT) != 0;
         const int sel_op_result = ED_select_op_action_deselected(sel_op, is_select, is_inside);
@@ -147,6 +147,18 @@ void immDrawPixelsTexTiled_scaling_clipping(IMMDrawPixelsTexState *state,
                                             const float color[4])
 {
   int subpart_x, subpart_y, tex_w = 256, tex_h = 256;
+#ifdef __APPLE__
+  if (GPU_backend_get_type() == GPU_BACKEND_METAL) {
+    /* NOTE(Metal): The Metal backend will keep all temporary texture memory within a command
+     * submission in-flight, so using a partial tile size does not provide any tangible memory
+     * reduction, but does incur additional API overhead and significant cache inefficiency on AMD
+     * platforms.
+     * The Metal API also provides smart resource paging such that the application can
+     * still efficiently swap memory, even if system is low in physical memory. */
+    tex_w = img_w;
+    tex_h = img_h;
+  }
+#endif
   int seamless, offset_x, offset_y, nsubparts_x, nsubparts_y;
   int components;
   const bool use_clipping = ((clip_min_x < clip_max_x) && (clip_min_y < clip_max_y));
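
Rough arithmetic behind the Metal tile-size change above: with the default 256-pixel tiles an image is drawn in `ceil(img_w / 256) * ceil(img_h / 256)` sub-uploads, each with its own temporary texture, while using the full image size collapses that to a single upload. A small standalone C++ sketch of the sub-part count, using a hypothetical resolution rather than the Blender function:

```cpp
#include <iostream>

/* Number of tiles needed to cover one image dimension with a given tile size. */
static int subpart_count(int img_size, int tile_size)
{
  return (img_size + tile_size - 1) / tile_size; /* ceil division */
}

int main()
{
  /* Hypothetical 4K image. */
  const int img_w = 3840, img_h = 2160;

  const int tiled = subpart_count(img_w, 256) * subpart_count(img_h, 256);
  const int whole = subpart_count(img_w, img_w) * subpart_count(img_h, img_h);

  std::cout << tiled << " tiled uploads vs " << whole << " whole-image upload\n"; /* 135 vs 1 */
  return 0;
}
```
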
@@ -3882,6 +3882,16 @@ static void filelist_readjob_all_asset_library(FileListReadJob *job_params,
   /* A valid, but empty file-list from now. */
   filelist->filelist.entries_num = 0;

+  asset_system::AssetLibrary *current_file_library;
+  {
+    AssetLibraryReference library_ref{};
+    library_ref.custom_library_index = -1;
+    library_ref.type = ASSET_LIBRARY_LOCAL;
+
+    current_file_library = AS_asset_library_load(job_params->current_main, library_ref);
+  }
+
+  job_params->load_asset_library = current_file_library;
   filelist_readjob_main_assets_add_items(job_params, stop, do_update, progress);

   /* When only doing partially reload for main data, we're done. */
@@ -3904,6 +3914,10 @@ static void filelist_readjob_all_asset_library(FileListReadJob *job_params,
     if (root_path.is_empty()) {
       return;
     }
+    if (&nested_library == current_file_library) {
+      /* Skip the "Current File" library, it's already loaded above. */
+      return;
+    }

     /* Override library info to read this library. */
     job_params->load_asset_library = &nested_library;
@@ -2843,7 +2843,7 @@ void GRAPH_OT_fmodifier_add(wmOperatorType *ot)
   ot->prop = prop;

   RNA_def_boolean(
-      ot->srna, "only_active", 1, "Only Active", "Only add F-Modifier to active F-Curve");
+      ot->srna, "only_active", false, "Only Active", "Only add F-Modifier to active F-Curve");
 }

 /** \} */
@@ -3200,7 +3200,7 @@ static eSnapMode transform_snap_context_project_view3d_mixed_impl(SnapObjectCont
     Depsgraph *depsgraph,
     const ARegion *region,
     const View3D *v3d,
-    const eSnapMode snap_to_flag,
+    eSnapMode snap_to_flag,
     const SnapObjectParams *params,
     const float init_co[3],
     const float mval[2],
@@ -3235,7 +3235,16 @@ static eSnapMode transform_snap_context_project_view3d_mixed_impl(SnapObjectCont

   const RegionView3D *rv3d = static_cast<RegionView3D *>(region->regiondata);

-  bool use_occlusion_test = params->use_occlusion_test && !XRAY_ENABLED(v3d);
+  if (snap_to_flag & (SCE_SNAP_MODE_FACE_RAYCAST | SCE_SNAP_MODE_FACE_NEAREST)) {
+    if (params->use_occlusion_test && XRAY_ENABLED(v3d)) {
+      /* Remove Snap to Face with Occlusion Test as they are not visible in wireframe mode. */
+      snap_to_flag &= ~(SCE_SNAP_MODE_FACE_RAYCAST | SCE_SNAP_MODE_FACE_NEAREST);
+    }
+    else if (prev_co == nullptr || init_co == nullptr) {
+      /* No location to work with #SCE_SNAP_MODE_FACE_NEAREST. */
+      snap_to_flag &= ~SCE_SNAP_MODE_FACE_NEAREST;
+    }
+  }

   /* NOTE: if both face ray-cast and face nearest are enabled, first find result of nearest, then
    * override with ray-cast. */
@@ -3261,7 +3270,7 @@ static eSnapMode transform_snap_context_project_view3d_mixed_impl(SnapObjectCont
     }
   }

-  if (snap_to_flag & SCE_SNAP_MODE_FACE_RAYCAST || use_occlusion_test) {
+  if (snap_to_flag & SCE_SNAP_MODE_FACE_RAYCAST) {
     float ray_start[3], ray_normal[3];
     if (!ED_view3d_win_to_ray_clipped_ex(
             depsgraph, region, v3d, mval, nullptr, ray_normal, ray_start, true)) {
@@ -1268,6 +1268,11 @@ bool MTLShader::generate_msl_from_glsl(const shader::ShaderCreateInfo *info)
       ss_fragment << "uint gl_PrimitiveID;" << std::endl;
     }

+    /* Global barycentrics. */
+    if (msl_iface.uses_barycentrics) {
+      ss_fragment << "vec3 gpu_BaryCoord;\n";
+    }
+
     /* Add Texture members. */
     for (const MSLTextureSampler &tex : msl_iface.texture_samplers) {
       if (bool(tex.stage & ShaderStage::FRAGMENT)) {
@@ -2036,33 +2041,7 @@ std::string MSLGeneratorInterface::generate_msl_fragment_entry_stub()

   /* Barycentrics. */
   if (this->uses_barycentrics) {
-
-    /* Main barycentrics. */
     out << shader_stage_inst_name << ".gpu_BaryCoord = mtl_barycentric_coord.xyz;" << std::endl;
-
-    /* barycentricDist represents the world-space distance from the current world-space position
-     * to the opposite edge of the vertex. */
-    out << "float3 worldPos = " << shader_stage_inst_name << ".worldPosition.xyz;" << std::endl;
-    out << "float3 wpChange = (length(dfdx(worldPos))+length(dfdy(worldPos)));" << std::endl;
-    out << "float3 bcChange = "
-           "(length(dfdx(mtl_barycentric_coord))+length(dfdy(mtl_barycentric_coord)));"
-        << std::endl;
-    out << "float3 rateOfChange = wpChange/bcChange;" << std::endl;
-
-    /* Distance to edge using inverse barycentric value, as rather than the length of 0.7
-     * contribution, we'd want the distance to the opposite side. */
-    out << shader_stage_inst_name
-        << ".gpu_BarycentricDist.x = length(rateOfChange * "
-           "(1.0-mtl_barycentric_coord.x));"
-        << std::endl;
-    out << shader_stage_inst_name
-        << ".gpu_BarycentricDist.y = length(rateOfChange * "
-           "(1.0-mtl_barycentric_coord.y));"
-        << std::endl;
-    out << shader_stage_inst_name
-        << ".gpu_BarycentricDist.z = length(rateOfChange * "
-           "(1.0-mtl_barycentric_coord.z));"
-        << std::endl;
   }

   /* Populate Uniforms and uniform blocks. */
@@ -594,17 +594,6 @@ void gpu::MTLTexture::update_sub(
     }
   }

-  /* Prepare staging buffer for data. */
-  id<MTLBuffer> staging_buffer = nil;
-  uint64_t staging_buffer_offset = 0;
-
-  /* Fetch allocation from scratch buffer. */
-  MTLTemporaryBuffer allocation =
-      ctx->get_scratchbuffer_manager().scratch_buffer_allocate_range_aligned(totalsize, 256);
-  memcpy(allocation.data, data, totalsize);
-  staging_buffer = allocation.metal_buffer;
-  staging_buffer_offset = allocation.buffer_offset;
-
   /* Common Properties. */
   MTLPixelFormat compatible_write_format = mtl_format_get_writeable_view_format(
       destination_format);
@@ -616,6 +605,12 @@ void gpu::MTLTexture::update_sub(
     return;
   }

+  /* Fetch allocation from memory pool. */
+  MTLBuffer *temp_allocation = MTLContext::get_global_memory_manager()->allocate_with_data(
+      totalsize, true, data);
+  id<MTLBuffer> staging_buffer = temp_allocation->get_metal_buffer();
+  BLI_assert(staging_buffer != nil);
+
   /* Prepare command encoders. */
   id<MTLBlitCommandEncoder> blit_encoder = nil;
   id<MTLComputeCommandEncoder> compute_encoder = nil;
@@ -697,7 +692,7 @@ void gpu::MTLTexture::update_sub(
       int max_array_index = ((type_ == GPU_TEXTURE_1D_ARRAY) ? extent[1] : 1);
       for (int array_index = 0; array_index < max_array_index; array_index++) {

-        int buffer_array_offset = staging_buffer_offset + (bytes_per_image * array_index);
+        int buffer_array_offset = (bytes_per_image * array_index);
         [blit_encoder
                  copyFromBuffer:staging_buffer
                    sourceOffset:buffer_array_offset
@@ -727,7 +722,7 @@ void gpu::MTLTexture::update_sub(
       MTLComputeState &cs = ctx->main_command_buffer.get_compute_state();
       cs.bind_pso(pso);
       cs.bind_compute_bytes(&params, sizeof(params), 0);
-      cs.bind_compute_buffer(staging_buffer, staging_buffer_offset, 1);
+      cs.bind_compute_buffer(staging_buffer, 0, 1);
       cs.bind_compute_texture(texture_handle, 0);
       [compute_encoder
           dispatchThreads:MTLSizeMake(extent[0], 1, 1) /* Width, Height, Layer */
@@ -747,7 +742,7 @@ void gpu::MTLTexture::update_sub(
       MTLComputeState &cs = ctx->main_command_buffer.get_compute_state();
       cs.bind_pso(pso);
       cs.bind_compute_bytes(&params, sizeof(params), 0);
-      cs.bind_compute_buffer(staging_buffer, staging_buffer_offset, 1);
+      cs.bind_compute_buffer(staging_buffer, 0, 1);
       cs.bind_compute_texture(texture_handle, 0);
       [compute_encoder
           dispatchThreads:MTLSizeMake(extent[0], extent[1], 1) /* Width, layers, nil */
@@ -779,7 +774,7 @@ void gpu::MTLTexture::update_sub(
         }

         [blit_encoder copyFromBuffer:staging_buffer
-                        sourceOffset:staging_buffer_offset + texture_array_relative_offset
+                        sourceOffset:texture_array_relative_offset
                    sourceBytesPerRow:bytes_per_row
                  sourceBytesPerImage:bytes_per_image
                           sourceSize:MTLSizeMake(extent[0], extent[1], 1)
@@ -807,7 +802,7 @@ void gpu::MTLTexture::update_sub(
       MTLComputeState &cs = ctx->main_command_buffer.get_compute_state();
       cs.bind_pso(pso);
       cs.bind_compute_bytes(&params, sizeof(params), 0);
-      cs.bind_compute_buffer(staging_buffer, staging_buffer_offset, 1);
+      cs.bind_compute_buffer(staging_buffer, 0, 1);
       cs.bind_compute_texture(texture_handle, 0);
       [compute_encoder
           dispatchThreads:MTLSizeMake(
@@ -828,7 +823,7 @@ void gpu::MTLTexture::update_sub(
       MTLComputeState &cs = ctx->main_command_buffer.get_compute_state();
       cs.bind_pso(pso);
       cs.bind_compute_bytes(&params, sizeof(params), 0);
-      cs.bind_compute_buffer(staging_buffer, staging_buffer_offset, 1);
+      cs.bind_compute_buffer(staging_buffer, 0, 1);
       cs.bind_compute_texture(texture_handle, 0);
       [compute_encoder dispatchThreads:MTLSizeMake(extent[0],
                                                    extent[1],
@@ -848,7 +843,7 @@ void gpu::MTLTexture::update_sub(
                                             ctx->pipeline_state.unpack_row_length);
       int bytes_per_image = bytes_per_row * extent[1];
       [blit_encoder copyFromBuffer:staging_buffer
-                      sourceOffset:staging_buffer_offset
+                      sourceOffset:0
                  sourceBytesPerRow:bytes_per_row
                sourceBytesPerImage:bytes_per_image
                         sourceSize:MTLSizeMake(extent[0], extent[1], extent[2])
@@ -871,7 +866,7 @@ void gpu::MTLTexture::update_sub(
       MTLComputeState &cs = ctx->main_command_buffer.get_compute_state();
       cs.bind_pso(pso);
       cs.bind_compute_bytes(&params, sizeof(params), 0);
-      cs.bind_compute_buffer(staging_buffer, staging_buffer_offset, 1);
+      cs.bind_compute_buffer(staging_buffer, 0, 1);
       cs.bind_compute_texture(texture_handle, 0);
       [compute_encoder
           dispatchThreads:MTLSizeMake(
@@ -896,7 +891,7 @@ void gpu::MTLTexture::update_sub(
         int face_index = offset[2] + i;

         [blit_encoder copyFromBuffer:staging_buffer
-                        sourceOffset:staging_buffer_offset + texture_array_relative_offset
+                        sourceOffset:texture_array_relative_offset
                    sourceBytesPerRow:bytes_per_row
                  sourceBytesPerImage:bytes_per_image
                           sourceSize:MTLSizeMake(extent[0], extent[1], 1)
@@ -930,7 +925,7 @@ void gpu::MTLTexture::update_sub(
       for (int i = 0; i < extent[2]; i++) {
         int face_index = offset[2] + i;
         [blit_encoder copyFromBuffer:staging_buffer
-                        sourceOffset:staging_buffer_offset + texture_array_relative_offset
+                        sourceOffset:texture_array_relative_offset
                    sourceBytesPerRow:bytes_per_row
                  sourceBytesPerImage:bytes_per_image
                           sourceSize:MTLSizeMake(extent[0], extent[1], 1)
@@ -1058,6 +1053,11 @@ void gpu::MTLTexture::update_sub(

   /* Decrement texture reference counts. This ensures temporary texture views are released. */
   [texture_handle release];
+
+  /* Release temporary staging buffer allocation.
+   * NOTE: Allocation will be tracked with command submission and released once no longer in use.
+   */
+  temp_allocation->free();
 }
 }

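
The `update_sub()` hunks above replace the scratch-buffer staging path with a pooled buffer created directly from the source data via `allocate_with_data()`; every encoder then reads from offset 0, and `free()` is called right after encoding while the memory manager keeps the allocation alive until the command submission that references it completes (this is the texture-upload change the PR title refers to, #105606). Below is a minimal, hypothetical C++ sketch of that lifetime pattern, with illustrative stand-ins rather than the Blender/Metal API:

```cpp
#include <cstdio>
#include <memory>
#include <vector>

/* Illustrative stand-ins only; names and types are hypothetical. */
struct StagingBuffer {
  std::vector<unsigned char> bytes;
  bool freed = false;
};

struct CommandSubmission {
  /* Buffers referenced by encoded commands stay owned here until completion. */
  std::vector<std::shared_ptr<StagingBuffer>> in_flight;

  void complete()
  {
    /* Only after the GPU has finished can a buffer flagged as freed be recycled. */
    for (const auto &buf : in_flight) {
      if (buf->freed) {
        std::printf("recycling %zu bytes after submission completes\n", buf->bytes.size());
      }
    }
    in_flight.clear();
  }
};

int main()
{
  CommandSubmission submission;

  /* "allocate_with_data": the pool hands out a buffer pre-filled with the pixel data. */
  auto staging = std::make_shared<StagingBuffer>();
  staging->bytes.assign(1024, 0xAB);

  /* Encoding a copy records a reference in the submission; the source offset is always 0 now. */
  submission.in_flight.push_back(staging);

  /* "free()" right after encoding only flags the buffer; the submission still owns it. */
  staging->freed = true;

  /* The GPU finishes later; only at this point is the memory actually reusable. */
  submission.complete();
  return 0;
}
```
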
@@ -360,9 +360,16 @@ static std::ostream &print_qualifier(std::ostream &os, const Qualifier &qualifie
   return os;
 }

-static void print_resource(std::ostream &os, const ShaderCreateInfo::Resource &res)
+static void print_resource(std::ostream &os,
+                           const ShaderCreateInfo::Resource &res,
+                           bool auto_resource_location)
 {
-  if (GLContext::explicit_location_support) {
+  if (auto_resource_location && res.bind_type == ShaderCreateInfo::Resource::BindType::SAMPLER) {
+    /* Skip explicit binding location for samplers when not needed, since drivers can usually
+     * handle more sampler declarations this way (as long as they're not actually used by the
+     * shader). See #105661. */
+  }
+  else if (GLContext::explicit_location_support) {
     os << "layout(binding = " << res.slot;
     if (res.bind_type == ShaderCreateInfo::Resource::BindType::IMAGE) {
       os << ", " << to_string(res.image.format);
@@ -466,14 +473,14 @@ std::string GLShader::resources_declare(const ShaderCreateInfo &info) const

   ss << "\n/* Pass Resources. */\n";
   for (const ShaderCreateInfo::Resource &res : info.pass_resources_) {
-    print_resource(ss, res);
+    print_resource(ss, res, info.auto_resource_location_);
   }
   for (const ShaderCreateInfo::Resource &res : info.pass_resources_) {
     print_resource_alias(ss, res);
   }
   ss << "\n/* Batch Resources. */\n";
   for (const ShaderCreateInfo::Resource &res : info.batch_resources_) {
-    print_resource(ss, res);
+    print_resource(ss, res, info.auto_resource_location_);
   }
   for (const ShaderCreateInfo::Resource &res : info.batch_resources_) {
     print_resource_alias(ss, res);
@@ -2296,10 +2296,6 @@ typedef enum eSnapMode {
   SCE_SNAP_MODE_EDGE_PERPENDICULAR = (1 << 5),
   SCE_SNAP_MODE_FACE_NEAREST = (1 << 8),

-  SCE_SNAP_MODE_GEOM = (SCE_SNAP_MODE_VERTEX | SCE_SNAP_MODE_EDGE | SCE_SNAP_MODE_FACE_RAYCAST |
-                        SCE_SNAP_MODE_EDGE_PERPENDICULAR | SCE_SNAP_MODE_EDGE_MIDPOINT |
-                        SCE_SNAP_MODE_FACE_NEAREST),
-
   /** #ToolSettings.snap_node_mode */
   SCE_SNAP_MODE_NODE_X = (1 << 0),
   SCE_SNAP_MODE_NODE_Y = (1 << 1),
@@ -2314,6 +2310,10 @@ typedef enum eSnapMode {
 ENUM_OPERATORS(eSnapMode, SCE_SNAP_MODE_GRID)
 #endif

+#define SCE_SNAP_MODE_GEOM \
+  (SCE_SNAP_MODE_VERTEX | SCE_SNAP_MODE_EDGE | SCE_SNAP_MODE_FACE_RAYCAST | \
+   SCE_SNAP_MODE_EDGE_PERPENDICULAR | SCE_SNAP_MODE_EDGE_MIDPOINT | SCE_SNAP_MODE_FACE_NEAREST)
+
 /** #SequencerToolSettings.snap_mode */
 #define SEQ_SNAP_TO_STRIPS (1 << 0)
 #define SEQ_SNAP_TO_CURRENT_FRAME (1 << 1)
@@ -204,6 +204,51 @@ static StructRNA *rna_FModifierType_refine(struct PointerRNA *ptr)
 # include "DEG_depsgraph.h"
 # include "DEG_depsgraph_build.h"

+/**
+ * \warning this isn't efficient but it's unavoidable
+ * when only the #ID and the #DriverVar are known.
+ */
+static FCurve *rna_FCurve_find_driver_by_variable(ID *owner_id, DriverVar *dvar)
+{
+  AnimData *adt = BKE_animdata_from_id(owner_id);
+  BLI_assert(adt != NULL);
+  LISTBASE_FOREACH (FCurve *, fcu, &adt->drivers) {
+    ChannelDriver *driver = fcu->driver;
+    if (driver == NULL) {
+      continue;
+    }
+    if (BLI_findindex(&driver->variables, dvar) != -1) {
+      return fcu;
+    }
+  }
+  return NULL;
+}
+
+/**
+ * \warning this isn't efficient but it's unavoidable
+ * when only the #ID and the #DriverTarget are known.
+ */
+static FCurve *rna_FCurve_find_driver_by_target(ID *owner_id, DriverTarget *dtar)
+{
+  AnimData *adt = BKE_animdata_from_id(owner_id);
+  BLI_assert(adt != NULL);
+  LISTBASE_FOREACH (FCurve *, fcu, &adt->drivers) {
+    ChannelDriver *driver = fcu->driver;
+    if (driver == NULL) {
+      continue;
+    }
+    LISTBASE_FOREACH (DriverVar *, dvar, &driver->variables) {
+      /* NOTE: Use #MAX_DRIVER_TARGETS instead of `dvar->num_targets` because
+       * it's possible RNA holds a reference to a target that has been removed.
+       * In this case it's best to return the #FCurve it belongs to instead of nothing. */
+      if (ARRAY_HAS_ITEM(dtar, &dvar->targets[0], MAX_DRIVER_TARGETS)) {
+        return fcu;
+      }
+    }
+  }
+  return NULL;
+}
+
 static bool rna_ChannelDriver_is_simple_expression_get(PointerRNA *ptr)
 {
   ChannelDriver *driver = ptr->data;
@@ -211,20 +256,27 @@ static bool rna_ChannelDriver_is_simple_expression_get(PointerRNA *ptr)
   return BKE_driver_has_simple_expression(driver);
 }

-static void rna_ChannelDriver_update_data(Main *bmain, Scene *scene, PointerRNA *ptr)
+static void rna_ChannelDriver_update_data_impl(Main *bmain,
+                                               Scene *scene,
+                                               ID *owner_id,
+                                               ChannelDriver *driver)
 {
-  ID *id = ptr->owner_id;
-  ChannelDriver *driver = ptr->data;
-
   driver->flag &= ~DRIVER_FLAG_INVALID;

   /* TODO: this really needs an update guard... */
   DEG_relations_tag_update(bmain);
-  DEG_id_tag_update(id, ID_RECALC_TRANSFORM | ID_RECALC_GEOMETRY);
+  DEG_id_tag_update(owner_id, ID_RECALC_TRANSFORM | ID_RECALC_GEOMETRY);

   WM_main_add_notifier(NC_SCENE | ND_FRAME, scene);
 }

+static void rna_ChannelDriver_update_data(Main *bmain, Scene *scene, PointerRNA *ptr)
+{
+  ID *id = ptr->owner_id;
+  ChannelDriver *driver = ptr->data;
+  rna_ChannelDriver_update_data_impl(bmain, scene, id, driver);
+}
+
 static void rna_ChannelDriver_update_expr(Main *bmain, Scene *scene, PointerRNA *ptr)
 {
   ChannelDriver *driver = ptr->data;
@@ -238,33 +290,47 @@ static void rna_ChannelDriver_update_expr(Main *bmain, Scene *scene, PointerRNA

 static void rna_DriverTarget_update_data(Main *bmain, Scene *scene, PointerRNA *ptr)
 {
-  PointerRNA driverptr;
-  ChannelDriver *driver;
-  FCurve *fcu;
-  AnimData *adt = BKE_animdata_from_id(ptr->owner_id);
-
-  /* find the driver this belongs to and update it */
-  for (fcu = adt->drivers.first; fcu; fcu = fcu->next) {
-    driver = fcu->driver;
-    fcu->flag &= ~FCURVE_DISABLED;
-
-    if (driver) {
-      /* FIXME: need to be able to search targets for required one. */
-      // BLI_findindex(&driver->targets, ptr->data) != -1)
-      RNA_pointer_create(ptr->owner_id, &RNA_Driver, driver, &driverptr);
-      rna_ChannelDriver_update_data(bmain, scene, &driverptr);
-    }
-  }
+  DriverTarget *dtar = (DriverTarget *)ptr->data;
+  FCurve *fcu = rna_FCurve_find_driver_by_target(ptr->owner_id, dtar);
+  BLI_assert(fcu); /* This hints at an internal error, data may be corrupt. */
+  if (UNLIKELY(fcu == NULL)) {
+    return;
+  }
+  /* Find function ensures it's never NULL. */
+  ChannelDriver *driver = fcu->driver;
+  fcu->flag &= ~FCURVE_DISABLED;
+  rna_ChannelDriver_update_data_impl(bmain, scene, ptr->owner_id, driver);
 }

-static void rna_DriverTarget_update_name(Main *bmain, Scene *scene, PointerRNA *ptr)
+static void rna_DriverVariable_update_name(Main *bmain, Scene *scene, PointerRNA *ptr)
 {
-  ChannelDriver *driver = ptr->data;
-  rna_DriverTarget_update_data(bmain, scene, ptr);
+  DriverVar *dvar = (DriverVar *)ptr->data;
+  FCurve *fcu = rna_FCurve_find_driver_by_variable(ptr->owner_id, dvar);
+  BLI_assert(fcu); /* This hints at an internal error, data may be corrupt. */
+  if (UNLIKELY(fcu == NULL)) {
+    return;
+  }
+  /* Find function ensures it's never NULL. */
+  ChannelDriver *driver = fcu->driver;
+  fcu->flag &= ~FCURVE_DISABLED;
+  rna_ChannelDriver_update_data_impl(bmain, scene, ptr->owner_id, driver);
   BKE_driver_invalidate_expression(driver, false, true);
 }

+static void rna_DriverVariable_update_data(Main *bmain, Scene *scene, PointerRNA *ptr)
+{
+  DriverVar *dvar = (DriverVar *)ptr->data;
+  FCurve *fcu = rna_FCurve_find_driver_by_variable(ptr->owner_id, dvar);
+  BLI_assert(fcu); /* This hints at an internal error, data may be corrupt. */
+  if (UNLIKELY(fcu == NULL)) {
+    return;
+  }
+  /* Find function ensures it's never NULL. */
+  ChannelDriver *driver = fcu->driver;
+  fcu->flag &= ~FCURVE_DISABLED;
+  rna_ChannelDriver_update_data_impl(bmain, scene, ptr->owner_id, driver);
+}
+
 /* ----------- */

 /* NOTE: this function exists only to avoid id reference-counting. */
@@ -1924,14 +1990,14 @@ static void rna_def_drivervar(BlenderRNA *brna)
       "Name",
       "Name to use in scripted expressions/functions (no spaces or dots are allowed, "
       "and must start with a letter)");
-  RNA_def_property_update(prop, 0, "rna_DriverTarget_update_name"); /* XXX */
+  RNA_def_property_update(prop, 0, "rna_DriverVariable_update_name");

   /* Enums */
   prop = RNA_def_property(srna, "type", PROP_ENUM, PROP_NONE);
   RNA_def_property_enum_items(prop, prop_type_items);
   RNA_def_property_enum_funcs(prop, NULL, "rna_DriverVariable_type_set", NULL);
   RNA_def_property_ui_text(prop, "Type", "Driver variable type");
-  RNA_def_property_update(prop, 0, "rna_ChannelDriver_update_data"); /* XXX */
+  RNA_def_property_update(prop, 0, "rna_DriverVariable_update_data");

   /* Targets */
   /* TODO: for nicer api, only expose the relevant props via subclassing,
@@ -989,7 +989,11 @@ static PyObject *bpy_bmesh_free(BPy_BMesh *self)

   bm_dealloc_editmode_warn(self);

-  if ((self->flag & BPY_BMFLAG_IS_WRAPPED) == 0) {
+  if (self->flag & BPY_BMFLAG_IS_WRAPPED) {
+    /* Ensure further access doesn't return this invalid object, see: #105715. */
+    bm->py_handle = NULL;
+  }
+  else {
     BM_mesh_free(bm);
   }

@@ -247,6 +247,7 @@ static int count_items(PyObject *seq, int dim)
 static int validate_array_length(PyObject *rvalue,
                                  PointerRNA *ptr,
                                  PropertyRNA *prop,
+                                 const bool prop_is_param_dyn_alloc,
                                  int lvalue_dim,
                                  int *r_totitem,
                                  const char *error_prefix)
@@ -266,26 +267,20 @@ static int validate_array_length(PyObject *rvalue,
     return -1;
   }
   if ((RNA_property_flag(prop) & PROP_DYNAMIC) && lvalue_dim == 0) {
-    if (RNA_property_array_length(ptr, prop) != tot) {
-#if 0
-      /* length is flexible */
-      if (!RNA_property_dynamic_array_set_length(ptr, prop, tot)) {
-        /* BLI_snprintf(error_str, error_str_size,
-         * "%s.%s: array length cannot be changed to %d",
-         * RNA_struct_identifier(ptr->type), RNA_property_identifier(prop), tot); */
+    const int tot_expected = RNA_property_array_length(ptr, prop);
+    if (tot_expected != tot) {
+      *r_totitem = tot;
+      if (!prop_is_param_dyn_alloc) {
         PyErr_Format(PyExc_ValueError,
-                     "%s %s.%s: array length cannot be changed to %d",
+                     "%s %s.%s: array length cannot be changed to %d (expected %d)",
                      error_prefix,
                      RNA_struct_identifier(ptr->type),
                      RNA_property_identifier(prop),
-                     tot);
+                     tot,
+                     tot_expected);
         return -1;
       }
-#else
-      *r_totitem = tot;
       return 0;
-
-#endif
     }

     len = tot;
@@ -340,6 +335,7 @@ static int validate_array_length(PyObject *rvalue,
 static int validate_array(PyObject *rvalue,
                           PointerRNA *ptr,
                           PropertyRNA *prop,
+                          const bool prop_is_param_dyn_alloc,
                           int lvalue_dim,
                           ItemTypeCheckFunc check_item_type,
                           const char *item_type_str,
@@ -410,7 +406,8 @@ static int validate_array(PyObject *rvalue,
       return -1;
     }

-    return validate_array_length(rvalue, ptr, prop, lvalue_dim, r_totitem, error_prefix);
+    return validate_array_length(
+        rvalue, ptr, prop, prop_is_param_dyn_alloc, lvalue_dim, r_totitem, error_prefix);
   }
 }

@@ -527,15 +524,26 @@ static int py_to_array(PyObject *seq,
   char *data = NULL;

   // totdim = RNA_property_array_dimension(ptr, prop, dim_size); /* UNUSED */
+  const int flag = RNA_property_flag(prop);

-  if (validate_array(seq, ptr, prop, 0, check_item_type, item_type_str, &totitem, error_prefix) ==
-      -1) {
+  /* Use #ParameterDynAlloc which defines it's own array length. */
+  const bool prop_is_param_dyn_alloc = param_data && (flag & PROP_DYNAMIC);
+
+  if (validate_array(seq,
+                     ptr,
+                     prop,
+                     prop_is_param_dyn_alloc,
+                     0,
+                     check_item_type,
+                     item_type_str,
+                     &totitem,
+                     error_prefix) == -1) {
     return -1;
   }

   if (totitem) {
     /* NOTE: this code is confusing. */
-    if (param_data && RNA_property_flag(prop) & PROP_DYNAMIC) {
+    if (prop_is_param_dyn_alloc) {
       /* not freeing allocated mem, RNA_parameter_list_free() will do this */
       ParameterDynAlloc *param_alloc = (ParameterDynAlloc *)param_data;
       param_alloc->array_tot = (int)totitem;
@@ -626,9 +634,16 @@ static int py_to_array_index(PyObject *py,
     copy_value_single(py, ptr, prop, NULL, 0, &index, convert_item, rna_set_index);
   }
   else {
-    if (validate_array(
-            py, ptr, prop, lvalue_dim, check_item_type, item_type_str, &totitem, error_prefix) ==
-        -1) {
+    const bool prop_is_param_dyn_alloc = false;
+    if (validate_array(py,
+                       ptr,
+                       prop,
+                       prop_is_param_dyn_alloc,
+                       lvalue_dim,
+                       check_item_type,
+                       item_type_str,
+                       &totitem,
+                       error_prefix) == -1) {
       return -1;
    }

@@ -195,6 +195,76 @@ class TestPropArrayMultiDimensional(unittest.TestCase):
         del id_type.temp

+
+class TestPropArrayDynamicAssign(unittest.TestCase):
+    """
+    Pixels are dynamic in the sense the size can change however the assignment does not define the size.
+    """
+
+    dims = 12
+
+    def setUp(self):
+        self.image = bpy.data.images.new("", self.dims, self.dims)
+
+    def tearDown(self):
+        bpy.data.images.remove(self.image)
+        self.image = None
+
+    def test_assign_fixed_under_1px(self):
+        image = self.image
+        with self.assertRaises(ValueError):
+            image.pixels = [1.0, 1.0, 1.0, 1.0]
+
+    def test_assign_fixed_under_0px(self):
+        image = self.image
+        with self.assertRaises(ValueError):
+            image.pixels = []
+
+    def test_assign_fixed_over_by_1px(self):
+        image = self.image
+        with self.assertRaises(ValueError):
+            image.pixels = ([1.0, 1.0, 1.0, 1.0] * (self.dims * self.dims)) + [1.0]
+
+    def test_assign_fixed(self):
+        # Valid assignment, ensure it works as intended.
+        image = self.image
+        values = [1.0, 0.0, 1.0, 0.0] * (self.dims * self.dims)
+        image.pixels = values
+        self.assertEqual(tuple(values), tuple(image.pixels))
+
+
+class TestPropArrayDynamicArg(unittest.TestCase):
+    """
+    Index array, a dynamic array argument which defines it's own length.
+    """
+
+    dims = 8
+
+    def setUp(self):
+        self.me = bpy.data.meshes.new("")
+        self.me.vertices.add(self.dims)
+        self.ob = bpy.data.objects.new("", self.me)
+
+    def tearDown(self):
+        bpy.data.objects.remove(self.ob)
+        bpy.data.meshes.remove(self.me)
+        self.me = None
+        self.ob = None
+
+    def test_param_dynamic(self):
+        ob = self.ob
+        vg = ob.vertex_groups.new(name="")
+
+        # Add none.
+        vg.add(index=(), weight=1.0, type='REPLACE')
+        for i in range(self.dims):
+            with self.assertRaises(RuntimeError):
+                vg.weight(i)
+
+        # Add all.
+        vg.add(index=range(self.dims), weight=1.0, type='REPLACE')
+        self.assertEqual(tuple([1.0] * self.dims), tuple([vg.weight(i) for i in range(self.dims)]))
+
+
 if __name__ == '__main__':
     import sys
     sys.argv = [__file__] + (sys.argv[sys.argv.index("--") + 1:] if "--" in sys.argv else [])