Refactor: combine insert_keyframe() and insert_key_rna() into a single function #122053
.github/workflows/stale.yml
@@ -3,10 +3,10 @@
name: Close GitHub Pull Requests

-# Trigger this Action when a pull request is opened.
+# Trigger this workflow every 12 hours.
on:
-  pull_request:
-    types: [opened]
+  schedule:
+    - cron: '* */12 * * *'

jobs:
  close_prs:
@@ -1683,7 +1683,7 @@ class CyclesPreferences(bpy.types.AddonPreferences):
        elif device_type == 'ONEAPI':
            import sys
            if sys.platform.startswith("win"):
-                driver_version = "XX.X.101.5186"
+                driver_version = "XX.X.101.5518"
                col.label(text=rpt_("Requires Intel GPU with Xe-HPG architecture"), icon='BLANK1', translate=False)
                col.label(text=rpt_("and Windows driver version %s or newer") % driver_version,
                          icon='BLANK1', translate=False)
@@ -178,6 +178,51 @@ void BVHEmbree::build(Progress &progress,
  rtcCommitScene(scene);
}

string BVHEmbree::get_last_error_message()
{
  const RTCError error_code = rtcGetDeviceError(rtc_device);
  switch (error_code) {
    case RTC_ERROR_NONE:
      return "no error";
    case RTC_ERROR_UNKNOWN:
      return "unknown error";
    case RTC_ERROR_INVALID_ARGUMENT:
      return "invalid argument error";
    case RTC_ERROR_INVALID_OPERATION:
      return "invalid operation error";
    case RTC_ERROR_OUT_OF_MEMORY:
      return "out of memory error";
    case RTC_ERROR_UNSUPPORTED_CPU:
      return "unsupported cpu error";
    case RTC_ERROR_CANCELLED:
      return "cancelled";
    default:
      /* We should never end up here unless the enum for RTC errors changes. */
      return "unknown error";
  }
}

# if defined(WITH_EMBREE_GPU) && RTC_VERSION >= 40302
bool BVHEmbree::offload_scenes_to_gpu(const vector<RTCScene> &scenes)
{
  /* Having BVH on GPU is more performance-critical than texture data.
   * In order to ensure good performance even when running out of GPU
   * memory, we force BVH to migrate to GPU before allocating other textures
   * that may not fit. */
  for (const RTCScene &embree_scene : scenes) {
    RTCSceneFlags scene_flags = rtcGetSceneFlags(embree_scene);
    scene_flags = scene_flags | RTC_SCENE_FLAG_PREFETCH_USM_SHARED_ON_GPU;
    rtcSetSceneFlags(embree_scene, scene_flags);
    rtcCommitScene(embree_scene);
    /* In case of any errors from Embree, we should stop
     * the execution and propagate the error. */
    if (rtcGetDeviceError(rtc_device) != RTC_ERROR_NONE)
      return false;
  }
  return true;
}
# endif

void BVHEmbree::add_object(Object *ob, int i)
{
  Geometry *geom = ob->get_geometry();
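For reference, a minimal sketch of the bare Embree 4 calls that offload_scenes_to_gpu() wraps for a single scene, assuming an Embree build with the SYCL/GPU feature (the scene flag is the one used in the hunk above); the helper name is illustrative and not part of the patch.

#include <embree4/rtcore.h>

/* Illustrative only: request USM prefetch of a committed scene to the GPU,
 * then check the device error that get_last_error_message() would translate. */
static bool prefetch_scene_to_gpu(RTCDevice device, RTCScene scene)
{
  const RTCSceneFlags flags = rtcGetSceneFlags(scene);
  rtcSetSceneFlags(scene, flags | RTC_SCENE_FLAG_PREFETCH_USM_SHARED_ON_GPU);
  rtcCommitScene(scene);
  /* Any error raised during the re-commit stays on the device for the caller to report. */
  return rtcGetDeviceError(device) == RTC_ERROR_NONE;
}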
@@ -18,6 +18,7 @@
# include "bvh/bvh.h"
# include "bvh/params.h"

# include "util/string.h"
# include "util/thread.h"
# include "util/types.h"
# include "util/vector.h"
@@ -36,6 +37,12 @@ class BVHEmbree : public BVH {
            const bool isSyclEmbreeDevice = false);
  void refit(Progress &progress);

# if defined(WITH_EMBREE_GPU) && RTC_VERSION >= 40302
  bool offload_scenes_to_gpu(const vector<RTCScene> &scenes);
# endif

  string get_last_error_message();

  RTCScene scene;

 protected:
@@ -257,6 +257,7 @@ class device_memory {
  friend class OptiXDevice;
  friend class HIPDevice;
  friend class MetalDevice;
  friend class OneapiDevice;

  /* Only create through subclasses. */
  device_memory(Device *device, const char *name, MemoryType type);
@@ -11,6 +11,7 @@
# include "device/oneapi/device_impl.h"

# include "util/debug.h"
# include "util/foreach.h"
# include "util/log.h"

# ifdef WITH_EMBREE_GPU
@@ -47,18 +48,20 @@ static void queue_error_cb(const char *message, void *user_ptr)
}

OneapiDevice::OneapiDevice(const DeviceInfo &info, Stats &stats, Profiler &profiler)
    : Device(info, stats, profiler),
    : GPUDevice(info, stats, profiler),
      device_queue_(nullptr),
# ifdef WITH_EMBREE_GPU
      embree_device(nullptr),
      embree_scene(nullptr),
# endif
      texture_info_(this, "texture_info", MEM_GLOBAL),
      kg_memory_(nullptr),
      kg_memory_device_(nullptr),
      kg_memory_size_(0)
{
  need_texture_info_ = false;
  /* Verify that base class types can be used with specific backend types */
  static_assert(sizeof(texMemObject) == sizeof(void *));
  static_assert(sizeof(arrayMemObject) == sizeof(void *));

  use_hardware_raytracing = info.use_hardware_raytracing;

  oneapi_set_error_cb(queue_error_cb, &oneapi_error_string_);
@@ -110,6 +113,18 @@ OneapiDevice::OneapiDevice(const DeviceInfo &info, Stats &stats, Profiler &profi
  kg_memory_size_ = globals_segment_size;

  max_memory_on_device_ = get_memcapacity();
  init_host_memory();
  move_texture_to_host = false;
  can_map_host = true;

  const char *headroom_str = getenv("CYCLES_ONEAPI_MEMORY_HEADROOM");
  if (headroom_str != nullptr) {
    const long long override_headroom = (float)atoll(headroom_str);
    device_working_headroom = override_headroom;
    device_texture_headroom = override_headroom;
  }
  VLOG_DEBUG << "oneAPI memory headroom size: "
             << string_human_readable_size(device_working_headroom);
}

OneapiDevice::~OneapiDevice()
@@ -119,7 +134,7 @@ OneapiDevice::~OneapiDevice()
  rtcReleaseDevice(embree_device);
# endif

-  texture_info_.free();
+  texture_info.free();
  usm_free(device_queue_, kg_memory_);
  usm_free(device_queue_, kg_memory_device_);
@@ -166,8 +181,22 @@ void OneapiDevice::build_bvh(BVH *bvh, Progress &progress, bool refit)
    else {
      bvh_embree->build(progress, &stats, embree_device, true);
    }

#  if RTC_VERSION >= 40302
    thread_scoped_lock lock(scene_data_mutex);
    all_embree_scenes.push_back(bvh_embree->scene);
#  endif

    if (bvh->params.top_level) {
      embree_scene = bvh_embree->scene;
#  if RTC_VERSION >= 40302
      if (bvh_embree->offload_scenes_to_gpu(all_embree_scenes) == false) {
        set_error(
            string_printf("BVH failed to migrate to the GPU due to Embree library error (%s)",
                          bvh_embree->get_last_error_message()));
      }
      all_embree_scenes.clear();
#  endif
    }
  }
  else {
@@ -176,6 +205,26 @@ void OneapiDevice::build_bvh(BVH *bvh, Progress &progress, bool refit)
}
# endif
size_t OneapiDevice::get_free_mem() const
{
  /* Accurate: Use device info, which is practically useful only on dGPU.
   * This is because for non-discrete GPUs, all GPU memory allocations would
   * be in the RAM, thus having the same performance for device and host pointers,
   * so there is no need to be very accurate about what would end where. */
  const sycl::device &device = reinterpret_cast<sycl::queue *>(device_queue_)->get_device();
  const bool is_integrated_gpu = device.get_info<sycl::info::device::host_unified_memory>();
  if (device.has(sycl::aspect::ext_intel_free_memory) && is_integrated_gpu == false) {
    return device.get_info<sycl::ext::intel::info::device::free_memory>();
  }
  /* Estimate: Capacity - in use. */
  else if (device_mem_in_use < max_memory_on_device_) {
    return max_memory_on_device_ - device_mem_in_use;
  }
  else {
    return 0;
  }
}

bool OneapiDevice::load_kernels(const uint requested_features)
{
  assert(device_queue_);
@@ -208,63 +257,101 @@ bool OneapiDevice::load_kernels(const uint requested_features)
    VLOG_INFO << "Kernels loading (compilation) has been done for \"" << info.description << "\"";
  }

  if (is_finished_ok) {
    reserve_private_memory(requested_features);
    is_finished_ok = !have_error();
  }

  return is_finished_ok;
}

void OneapiDevice::load_texture_info()
void OneapiDevice::reserve_private_memory(const uint kernel_features)
{
  if (need_texture_info_) {
    need_texture_info_ = false;
    texture_info_.copy_to_device();
  size_t free_before = get_free_mem();

  /* Use the biggest kernel for estimation. */
  const DeviceKernel test_kernel = (kernel_features & KERNEL_FEATURE_NODE_RAYTRACE) ?
                                       DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_RAYTRACE :
                                   (kernel_features & KERNEL_FEATURE_MNEE) ?
                                       DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE_MNEE :
                                       DEVICE_KERNEL_INTEGRATOR_SHADE_SURFACE;

  {
    unique_ptr<DeviceQueue> queue = gpu_queue_create();

    device_ptr d_path_index = 0;
    device_ptr d_render_buffer = 0;
    int d_work_size = 0;
    DeviceKernelArguments args(&d_path_index, &d_render_buffer, &d_work_size);

    queue->init_execution();
    /* Launch of the kernel seems to be sufficient to reserve all
     * needed memory regardless of the execution global size.
     * So, the smallest possible size is used here. */
    queue->enqueue(test_kernel, 1, args);
    queue->synchronize();
  }

  size_t free_after = get_free_mem();

  VLOG_INFO << "For kernel execution were reserved "
            << string_human_readable_number(free_before - free_after) << " bytes. ("
            << string_human_readable_size(free_before - free_after) << ")";
}

void OneapiDevice::generic_alloc(device_memory &mem)
void OneapiDevice::get_device_memory_info(size_t &total, size_t &free)
{
  size_t memory_size = mem.memory_size();

  /* TODO(@nsirgien): In the future, if the scene doesn't fit into device memory, then
   * we can use USM host memory.
   * Because of the expected performance impact, implementation of this has had a low priority
   * and is not implemented yet. */

  assert(device_queue_);
  /* NOTE(@nsirgien): There are three types of Unified Shared Memory (USM) in oneAPI: host, device
   * and shared. For a new project it may be more beneficial to use USM shared memory, because it
   * provides an automatic migration mechanism that allows using the same pointer on host and
   * on device, without the need to worry about explicit memory transfer operations. But for
   * Blender/Cycles this type of memory is not very suitable in the current application
   * architecture, because Cycles already uses two different pointers for host activity and device
   * activity, and also has to perform all needed memory transfer operations. So, the USM device
   * memory type has been used for the oneAPI device in order to better fit the Cycles
   * architecture. */
  void *device_pointer = nullptr;
  if (mem.memory_size() + stats.mem_used < max_memory_on_device_)
    device_pointer = usm_alloc_device(device_queue_, memory_size);
  if (device_pointer == nullptr) {
    set_error("oneAPI kernel - device memory allocation error for " +
              string_human_readable_size(mem.memory_size()) +
              ", possibly caused by lack of available memory space on the device: " +
              string_human_readable_size(stats.mem_used) + " of " +
              string_human_readable_size(max_memory_on_device_) + " is already allocated");
  }

  mem.device_pointer = reinterpret_cast<ccl::device_ptr>(device_pointer);
  mem.device_size = memory_size;

  stats.mem_alloc(memory_size);
  free = get_free_mem();
  total = max_memory_on_device_;
}

void OneapiDevice::generic_copy_to(device_memory &mem)
bool OneapiDevice::alloc_device(void *&device_pointer, size_t size)
{
  if (!mem.device_pointer) {
    return;
  }
  size_t memory_size = mem.memory_size();
  bool allocation_success = false;
  device_pointer = usm_alloc_device(device_queue_, size);
  if (device_pointer != nullptr) {
    allocation_success = true;
    /* Due to lazy memory initialization in the GPU runtime we force the memory to
     * appear in device memory via execution of a kernel using this memory. */
    if (!oneapi_zero_memory_on_device(device_queue_, device_pointer, size)) {
      set_error("oneAPI memory operation error: got runtime exception \"" + oneapi_error_string_ +
                "\"");
      usm_free(device_queue_, device_pointer);

      /* Copy operation from host shouldn't be requested if there is no memory allocated on host. */
      assert(mem.host_pointer);
      assert(device_queue_);
      usm_memcpy(device_queue_, (void *)mem.device_pointer, (void *)mem.host_pointer, memory_size);
      device_pointer = nullptr;
      allocation_success = false;
    }
  }

  return allocation_success;
}

void OneapiDevice::free_device(void *device_pointer)
{
  usm_free(device_queue_, device_pointer);
}

bool OneapiDevice::alloc_host(void *&shared_pointer, size_t size)
{
  shared_pointer = usm_aligned_alloc_host(device_queue_, size, 64);
  return shared_pointer != nullptr;
}

void OneapiDevice::free_host(void *shared_pointer)
{
  usm_free(device_queue_, shared_pointer);
}

void OneapiDevice::transform_host_pointer(void *&device_pointer, void *&shared_pointer)
{
  /* Device and host pointer are in the same address space
   * as we're using Unified Shared Memory. */
  device_pointer = shared_pointer;
}

void OneapiDevice::copy_host_to_device(void *device_pointer, void *host_pointer, size_t size)
{
  usm_memcpy(device_queue_, device_pointer, host_pointer, size);
}

/* TODO: Make sycl::queue part of OneapiQueue and avoid using pointers to sycl::queue. */
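The small overrides above (get_device_memory_info, alloc_device, free_device, alloc_host, free_host, transform_host_pointer, copy_host_to_device) are the oneAPI half of moving the generic allocation logic into the shared GPUDevice base class. A rough, illustrative sketch of how a GPUDevice-style base class can drive such hooks follows; the class name, the host-memory fallback and the exact control flow are assumptions for illustration, not the actual Cycles GPUDevice implementation.

#include <cstddef>
#include <cstring>

struct GPUDeviceSketch {
  virtual bool alloc_device(void *&device_pointer, size_t size) = 0;
  virtual bool alloc_host(void *&shared_pointer, size_t size) = 0;
  virtual void transform_host_pointer(void *&device_pointer, void *&shared_pointer) = 0;
  virtual void copy_host_to_device(void *device_pointer, void *host_pointer, size_t size) = 0;
  virtual ~GPUDeviceSketch() = default;

  /* One generic allocation path shared by all GPU backends. */
  void *generic_alloc(void *host_data, size_t size)
  {
    void *device_pointer = nullptr;
    if (alloc_device(device_pointer, size)) {
      if (host_data) {
        copy_host_to_device(device_pointer, host_data, size);
      }
      return device_pointer;
    }
    /* Assumed fallback: map host (shared) memory and reuse it as the device pointer. */
    void *shared_pointer = nullptr;
    if (alloc_host(shared_pointer, size)) {
      if (host_data) {
        std::memcpy(shared_pointer, host_data, size);
      }
      transform_host_pointer(device_pointer, shared_pointer);
      return device_pointer;
    }
    return nullptr;
  }
};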
@@ -288,20 +375,6 @@ void *OneapiDevice::kernel_globals_device_pointer()
  return kg_memory_device_;
}

void OneapiDevice::generic_free(device_memory &mem)
{
  if (!mem.device_pointer) {
    return;
  }

  stats.mem_free(mem.device_size);
  mem.device_size = 0;

  assert(device_queue_);
  usm_free(device_queue_, (void *)mem.device_pointer);
  mem.device_pointer = 0;
}

void OneapiDevice::mem_alloc(device_memory &mem)
{
  if (mem.type == MEM_TEXTURE) {
@@ -344,7 +417,7 @@ void OneapiDevice::mem_copy_to(device_memory &mem)
  }
  else {
    if (!mem.device_pointer)
-      mem_alloc(mem);
+      generic_alloc(mem);

    generic_copy_to(mem);
  }
@@ -515,14 +588,14 @@ void OneapiDevice::tex_alloc(device_texture &mem)

  /* Resize if needed. Also, in case of resize - allocate in advance for future allocations. */
  const uint slot = mem.slot;
-  if (slot >= texture_info_.size()) {
-    texture_info_.resize(slot + 128);
+  if (slot >= texture_info.size()) {
+    texture_info.resize(slot + 128);
  }

-  texture_info_[slot] = mem.info;
-  need_texture_info_ = true;
+  texture_info[slot] = mem.info;
+  need_texture_info = true;

-  texture_info_[slot].data = (uint64_t)mem.device_pointer;
+  texture_info[slot].data = (uint64_t)mem.device_pointer;
}

void OneapiDevice::tex_free(device_texture &mem)
@@ -628,6 +701,16 @@ void *OneapiDevice::usm_alloc_device(SyclQueue *queue_, size_t memory_size)
{
  assert(queue_);
  sycl::queue *queue = reinterpret_cast<sycl::queue *>(queue_);
  /* NOTE(@nsirgien): There are three types of Unified Shared Memory (USM) in oneAPI: host, device
   * and shared. For a new project it could be more beneficial to use USM shared memory, because it
   * provides an automatic migration mechanism that allows using the same pointer on host and
   * on device, without the need to worry about explicit memory transfer operations, although
   * usage of USM shared memory implies some documented limitations on memory usage with regard to
   * parallel access from different threads. But for Blender/Cycles this type of memory is not
   * very suitable in the current application architecture, because Cycles is a multi-threaded
   * application and already uses two different pointers for host activity and device activity,
   * and also has to perform all needed memory transfer operations. So, the USM device memory type
   * has been used for the oneAPI device in order to better fit the Cycles architecture. */
# ifndef WITH_ONEAPI_SYCL_HOST_TASK
  return sycl::malloc_device(memory_size, *queue);
# else
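A short, self-contained SYCL sketch of the trade-off the NOTE above describes (illustrative only, not Cycles code): device USM keeps host and device pointers distinct and needs explicit copies, while shared USM migrates automatically and exposes one pointer to both sides.

#include <sycl/sycl.hpp>
#include <vector>

int main()
{
  sycl::queue q;
  const size_t n = 1024;

  /* Device USM: the host cannot dereference the pointer, an explicit memcpy is
   * required. This is the model Cycles uses for the oneAPI device. */
  float *d_data = sycl::malloc_device<float>(n, q);
  std::vector<float> h_data(n, 1.0f);
  q.memcpy(d_data, h_data.data(), n * sizeof(float)).wait();

  /* Shared USM: one pointer usable on both sides, migration is automatic. */
  float *s_data = sycl::malloc_shared<float>(n, q);
  s_data[0] = 1.0f; /* Valid on the host. */

  sycl::free(d_data, q);
  sycl::free(s_data, q);
  return 0;
}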
@@ -646,9 +729,26 @@ void OneapiDevice::usm_free(SyclQueue *queue_, void *usm_ptr)
bool OneapiDevice::usm_memcpy(SyclQueue *queue_, void *dest, void *src, size_t num_bytes)
{
  assert(queue_);
  /* sycl::queue::memcpy may crash if the queue is in an invalid state due to previous
   * runtime errors. It's better to avoid running memory operations in that case.
   * The render will be canceled and the queue will be destroyed anyway. */
  if (have_error())
    return false;

  sycl::queue *queue = reinterpret_cast<sycl::queue *>(queue_);
  OneapiDevice::check_usm(queue_, dest, true);
  OneapiDevice::check_usm(queue_, src, true);
  sycl::usm::alloc dest_type = get_pointer_type(dest, queue->get_context());
  sycl::usm::alloc src_type = get_pointer_type(src, queue->get_context());
  /* Unknown here means that this is not a USM allocation, which implies that this is
   * some generic C++ allocation, so we could use C++ memcpy directly with USM host. */
  if ((dest_type == sycl::usm::alloc::host || dest_type == sycl::usm::alloc::unknown) &&
      (src_type == sycl::usm::alloc::host || src_type == sycl::usm::alloc::unknown))
  {
    memcpy(dest, src, num_bytes);
    return true;
  }

  try {
    sycl::event mem_event = queue->memcpy(dest, src, num_bytes);
# ifdef WITH_CYCLES_DEBUG
@@ -658,8 +758,6 @@ bool OneapiDevice::usm_memcpy(SyclQueue *queue_, void *dest, void *src, size_t n
    mem_event.wait_and_throw();
    return true;
# else
    sycl::usm::alloc dest_type = get_pointer_type(dest, queue->get_context());
    sycl::usm::alloc src_type = get_pointer_type(src, queue->get_context());
    bool from_device_to_host = dest_type == sycl::usm::alloc::host &&
                               src_type == sycl::usm::alloc::device;
    bool host_or_device_memop_with_offset = dest_type == sycl::usm::alloc::unknown ||
@@ -684,6 +782,12 @@ bool OneapiDevice::usm_memset(SyclQueue *queue_,
                              size_t num_bytes)
{
  assert(queue_);
  /* sycl::queue::memset may crash if the queue is in an invalid state due to previous
   * runtime errors. It's better to avoid running memory operations in that case.
   * The render will be canceled and the queue will be destroyed anyway. */
  if (have_error())
    return false;

  sycl::queue *queue = reinterpret_cast<sycl::queue *>(queue_);
  OneapiDevice::check_usm(queue_, usm_ptr, true);
  try {
@@ -735,7 +839,7 @@ void OneapiDevice::set_global_memory(SyclQueue *queue_,
  assert(memory_name);
  assert(memory_device_pointer);
  KernelGlobalsGPU *globals = (KernelGlobalsGPU *)kernel_globals;
-  OneapiDevice::check_usm(queue_, memory_device_pointer);
+  OneapiDevice::check_usm(queue_, memory_device_pointer, true);
  OneapiDevice::check_usm(queue_, kernel_globals, true);

  std::string matched_name(memory_name);
@@ -874,11 +978,11 @@ void OneapiDevice::get_adjusted_global_and_local_sizes(SyclQueue *queue,

  /* Compute-runtime (ie. NEO) version is what gets returned by sycl/L0 on Windows
   * since Windows driver 101.3268. */
-  static const int lowest_supported_driver_version_win = 1015186;
+  static const int lowest_supported_driver_version_win = 1015518;
# ifdef _WIN32
-  /* For Windows driver 101.5186, compute-runtime version is 28044.
+  /* For Windows driver 101.5518, compute-runtime version is 28044.
   * This information is returned by `ocloc query OCL_DRIVER_VERSION`.*/
-  static const int lowest_supported_driver_version_neo = 28044;
+  static const int lowest_supported_driver_version_neo = 29283;
# else
  static const int lowest_supported_driver_version_neo = 27642;
# endif
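The integer constants above appear to encode the Windows driver version by concatenating the major series and the build number (101.5186 becomes 1015186, 101.5518 becomes 1015518). A tiny illustrative helper, based on that inference rather than on anything stated in the patch:

#include <cstdio>

/* Illustrative only: the 10000 factor is an assumption inferred from the
 * constants in the hunk above, not a documented encoding. */
static int driver_version_as_int(int major, int build)
{
  return major * 10000 + build; /* 101, 5518 -> 1015518 */
}

int main()
{
  std::printf("%d\n", driver_version_as_int(101, 5518)); /* Prints 1015518. */
  return 0;
}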
@@ -21,17 +21,19 @@ typedef void (*OneAPIDeviceIteratorCallback)(const char *id,
                                             bool oidn_support,
                                             void *user_ptr);

-class OneapiDevice : public Device {
+class OneapiDevice : public GPUDevice {
 private:
  SyclQueue *device_queue_;
# ifdef WITH_EMBREE_GPU
  RTCDevice embree_device;
  RTCScene embree_scene;
# if RTC_VERSION >= 40302
  thread_mutex scene_data_mutex;
  vector<RTCScene> all_embree_scenes;
# endif
# endif
  using ConstMemMap = map<string, device_vector<uchar> *>;
  ConstMemMap const_mem_map_;
  device_vector<TextureInfo> texture_info_;
  bool need_texture_info_;
  void *kg_memory_;
  void *kg_memory_device_;
  size_t kg_memory_size_ = (size_t)0;
@@ -41,6 +43,8 @@ class OneapiDevice : public Device {
  unsigned int kernel_features = 0;
  int scene_max_shaders_ = 0;

  size_t get_free_mem() const;

 public:
  virtual BVHLayoutMask get_bvh_layout_mask(uint kernel_features) const override;

@@ -54,13 +58,15 @@ class OneapiDevice : public Device {

  bool load_kernels(const uint kernel_features) override;

-  void load_texture_info();
+  void reserve_private_memory(const uint kernel_features);

-  void generic_alloc(device_memory &mem);

-  void generic_copy_to(device_memory &mem);

-  void generic_free(device_memory &mem);
+  virtual void get_device_memory_info(size_t &total, size_t &free) override;
+  virtual bool alloc_device(void *&device_pointer, size_t size) override;
+  virtual void free_device(void *device_pointer) override;
+  virtual bool alloc_host(void *&shared_pointer, size_t size) override;
+  virtual void free_host(void *shared_pointer) override;
+  virtual void transform_host_pointer(void *&device_pointer, void *&shared_pointer) override;
+  virtual void copy_host_to_device(void *device_pointer, void *host_pointer, size_t size) override;

  string oneapi_error_message();
@@ -133,6 +133,26 @@ bool oneapi_run_test_kernel(SyclQueue *queue_)
  return is_computation_correct;
}

bool oneapi_zero_memory_on_device(SyclQueue *queue_, void *device_pointer, size_t num_bytes)
{
  assert(queue_);
  sycl::queue *queue = reinterpret_cast<sycl::queue *>(queue_);
  try {
    queue->submit([&](sycl::handler &cgh) {
      cgh.parallel_for(num_bytes,
                       [=](sycl::id<1> idx) { ((char *)device_pointer)[idx.get(0)] = (char)0; });
    });
    queue->wait_and_throw();
    return true;
  }
  catch (sycl::exception const &e) {
    if (s_error_cb) {
      s_error_cb(e.what(), s_error_user_ptr);
    }
    return false;
  }
}

bool oneapi_kernel_is_required_for_features(const std::string &kernel_name,
                                            const uint kernel_features)
{
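The reason this zero-fill kernel exists is spelled out in alloc_device() earlier in the diff: allocations are lazy in the GPU runtime, so writing every byte from a kernel forces the memory to become resident on the device. A stripped-down, self-contained sketch of the same pattern in plain SYCL (illustrative only; the real code goes through SyclQueue and the error callback):

#include <sycl/sycl.hpp>

/* Illustrative: allocate device USM and force it to be physically backed by
 * writing to it from a kernel, as oneapi_zero_memory_on_device() does. */
static void *alloc_and_commit(sycl::queue &q, size_t num_bytes)
{
  char *ptr = sycl::malloc_device<char>(num_bytes, q);
  if (ptr == nullptr) {
    return nullptr;
  }
  q.parallel_for(sycl::range<1>(num_bytes), [=](sycl::id<1> idx) { ptr[idx] = 0; });
  q.wait_and_throw();
  return ptr;
}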
@@ -44,6 +44,9 @@ extern "C" {
# endif

CYCLES_KERNEL_ONEAPI_EXPORT bool oneapi_run_test_kernel(SyclQueue *queue_);
CYCLES_KERNEL_ONEAPI_EXPORT bool oneapi_zero_memory_on_device(SyclQueue *queue_,
                                                              void *device_pointer,
                                                              size_t num_bytes);
CYCLES_KERNEL_ONEAPI_EXPORT void oneapi_set_error_cb(OneAPIErrorCallback cb, void *user_ptr);
CYCLES_KERNEL_ONEAPI_EXPORT size_t oneapi_suggested_gpu_kernel_size(const DeviceKernel kernel);
CYCLES_KERNEL_ONEAPI_EXPORT bool oneapi_enqueue_kernel(KernelContext *context,
@@ -39,6 +39,11 @@ ccl_device int shadow_linking_pick_mesh_intersection(KernelGlobals kg,

  const uint visibility = path_state_ray_visibility(state);

  int transparent_bounce = INTEGRATOR_STATE(state, path, transparent_bounce);
  int volume_bounce = INTEGRATOR_STATE(state, path, volume_bounce);

  /* TODO: Replace the loop with sequential calls to the kernel, similar to the transparent shadow
   * intersection kernel. */
  for (int i = 0; i < SHADOW_LINK_MAX_INTERSECTION_COUNT; i++) {
    Intersection current_isect ccl_optional_struct_init;
    current_isect.object = OBJECT_NONE;
@@ -68,12 +73,33 @@ ccl_device int shadow_linking_pick_mesh_intersection(KernelGlobals kg,
      }
    }

    const uint blocker_set = kernel_data_fetch(objects, current_isect.object).blocker_shadow_set;
    if (blocker_set == 0) {
      /* Contribution from the lights past the default blocker is accumulated using the main path.
       */
      ray->tmax = current_isect.t;
      break;
    /* Contribution from the lights past the default opaque blocker is accumulated
     * using the main path. */
    if (!(shader_flags & (SD_HAS_ONLY_VOLUME | SD_HAS_TRANSPARENT_SHADOW))) {
      const uint blocker_set = kernel_data_fetch(objects, current_isect.object).blocker_shadow_set;
      if (blocker_set == 0) {
        ray->tmax = current_isect.t;
        break;
      }
    }
    else {
      /* Lights past the maximum allowed transparency bounce do not contribute any light, so
       * consider them as fully blocked and only consider lights prior to this intersection. */
      if (shader_flags & SD_HAS_TRANSPARENT_SHADOW) {
        ++transparent_bounce;
        if (transparent_bounce >= kernel_data.integrator.transparent_max_bounce) {
          ray->tmax = current_isect.t;
          break;
        }
      }
      else {
        kernel_assert(shader_flags & SD_HAS_ONLY_VOLUME);
        ++volume_bounce;
        if (volume_bounce >= kernel_data.integrator.max_volume_bounce) {
          ray->tmax = current_isect.t;
          break;
        }
      }
    }

    /* Move the ray forward. */
@@ -1 +1 @@
-Subproject commit 7181d6dccb9fe4184340f9f5b1c381f8089fe4ec
+Subproject commit 6864f1832e71a31e1e04f72bb7a5a1f53f0cd01c
Binary file not shown.
Binary file not shown.
@@ -344,7 +344,7 @@ const bTheme U_theme_default = {
  .bone_locked_weight = RGBA(0xff000080),
  .cframe = RGBA(0x4772b3ff),
  .time_keyframe = RGBA(0xddd700ff),
-  .time_gp_keyframe = RGBA(0xb5e61dff),
+  .time_gp_keyframe = RGBA(0x7a9b14ff),
  .freestyle_edge_mark = RGBA(0x7fff7fff),
  .freestyle_face_mark = RGBA(0x7fff7f4d),
  .nurb_uline = RGBA(0x909000ff),
@@ -58,8 +58,8 @@ watch_check_mypy:
	done

check_ruff: FORCE
	@env --chdir="$(BASE_DIR)" ruff check $(PY_FILES_MYPY)
	@env --chdir="$(BASE_DIR)" ruff check $(PY_FILES_MYPY_STANDALONE)
	@cd "$(BASE_DIR)" && ruff check $(PY_FILES_MYPY)
	@cd "$(BASE_DIR)" && ruff check $(PY_FILES_MYPY_STANDALONE)
watch_check_ruff:
	@cd "$(BASE_DIR)" && \
	while true; do \
@@ -71,7 +71,7 @@ watch_check_ruff:
	done

check_pylint:
	@env --chdir="$(BASE_DIR)" \
	@cd "$(BASE_DIR)" && \
	pylint $(PY_FILES) \
	--disable=C0111,C0301,C0302,C0103,C0415,R1705,R0902,R0903,R0913,E0611,E0401,I1101,R0801,C0209,W0511,W0718,W0719,C0413,R0911,R0912,R0914,R0915
watch_check_pylint:
@@ -87,9 +87,9 @@ watch_check_pylint:
# Tests (All)

test: FORCE
	@env --chdir="$(BASE_DIR)" $(MAKE) test_cli;
	@env --chdir="$(BASE_DIR)" $(MAKE) test_blender;
	@env --chdir="$(BASE_DIR)" $(MAKE) test_cli_blender;
	@$(MAKE) -C "$(BASE_DIR)" test_cli;
	@$(MAKE) -C "$(BASE_DIR)" test_blender;
	@$(MAKE) -C "$(BASE_DIR)" test_cli_blender;
watch_test: FORCE
	@cd "$(BASE_DIR)" && \
	while true; do \
@@ -104,10 +104,10 @@ watch_test: FORCE

# python3 ./tests/test_cli.py
test_cli: FORCE
	@env --chdir="$(BASE_DIR)" \
	@cd "$(BASE_DIR)" && \
	USE_HTTP=0 \
	$(PYTHON_BIN) ./tests/test_cli.py
	@env --chdir="$(BASE_DIR)" \
	@cd "$(BASE_DIR)" && \
	USE_HTTP=1 \
	$(PYTHON_BIN) ./tests/test_cli.py
watch_test_cli: FORCE
@@ -121,9 +121,11 @@ watch_test_cli: FORCE

# NOTE: these rely on the blender binary.
test_blender: FORCE
	@env --chdir="$(BASE_DIR)" \
	@cd "$(BASE_DIR)" && \
	ASAN_OPTIONS=check_initialization_order=0:leak_check_at_exit=0 \
	$(BLENDER_BIN) --background --factory-startup -noaudio --python ./tests/test_blender.py -- --verbose
	$(BLENDER_BIN) \
	--background --factory-startup --online-mode -noaudio \
	--python ./tests/test_blender.py -- --verbose
watch_test_blender: FORCE
	@cd "$(BASE_DIR)" && \
	while true; do \
@@ -133,10 +135,12 @@ watch_test_blender: FORCE
	done

test_cli_blender: FORCE
	@env BLENDER_BIN=$(BLENDER_BIN) \
	@cd "$(BASE_DIR)" && \
	env BLENDER_BIN=$(BLENDER_BIN) \
	$(PYTHON_BIN) ./tests/test_cli_blender.py
watch_test_cli_blender: FORCE
	@while true; do \
	@cd "$(BASE_DIR)" && \
	while true; do \
	env BLENDER_BIN=$(BLENDER_BIN) \
	$(MAKE) test_cli_blender; \
	inotifywait -q -e close_write $(EXTRA_WATCH_FILES) $(PY_FILES) ; \
@@ -144,7 +148,7 @@ watch_test_cli_blender: FORCE
	done

test_path_pattern_match: FORCE
	@env --chdir="$(BASE_DIR)" \
	@cd "$(BASE_DIR)" && \
	$(PYTHON_BIN) ./tests/test_path_pattern_match.py
watch_test_path_pattern_match: FORCE
	@cd "$(BASE_DIR)" && \
@@ -136,59 +136,79 @@ use_repos_to_notify = False
def repos_to_notify():
    import os
    from .bl_extension_utils import (
        repo_index_outdated,
        scandir_with_demoted_errors,
        PKG_MANIFEST_FILENAME_TOML,
    )

    repos_notify = []
    if not bpy.app.background:
        # To use notifications on startup requires:
        # - The splash displayed.
        # - The status bar displayed.
    do_online_sync = False

    if bpy.app.background:
        return repos_notify, do_online_sync

    # To use notifications on startup requires:
    # - The splash displayed.
    # - The status bar displayed.
    #
    # Since it's not all that common to disable the status bar just run notifications
    # if any repositories are marked to run notifications.

    prefs = bpy.context.preferences
    extension_repos = prefs.extensions.repos
    for repo_item in extension_repos:
        if not repo_item.enabled:
            continue
        if not repo_item.use_sync_on_startup:
            continue
        if not repo_item.use_remote_url:
            continue
        remote_url = repo_item.remote_url
        # Invalid, if there is no remote path this can't update.
        if not remote_url:
            continue

        # WARNING: this could be a more expensive check, use a "reasonable" guess.
        # This is technically incorrect because knowing if a repository has any installed
        # packages requires reading its meta-data and comparing it with the directory contents.
        # Chances are - if the directory contains *any* directories containing a package manifest
        # this means it has packages installed.
        #
        # Since it's not all that common to disable the status bar just run notifications
        # if any repositories are marked to run notifications.
        # Simply check the repositories directory isn't empty (ignoring dot-files).
        # Importantly, this may be false positives but *not* false negatives.
        repo_is_empty = True
        repo_directory = repo_item.directory
        if os.path.isdir(repo_directory):
            for entry in scandir_with_demoted_errors(repo_directory):
                if not entry.is_dir():
                    continue
                if entry.name.startswith("."):
                    continue
                if not os.path.exists(os.path.join(entry.path, PKG_MANIFEST_FILENAME_TOML)):
                    continue
                repo_is_empty = False
                break
        if repo_is_empty:
            continue

    prefs = bpy.context.preferences
    extension_repos = prefs.extensions.repos
    for repo_item in extension_repos:
        if not repo_item.enabled:
            continue
        if not repo_item.use_sync_on_startup:
            continue
        if not repo_item.use_remote_url:
            continue
        remote_url = repo_item.remote_url
        # Invalid, if there is no remote path this can't update.
        if not remote_url:
            continue
        # NOTE: offline checks are handled by the notification (not here).
        repos_notify.append(
            bl_extension_ops.RepoItem(
                name=repo_item.name,
                directory=repo_directory,
                remote_url=remote_url,
                module=repo_item.module,
                use_cache=repo_item.use_cache,
                access_token=repo_item.access_token if repo_item.use_access_token else "",
            ),
        )

        # WARNING: this could be a more expensive check, use a "reasonable" guess.
        # This is technically incorrect because knowing if a repository has any installed
        # packages requires reading its meta-data and comparing it with the directory contents.
        # Chances are - if the directory contains *any* directories containing a package manifest
        # this means it has packages installed.
        #
        # Simply check the repositories directory isn't empty (ignoring dot-files).
        # Importantly, this may be false positives but *not* false negatives.
        repo_is_empty = True
        repo_directory = repo_item.directory
        if os.path.isdir(repo_directory):
            for entry in scandir_with_demoted_errors(repo_directory):
                if not entry.is_dir():
                    continue
                if entry.name.startswith("."):
                    continue
                if not os.path.exists(os.path.join(entry.path, PKG_MANIFEST_FILENAME_TOML)):
                    continue
                repo_is_empty = False
                break
        if repo_is_empty:
            continue
        # Update all repos together or none, to avoid bothering users
        # multiple times in a day.
        if repo_index_outdated(repo_item.directory):
            do_online_sync = True

        # NOTE: offline checks are handled by the notification (not here).
        repos_notify.append(repo_item)
    return repos_notify
    return repos_notify, do_online_sync


# -----------------------------------------------------------------------------
@@ -509,10 +529,12 @@ def register():
    cli_commands.append(bpy.utils.register_cli_command("extension", cli_extension))

    global use_repos_to_notify
    if (repos_notify := repos_to_notify()):
    repos_notify, do_online_sync = repos_to_notify()
    if repos_notify:
        use_repos_to_notify = True
        from . import bl_extension_notify
        bl_extension_notify.register(repos_notify)
        bl_extension_notify.register()
        bl_extension_notify.update_non_blocking(repos=repos_notify, do_online_sync=do_online_sync)
    del repos_notify

    monkeypatch_install()
@@ -2,11 +2,15 @@
#
# SPDX-License-Identifier: GPL-2.0-or-later

"""
Startup notifications.
"""
# Notifications used by:
# - The splash screen on startup.
# - The preferences when checking first displaying the extensions view.

__all__ = (
    "update_non_blocking",
    "update_in_progress",
    "update_ui_text",

    "register",
    "unregister",
)
@@ -74,6 +78,26 @@ def sync_status_count_outdated_extensions(repos_notify):
#
# This is a black-box which handles running the updates, yielding status text.

def sync_calc_stale_repo_directories(repos_notify):
    # Check for the unlikely event that the state of repositories has changed since checking for updates began.
    # Do this by checking for directories since renaming or even disabling a repository need not prevent the
    # listing from being updated. Only detect changes to the (directory + URL) which define the source/destination.
    repo_state_from_prefs = set(
        (repo.directory, repo.remote_url)
        for repo in bpy.context.preferences.extensions.repos
    )
    repo_state_from_notify = set(
        (repo.directory, repo.remote_url)
        for repo in repos_notify
    )

    repo_directories_skip = set()
    for directory, _remote_url in (repo_state_from_notify - repo_state_from_prefs):
        repo_directories_skip.add(directory)

    return repo_directories_skip


def sync_apply_locked(repos_notify, repos_notify_files, unique_ext):
    """
    Move files with a unique extension to their final location
@@ -93,7 +117,11 @@ def sync_apply_locked(repos_notify, repos_notify_files, unique_ext):
    # Blender and even then the user would need to be *lucky*.
    from . import cookie_from_session

    repo_directories_stale = sync_calc_stale_repo_directories(repos_notify)

    any_lock_errors = False
    any_stale_errors = False

    repo_directories = [repo_item.directory for repo_item in repos_notify]
    with bl_extension_utils.RepoLockContext(
            repo_directories=repo_directories,
@@ -103,9 +131,18 @@ def sync_apply_locked(repos_notify, repos_notify_files, unique_ext):
            repo_files = [os.path.join(directory, filepath_rel) for filepath_rel in repo_files]

            # If locking failed, remove the temporary files that were written to.
            if (lock_result_for_repo := lock_result[directory]) is not None:
            has_error = False
            if directory in repo_directories_stale:
                # Unlikely, but possible: repositories change or are removed after the check starts.
                sys.stderr.write("Warning \"{:s}\" has changed or been removed (skipping)\n".format(directory))
                any_stale_errors = True
                has_error = True
            elif (lock_result_for_repo := lock_result[directory]) is not None:
                sys.stderr.write("Warning \"{:s}\" locking \"{:s}\"\n".format(lock_result_for_repo, directory))
                any_lock_errors = True
                has_error = True

            if has_error:
                for filepath in repo_files:
                    # Don't check this exists as it always should, showing an error if it doesn't is fine.
                    try:
@@ -130,10 +167,11 @@ def sync_apply_locked(repos_notify, repos_notify_files, unique_ext):
                    except Exception as ex:
                        sys.stderr.write("Failed to rename file: {:s}\n".format(str(ex)))

    return any_lock_errors
    return any_lock_errors, any_stale_errors
def sync_status_generator(repos_notify):
def sync_status_generator(repos_notify, do_online_sync):
    import atexit

    # Generator results...
    # -> None: do nothing.
@@ -168,26 +206,22 @@ def sync_status_generator(repos_notify):
        cmd_batch_partial.append(partial(
            bl_extension_utils.repo_sync,
            directory=repo_item.directory,
            remote_url=repo_item.remote_url,
            remote_name=repo_item.name,
            remote_url=bl_extension_ops.url_params_append_defaults(repo_item.remote_url),
            online_user_agent=bl_extension_ops.online_user_agent_from_blender(),
            access_token=repo_item.access_token,
            # Never sleep while there is no input, as this blocks Blender.
            use_idle=False,
            # Needed so the user can exit blender without warnings about a broken pipe.
            # TODO: write to a temporary location, once done:
            # There is no chance of corrupt data as the data isn't written directly to the target JSON.
            force_exit_ok=not USE_GRACEFUL_EXIT,
            dry_run=not do_online_sync,
            extension_override=unique_ext,
        ))

    yield None

    # repos_lock = [repo_item.directory for repo_item in self.repos_notify]

    # Lock repositories.
    # self.repo_lock = bl_extension_utils.RepoLock(repo_directories=repos_lock, cookie=cookie_from_session())

    import atexit

    cmd_batch = None

    def cmd_force_quit():
@@ -260,10 +294,17 @@ def sync_status_generator(repos_notify):
        if command_result.status_data_changed:
            extra_warnings = []
            if command_result.all_complete:
                any_lock_errors = sync_apply_locked(repos_notify, repos_notify_files, unique_ext)

                # ################### #
                # Finalize The Update #
                # ################### #
                any_lock_errors, any_stale_errors = sync_apply_locked(repos_notify, repos_notify_files, unique_ext)
                update_total = sync_status_count_outdated_extensions(repos_notify)
                if any_lock_errors:
                    extra_warnings.append(" Failed to acquire lock!")
                if any_stale_errors:
                    extra_warnings.append(" Unexpected change in repository!")

                if any_offline:
                    extra_warnings.append(" Skipping online repositories!")
            yield (cmd_batch.calc_status_data(), update_total, extra_warnings)
@@ -275,16 +316,8 @@ def sync_status_generator(repos_notify):

    atexit.unregister(cmd_force_quit)

    # ################### #
    # Finalize The Update #
    # ################### #

    yield None

    # Unlock repositories.
    # lock_result_any_failed_with_report(self, self.repo_lock.release(), report_type='WARNING')
    # self.repo_lock = None
# -----------------------------------------------------------------------------
# Private API
@@ -294,31 +327,65 @@ TIME_WAIT_INIT = 0.05
# The time between calling the timer.
TIME_WAIT_STEP = 0.1

state_text = (
    "Checking for updates...",
)


class NotifyHandle:
    __slots__ = (
        "splash_region",
        "state",

        "sync_generator",
        "sync_info",
        "do_online_sync",

        "_repos",
        "is_complete",
        "_sync_generator",
    )

    def __init__(self, repos_notify):
    def __init__(self, repos_notify, do_online_sync):
        self.splash_region = None
        self.state = 0
        # We could start the generator separately, this seems OK here for now.
        self.sync_generator = iter(sync_status_generator(repos_notify))
        self._repos = repos_notify
        self._sync_generator = None
        self.is_complete = False
        # status_data, update_count, extra_warnings.
        self.sync_info = None
        self.do_online_sync = do_online_sync

    def run(self):
        assert self._sync_generator is None
        self._sync_generator = iter(sync_status_generator(self._repos, self.do_online_sync))

    def run_ensure(self):
        if self.is_running():
            return
        self.run()

    def run_step(self):
        assert self._sync_generator is not None
        sync_info = next(self._sync_generator, ...)
        if sync_info is ...:
            self.is_complete = True
        if isinstance(sync_info, tuple):
            self.sync_info = sync_info
        return sync_info

    def is_running(self):
        return self._sync_generator is not None

    def ui_text(self):
        if self.sync_info is None:
            return "Checking for Extension Updates", 'NONE', 0
        status_data, update_count, extra_warnings = self.sync_info
        do_online_sync = self.do_online_sync
        text, icon = bl_extension_utils.CommandBatch.calc_status_text_icon_from_data(
            status_data, update_count, do_online_sync,
        )
        # Not more than 1-2 of these (failed to lock, some repositories offline .. etc).
        for warning in extra_warnings:
            text = text + warning
        return text, icon, update_count


# When non-null, the timer is running.
_notify = None
# A list of `NotifyHandle`, only the first item is allowed to be running.
_notify_queue = []


def _region_exists(region):
@@ -334,54 +401,74 @@ def _region_exists(region):
    return exists


def _ui_refresh_apply(*, notify):
    if notify.splash_region is not None:
        # Check if the splash_region is valid.
        if not _region_exists(notify.splash_region):
            notify.splash_region = None
            return None
        notify.splash_region.tag_redraw()
        notify.splash_region.tag_refresh_ui()

    # Ensure the preferences are redrawn when the update is complete.
    if bpy.context.preferences.active_section == 'EXTENSIONS':
        for wm in bpy.data.window_managers:
            for win in wm.windows:
                for area in win.screen.areas:
                    if area.type != 'PREFERENCES':
                        continue
                    for region in area.regions:
                        if region.type != 'WINDOW':
                            continue
                        region.tag_redraw()


def _ui_refresh_timer():
    if _notify is None:
    if not _notify_queue:
        return None

    notify = _notify_queue[0]
    notify.run_ensure()

    default_wait = TIME_WAIT_STEP

    sync_info = next(_notify.sync_generator, ...)
    # If the generator exited, early exit here.
    if notify.is_complete:
        sync_info = ...
    else:
        sync_info = notify.run_step()
        if sync_info is None:
            # Nothing changed, no action is needed (waiting for a response).
            return default_wait

    # If the generator exited, either step to the next action or early exit here.
    if sync_info is ...:
        return None
    if sync_info is None:
        # Nothing changed, no action is needed (waiting for a response).
        _ui_refresh_apply(notify=notify)
        if len(_notify_queue) <= 1:
            # Keep the item because the text should remain displayed for the splash.
            return None
        # Move onto the next item.
        del _notify_queue[0]
        return default_wait

    # Re-display.