Merge branch 'blender-v3.0-release'
@@ -233,6 +233,7 @@ def list_render_passes(scene, srl):
     if crl.denoising_store_passes:
         yield ("Denoising Normal", "XYZ", 'VECTOR')
         yield ("Denoising Albedo", "RGB", 'COLOR')
+        yield ("Denoising Depth", "Z", 'VALUE')
 
     # Custom AOV passes.
     for aov in srl.aovs:
@@ -1360,7 +1360,7 @@ class CyclesPreferences(bpy.types.AddonPreferences):
            elif entry.type == 'CPU':
                cpu_devices.append(entry)
        # Extend all GPU devices with CPU.
-       if compute_device_type != 'CPU':
+       if len(devices) and compute_device_type != 'CPU':
            devices.extend(cpu_devices)
        return devices
 
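Illustrative sketch (not part of the diff): the guard above only appends the CPU fallback entries when the selected backend actually enumerated at least one GPU device. A minimal standalone Python rendering of that check, with hypothetical names:

def combine_devices(gpu_devices, cpu_devices, compute_device_type):
    # Start from whatever devices were enumerated for the selected backend.
    devices = list(gpu_devices)
    # Only extend with CPU entries when GPUs exist and the backend is not CPU-only.
    if len(devices) and compute_device_type != 'CPU':
        devices.extend(cpu_devices)
    return devices

assert combine_devices([], ["CPU"], 'CUDA') == []              # no GPUs found
assert combine_devices(["GPU"], ["CPU"], 'CUDA') == ["GPU", "CPU"]

Under this sketch, an empty GPU list stays empty instead of being padded with CPU entries.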
@@ -1378,12 +1378,18 @@ class CyclesPreferences(bpy.types.AddonPreferences):
        self.refresh_devices()
        return None
 
+    def get_compute_device_type(self):
+        if self.compute_device_type == '':
+            return 'NONE'
+        return self.compute_device_type
+
    def get_num_gpu_devices(self):
        import _cycles
-       device_list = _cycles.available_devices(self.compute_device_type)
+       compute_device_type = self.get_compute_device_type()
+       device_list = _cycles.available_devices(compute_device_type)
        num = 0
        for device in device_list:
-           if device[1] != self.compute_device_type:
+           if device[1] != compute_device_type:
                continue
            for dev in self.devices:
                if dev.use and dev.id == device[2]:
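Illustrative sketch (not part of the diff): the get_compute_device_type() accessor added above normalizes the legacy empty-string preference to 'NONE', so device counting and UI drawing can compare against a single sentinel value. A standalone Python sketch of the same pattern (the class name is hypothetical):

class PreferencesSketch:
    # Hypothetical stand-in for the add-on preferences; only the accessor matters here.
    def __init__(self, compute_device_type=''):
        self.compute_device_type = compute_device_type

    def get_compute_device_type(self):
        # Treat the unset legacy value as 'NONE'.
        if self.compute_device_type == '':
            return 'NONE'
        return self.compute_device_type

assert PreferencesSketch('').get_compute_device_type() == 'NONE'
assert PreferencesSketch('CUDA').get_compute_device_type() == 'CUDA'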
@@ -1425,15 +1431,16 @@ class CyclesPreferences(bpy.types.AddonPreferences):
        row = layout.row()
        row.prop(self, "compute_device_type", expand=True)
 
-       if self.compute_device_type == 'NONE':
+       compute_device_type = self.get_compute_device_type()
+       if compute_device_type == 'NONE':
            return
        row = layout.row()
-       devices = self.get_devices_for_type(self.compute_device_type)
-       self._draw_devices(row, self.compute_device_type, devices)
+       devices = self.get_devices_for_type(compute_device_type)
+       self._draw_devices(row, compute_device_type, devices)
 
        import _cycles
        has_peer_memory = 0
-       for device in _cycles.available_devices(self.compute_device_type):
+       for device in _cycles.available_devices(compute_device_type):
            if device[3] and self.find_existing_device_entry(device).use:
                has_peer_memory += 1
        if has_peer_memory > 1:
@@ -86,7 +86,7 @@ def do_versions(self):
        # Device might not currently be available so this can fail
        try:
            if system.legacy_compute_device_type == 1:
-               prop.compute_device_type = 'OPENCL'
+               prop.compute_device_type = 'NONE'  # Was OpenCL
            elif system.legacy_compute_device_type == 2:
                prop.compute_device_type = 'CUDA'
            else:
@@ -97,6 +97,12 @@ def do_versions(self):
        # Init device list for UI
        prop.get_devices(prop.compute_device_type)
 
+   if bpy.context.preferences.version <= (3, 0, 40):
+       # Disable OpenCL device
+       prop = bpy.context.preferences.addons[__package__].preferences
+       if prop['compute_device_type'] == 4:
+           prop.compute_device_type = 'NONE'
+
    # We don't modify startup file because it assumes to
    # have all the default values only.
    if not bpy.data.is_saved:
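Illustrative sketch (not part of the diff): the versioning block above resets a preference that still stores the removed OpenCL backend (raw enum value 4, per the hunk) back to 'NONE', gated on preferences saved at version (3, 0, 40) or older. A standalone Python sketch of that version-gated migration, with hypothetical names:

LEGACY_OPENCL_ENUM = 4  # assumption: raw value the removed 'OPENCL' item used

def migrate_compute_device_type(raw_value, prefs_version):
    # Older preference files may still carry the removed OpenCL entry.
    if prefs_version <= (3, 0, 40) and raw_value == LEGACY_OPENCL_ENUM:
        return 'NONE'
    return raw_value

assert migrate_compute_device_type(4, (3, 0, 40)) == 'NONE'
assert migrate_compute_device_type('CUDA', (3, 1, 0)) == 'CUDA'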
@@ -639,7 +639,7 @@ void BlenderSync::sync_camera_motion(
   /* TODO(sergey): De-duplicate calculation with camera sync. */
   float fov = 2.0f * atanf((0.5f * sensor_size) / bcam.lens / aspectratio);
   if (fov != cam->get_fov()) {
-    VLOG(1) << "Camera " << b_ob.name() << " FOV change detected.";
+    VLOG(3) << "Camera " << b_ob.name() << " FOV change detected.";
     if (motion_time == 0.0f) {
       cam->set_fov(fov);
     }
@@ -304,10 +304,6 @@ static void ExportCurveSegments(Scene *scene, Hair *hair, ParticleCurveData *CDa
     }
   }
 
-  if (num_curves > 0) {
-    VLOG(1) << "Exporting curve segments for mesh " << hair->name;
-  }
-
   hair->reserve_curves(hair->num_curves() + num_curves, hair->get_curve_keys().size() + num_keys);
 
   num_keys = 0;
@@ -356,7 +352,7 @@ static void ExportCurveSegments(Scene *scene, Hair *hair, ParticleCurveData *CDa
 
   /* check allocation */
   if ((hair->get_curve_keys().size() != num_keys) || (hair->num_curves() != num_curves)) {
-    VLOG(1) << "Allocation failed, clearing data";
+    VLOG(1) << "Hair memory allocation failed, clearing data.";
     hair->clear(true);
   }
 }
@@ -412,16 +408,11 @@ static void export_hair_motion_validate_attribute(Hair *hair,
   if (num_motion_keys != num_keys || !have_motion) {
     /* No motion or hair "topology" changed, remove attributes again. */
-    if (num_motion_keys != num_keys) {
-      VLOG(1) << "Hair topology changed, removing attribute.";
-    }
-    else {
-      VLOG(1) << "No motion, removing attribute.";
-    }
+    VLOG(1) << "Hair topology changed, removing motion attribute.";
     hair->attributes.remove(ATTR_STD_MOTION_VERTEX_POSITION);
   }
   else if (motion_step > 0) {
     VLOG(1) << "Filling in new motion vertex position for motion_step " << motion_step;
 
     /* Motion, fill up previous steps that we might have skipped because
      * they had no motion, but we need them anyway now. */
     for (int step = 0; step < motion_step; step++) {
@@ -437,16 +428,12 @@ static void export_hair_motion_validate_attribute(Hair *hair,
 
 static void ExportCurveSegmentsMotion(Hair *hair, ParticleCurveData *CData, int motion_step)
 {
-  VLOG(1) << "Exporting curve motion segments for hair " << hair->name << ", motion step "
-          << motion_step;
-
   /* find attribute */
   Attribute *attr_mP = hair->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
   bool new_attribute = false;
 
   /* add new attribute if it doesn't exist already */
   if (!attr_mP) {
-    VLOG(1) << "Creating new motion vertex position attribute";
     attr_mP = hair->attributes.add(ATTR_STD_MOTION_VERTEX_POSITION);
     new_attribute = true;
   }
@@ -682,10 +669,6 @@ static void export_hair_curves(Scene *scene, Hair *hair, BL::Hair b_hair)
   const int num_keys = b_hair.points.length();
   const int num_curves = b_hair.curves.length();
 
-  if (num_curves > 0) {
-    VLOG(1) << "Exporting curve segments for hair " << hair->name;
-  }
-
   hair->reserve_curves(num_curves, num_keys);
 
   /* Export curves and points. */
@@ -743,15 +726,11 @@ static void export_hair_curves(Scene *scene, Hair *hair, BL::Hair b_hair)
 
 static void export_hair_curves_motion(Hair *hair, BL::Hair b_hair, int motion_step)
 {
-  VLOG(1) << "Exporting curve motion segments for hair " << hair->name << ", motion step "
-          << motion_step;
-
   /* Find or add attribute. */
   Attribute *attr_mP = hair->attributes.find(ATTR_STD_MOTION_VERTEX_POSITION);
   bool new_attribute = false;
 
   if (!attr_mP) {
-    VLOG(1) << "Creating new motion vertex position attribute";
     attr_mP = hair->attributes.add(ATTR_STD_MOTION_VERTEX_POSITION);
     new_attribute = true;
   }
@@ -157,8 +157,6 @@ static PyObject *init_func(PyObject * /*self*/, PyObject *args)
 
   DebugFlags().running_inside_blender = true;
 
-  VLOG(2) << "Debug flags initialized to:\n" << DebugFlags();
-
   Py_RETURN_NONE;
 }
 
@@ -885,8 +883,6 @@ static PyObject *debug_flags_update_func(PyObject * /*self*/, PyObject *args)
 
   debug_flags_sync_from_scene(b_scene);
 
-  VLOG(2) << "Debug flags set to:\n" << DebugFlags();
-
   debug_flags_set = true;
 
   Py_RETURN_NONE;
@@ -896,7 +892,6 @@ static PyObject *debug_flags_reset_func(PyObject * /*self*/, PyObject * /*args*/
 {
   debug_flags_reset();
   if (debug_flags_set) {
-    VLOG(2) << "Debug flags reset to:\n" << DebugFlags();
     debug_flags_set = false;
   }
   Py_RETURN_NONE;
@@ -366,7 +366,9 @@ void BlenderSync::sync_integrator(BL::ViewLayer &b_view_layer, bool background)
   if ((preview && !preview_scrambling_distance) || use_adaptive_sampling)
     scrambling_distance = 1.0f;
 
-  VLOG(1) << "Used Scrambling Distance: " << scrambling_distance;
+  if (scrambling_distance != 1.0f) {
+    VLOG(3) << "Using scrambling distance: " << scrambling_distance;
+  }
   integrator->set_scrambling_distance(scrambling_distance);
 
   if (get_boolean(cscene, "use_fast_gi")) {
@@ -68,8 +68,7 @@ CPUDevice::CPUDevice(const DeviceInfo &info_, Stats &stats_, Profiler &profiler_
 {
   /* Pick any kernel, all of them are supposed to have same level of microarchitecture
    * optimization. */
-  VLOG(1) << "Will be using " << kernels.integrator_init_from_camera.get_uarch_name()
-          << " kernels.";
+  VLOG(1) << "Using " << kernels.integrator_init_from_camera.get_uarch_name() << " CPU kernels.";
 
   if (info.cpu_threads == 0) {
     info.cpu_threads = TaskScheduler::num_threads();
@@ -378,7 +378,9 @@ string CUDADevice::compile_kernel(const uint kernel_features,
                                  cubin.c_str(),
                                  common_cflags.c_str());
 
-  printf("Compiling CUDA kernel ...\n%s\n", command.c_str());
+  printf("Compiling %sCUDA kernel ...\n%s\n",
+         (use_adaptive_compilation()) ? "adaptive " : "",
+         command.c_str());
 
 # ifdef _WIN32
   command = "call " + command;
@@ -405,13 +407,15 @@ string CUDADevice::compile_kernel(const uint kernel_features,
 
 bool CUDADevice::load_kernels(const uint kernel_features)
 {
-  /* TODO(sergey): Support kernels re-load for CUDA devices.
+  /* TODO(sergey): Support kernels re-load for CUDA devices adaptive compile.
    *
    * Currently re-loading kernel will invalidate memory pointers,
    * causing problems in cuCtxSynchronize.
    */
   if (cuModule) {
-    VLOG(1) << "Skipping kernel reload, not currently supported.";
+    if (use_adaptive_compilation()) {
+      VLOG(1) << "Skipping CUDA kernel reload for adaptive compilation, not currently supported.";
+    }
     return true;
   }
 
@@ -360,7 +360,9 @@ string HIPDevice::compile_kernel(const uint kernel_features,
                                  source_path.c_str(),
                                  fatbin.c_str());
 
-  printf("Compiling HIP kernel ...\n%s\n", command.c_str());
+  printf("Compiling %sHIP kernel ...\n%s\n",
+         (use_adaptive_compilation()) ? "adaptive " : "",
+         command.c_str());
 
 # ifdef _WIN32
   command = "call " + command;
@@ -387,13 +389,15 @@ string HIPDevice::compile_kernel(const uint kernel_features,
 
 bool HIPDevice::load_kernels(const uint kernel_features)
 {
-  /* TODO(sergey): Support kernels re-load for HIP devices.
+  /* TODO(sergey): Support kernels re-load for CUDA devices adaptive compile.
    *
    * Currently re-loading kernel will invalidate memory pointers,
-   * causing problems in hipCtxSynchronize.
+   * causing problems in cuCtxSynchronize.
    */
   if (hipModule) {
-    VLOG(1) << "Skipping kernel reload, not currently supported.";
+    if (use_adaptive_compilation()) {
+      VLOG(1) << "Skipping HIP kernel reload for adaptive compilation, not currently supported.";
+    }
     return true;
   }
 
@@ -91,6 +91,7 @@ OptiXDevice::OptiXDevice(const DeviceInfo &info, Stats &stats, Profiler &profile
   };
 # endif
   if (DebugFlags().optix.use_debug) {
+    VLOG(1) << "Using OptiX debug mode.";
     options.validationMode = OPTIX_DEVICE_CONTEXT_VALIDATION_MODE_ALL;
   }
   optix_assert(optixDeviceContextCreate(cuContext, &options, &context));
@@ -29,23 +29,11 @@ unique_ptr<Denoiser> Denoiser::create(Device *path_trace_device, const DenoisePa
 {
   DCHECK(params.use);
 
-  switch (params.type) {
-    case DENOISER_OPTIX:
-      return make_unique<OptiXDenoiser>(path_trace_device, params);
-
-    case DENOISER_OPENIMAGEDENOISE:
-      return make_unique<OIDNDenoiser>(path_trace_device, params);
-
-    case DENOISER_NUM:
-    case DENOISER_NONE:
-    case DENOISER_ALL:
-      /* pass */
-      break;
+  if (params.type == DENOISER_OPTIX && Device::available_devices(DEVICE_MASK_OPTIX).size()) {
+    return make_unique<OptiXDenoiser>(path_trace_device, params);
   }
 
-  LOG(FATAL) << "Unhandled denoiser type " << params.type << ", should never happen.";
-
-  return nullptr;
+  return make_unique<OIDNDenoiser>(path_trace_device, params);
 }
 
 Denoiser::Denoiser(Device *path_trace_device, const DenoiseParams &params)
@@ -807,10 +807,10 @@ bool PathTraceWorkGPU::should_use_graphics_interop()
   interop_use_ = device->should_use_graphics_interop();
 
   if (interop_use_) {
-    VLOG(2) << "Will be using graphics interop GPU display update.";
+    VLOG(2) << "Using graphics interop GPU display update.";
   }
   else {
-    VLOG(2) << "Will be using naive GPU display update.";
+    VLOG(2) << "Using naive GPU display update.";
   }
 
   interop_use_checked_ = true;
@@ -20,7 +20,7 @@ KERNEL_STRUCT_BEGIN(shadow_path)
 /* Index of a pixel within the device render buffer. */
 KERNEL_STRUCT_MEMBER(shadow_path, uint32_t, render_pixel_index, KERNEL_FEATURE_PATH_TRACING)
 /* Current sample number. */
-KERNEL_STRUCT_MEMBER(shadow_path, uint16_t, sample, KERNEL_FEATURE_PATH_TRACING)
+KERNEL_STRUCT_MEMBER(shadow_path, uint32_t, sample, KERNEL_FEATURE_PATH_TRACING)
 /* Random number generator seed. */
 KERNEL_STRUCT_MEMBER(shadow_path, uint32_t, rng_hash, KERNEL_FEATURE_PATH_TRACING)
 /* Random number dimension offset. */
@@ -25,7 +25,7 @@ KERNEL_STRUCT_BEGIN(path)
  * The multiplication is delayed for later, so that state can use 32bit integer. */
 KERNEL_STRUCT_MEMBER(path, uint32_t, render_pixel_index, KERNEL_FEATURE_PATH_TRACING)
 /* Current sample number. */
-KERNEL_STRUCT_MEMBER(path, uint16_t, sample, KERNEL_FEATURE_PATH_TRACING)
+KERNEL_STRUCT_MEMBER(path, uint32_t, sample, KERNEL_FEATURE_PATH_TRACING)
 /* Current ray bounce depth. */
 KERNEL_STRUCT_MEMBER(path, uint16_t, bounce, KERNEL_FEATURE_PATH_TRACING)
 /* Current transparent ray bounce depth. */
@@ -43,7 +43,7 @@ bool ConstantFolder::all_inputs_constant() const
 
 void ConstantFolder::make_constant(float value) const
 {
-  VLOG(1) << "Folding " << node->name << "::" << output->name() << " to constant (" << value
+  VLOG(3) << "Folding " << node->name << "::" << output->name() << " to constant (" << value
           << ").";
 
   foreach (ShaderInput *sock, output->links) {
@@ -56,7 +56,7 @@ void ConstantFolder::make_constant(float value) const
 
 void ConstantFolder::make_constant(float3 value) const
 {
-  VLOG(1) << "Folding " << node->name << "::" << output->name() << " to constant " << value << ".";
+  VLOG(3) << "Folding " << node->name << "::" << output->name() << " to constant " << value << ".";
 
   foreach (ShaderInput *sock, output->links) {
     sock->set(value);
@@ -112,7 +112,7 @@ void ConstantFolder::bypass(ShaderOutput *new_output) const
 {
   assert(new_output);
 
-  VLOG(1) << "Folding " << node->name << "::" << output->name() << " to socket "
+  VLOG(3) << "Folding " << node->name << "::" << output->name() << " to socket "
           << new_output->parent->name << "::" << new_output->name() << ".";
 
   /* Remove all outgoing links from socket and connect them to new_output instead.
@@ -131,7 +131,7 @@ void ConstantFolder::discard() const
 {
   assert(output->type() == SocketType::CLOSURE);
 
-  VLOG(1) << "Discarding closure " << node->name << ".";
+  VLOG(3) << "Discarding closure " << node->name << ".";
 
   graph->disconnect(output);
 }
@@ -2397,7 +2397,7 @@ void GlossyBsdfNode::simplify_settings(Scene *scene)
    * Note: Keep the epsilon in sync with kernel!
    */
   if (!roughness_input->link && roughness <= 1e-4f) {
-    VLOG(1) << "Using sharp glossy BSDF.";
+    VLOG(3) << "Using sharp glossy BSDF.";
     distribution = CLOSURE_BSDF_REFLECTION_ID;
   }
 }
@@ -2406,7 +2406,7 @@ void GlossyBsdfNode::simplify_settings(Scene *scene)
    * benefit from closure blur to remove unwanted noise.
    */
   if (roughness_input->link == NULL && distribution == CLOSURE_BSDF_REFLECTION_ID) {
-    VLOG(1) << "Using GGX glossy with filter glossy.";
+    VLOG(3) << "Using GGX glossy with filter glossy.";
     distribution = CLOSURE_BSDF_MICROFACET_GGX_ID;
     roughness = 0.0f;
   }
@@ -2490,7 +2490,7 @@ void GlassBsdfNode::simplify_settings(Scene *scene)
    * Note: Keep the epsilon in sync with kernel!
    */
   if (!roughness_input->link && roughness <= 1e-4f) {
-    VLOG(1) << "Using sharp glass BSDF.";
+    VLOG(3) << "Using sharp glass BSDF.";
     distribution = CLOSURE_BSDF_SHARP_GLASS_ID;
   }
 }
@@ -2499,7 +2499,7 @@ void GlassBsdfNode::simplify_settings(Scene *scene)
    * benefit from closure blur to remove unwanted noise.
    */
   if (roughness_input->link == NULL && distribution == CLOSURE_BSDF_SHARP_GLASS_ID) {
-    VLOG(1) << "Using GGX glass with filter glossy.";
+    VLOG(3) << "Using GGX glass with filter glossy.";
     distribution = CLOSURE_BSDF_MICROFACET_GGX_GLASS_ID;
     roughness = 0.0f;
   }
@@ -2583,7 +2583,7 @@ void RefractionBsdfNode::simplify_settings(Scene *scene)
    * Note: Keep the epsilon in sync with kernel!
    */
   if (!roughness_input->link && roughness <= 1e-4f) {
-    VLOG(1) << "Using sharp refraction BSDF.";
+    VLOG(3) << "Using sharp refraction BSDF.";
     distribution = CLOSURE_BSDF_REFRACTION_ID;
   }
 }
@@ -2592,7 +2592,7 @@ void RefractionBsdfNode::simplify_settings(Scene *scene)
    * benefit from closure blur to remove unwanted noise.
    */
   if (roughness_input->link == NULL && distribution == CLOSURE_BSDF_REFRACTION_ID) {
-    VLOG(1) << "Using GGX refraction with filter glossy.";
+    VLOG(3) << "Using GGX refraction with filter glossy.";
     distribution = CLOSURE_BSDF_MICROFACET_GGX_REFRACTION_ID;
     roughness = 0.0f;
   }
@@ -64,7 +64,7 @@ void SVMShaderManager::device_update_shader(Scene *scene,
   compiler.background = (shader == scene->background->get_shader(scene));
   compiler.compile(shader, *svm_nodes, 0, &summary);
 
-  VLOG(2) << "Compilation summary:\n"
+  VLOG(3) << "Compilation summary:\n"
           << "Shader name: " << shader->name << "\n"
          << summary.full_report();
 }
@@ -99,26 +99,4 @@ void DebugFlags::reset()
   optix.reset();
 }
 
-std::ostream &operator<<(std::ostream &os, DebugFlagsConstRef debug_flags)
-{
-  os << "CPU flags:\n"
-     << "  AVX2       : " << string_from_bool(debug_flags.cpu.avx2) << "\n"
-     << "  AVX        : " << string_from_bool(debug_flags.cpu.avx) << "\n"
-     << "  SSE4.1     : " << string_from_bool(debug_flags.cpu.sse41) << "\n"
-     << "  SSE3       : " << string_from_bool(debug_flags.cpu.sse3) << "\n"
-     << "  SSE2       : " << string_from_bool(debug_flags.cpu.sse2) << "\n"
-     << "  BVH layout : " << bvh_layout_name(debug_flags.cpu.bvh_layout) << "\n";
-
-  os << "CUDA flags:\n"
-     << "  Adaptive Compile : " << string_from_bool(debug_flags.cuda.adaptive_compile) << "\n";
-
-  os << "OptiX flags:\n"
-     << "  Debug : " << string_from_bool(debug_flags.optix.use_debug) << "\n";
-
-  os << "HIP flags:\n"
-     << "  HIP streams : " << string_from_bool(debug_flags.hip.adaptive_compile) << "\n";
-
-  return os;
-}
-
 CCL_NAMESPACE_END
@@ -160,8 +160,6 @@ inline DebugFlags &DebugFlags()
   return DebugFlags::get();
 }
 
-std::ostream &operator<<(std::ostream &os, DebugFlagsConstRef debug_flags);
-
 CCL_NAMESPACE_END
 
 #endif /* __UTIL_DEBUG_H__ */
@@ -42,7 +42,7 @@ typedef enum eGPUBarrier {
   GPU_BARRIER_SHADER_STORAGE = (1 << 2),
 } eGPUBarrier;
 
-ENUM_OPERATORS(eGPUBarrier, GPU_BARRIER_TEXTURE_FETCH)
+ENUM_OPERATORS(eGPUBarrier, GPU_BARRIER_SHADER_STORAGE)
 
 /**
  * Defines the fixed pipeline blending equation.