forked from blender/blender
BLEN-335: Export environment light #1

source/blender/render/hydra/CMakeLists.txt
@@ -52,7 +52,8 @@ set(SRC
  finalEngine.cc
  viewportEngine.h
  viewportEngine.cc

  camera.h
  camera.cc
  utils.h
  utils.cc

@@ -63,8 +64,6 @@ set(SRC
  sceneDelegate/blenderSceneDelegate.cc
  sceneDelegate/object.h
  sceneDelegate/object.cc
  sceneDelegate/scene.h
  sceneDelegate/scene.cc
  sceneDelegate/material.h
  sceneDelegate/material.cc
  sceneDelegate/world.h
source/blender/render/hydra/camera.cc (new file, 279 lines)
@@ -0,0 +1,279 @@
/* SPDX-License-Identifier: Apache-2.0
 * Copyright 2011-2022 Blender Foundation */

#include "DNA_camera_types.h"

#include "camera.h"
#include "utils.h"

using namespace pxr;

namespace blender::render::hydra {

CameraData::CameraData(Object *camera_obj, GfVec2i res, GfVec4f tile)
{
  Camera *camera = (Camera *)camera_obj->data;

  float t_pos[2] = {tile[0], tile[1]};
  float t_size[2] = {tile[2], tile[3]};
  transform = gf_matrix_from_transform(camera_obj->object_to_world);
  clip_range = GfRange1f(camera->clip_start, camera->clip_end);
  mode = camera->type;

  if (camera->dof.flag & CAM_DOF_ENABLED) {
    float focus_distance;
    if (!camera->dof.focus_object) {
      focus_distance = camera->dof.focus_distance;
    }
    else {
      GfVec3f obj_pos(camera->dof.focus_object->object_to_world[0][3],
                      camera->dof.focus_object->object_to_world[1][3],
                      camera->dof.focus_object->object_to_world[2][3]);
      GfVec3f cam_pos(transform[0][3], transform[1][3], transform[2][3]);
      focus_distance = (obj_pos - cam_pos).GetLength();
    }

    dof_data = std::tuple(std::max(focus_distance, 0.001f),
                          camera->dof.aperture_fstop,
                          camera->dof.aperture_blades);
  }

  float ratio = (float)res[0] / res[1];

  switch (camera->sensor_fit) {
    case CAMERA_SENSOR_FIT_VERT:
      lens_shift = GfVec2f(camera->shiftx / ratio, camera->shifty);
      break;
    case CAMERA_SENSOR_FIT_HOR:
      lens_shift = GfVec2f(camera->shiftx, camera->shifty * ratio);
      break;
    case CAMERA_SENSOR_FIT_AUTO:
      if (ratio > 1.0f) {
        lens_shift = GfVec2f(camera->shiftx, camera->shifty * ratio);
      }
      else {
        lens_shift = GfVec2f(camera->shiftx / ratio, camera->shifty);
      }
      break;
    default:
      lens_shift = GfVec2f(camera->shiftx, camera->shifty);
      break;
  }

  lens_shift = GfVec2f(lens_shift[0] / t_size[0] + (t_pos[0] + t_size[0] * 0.5 - 0.5) / t_size[0],
                       lens_shift[1] / t_size[1] + (t_pos[1] + t_size[1] * 0.5 - 0.5) / t_size[1]);

  switch (camera->type) {
    case CAM_PERSP:
      focal_length = camera->lens;

      switch (camera->sensor_fit) {
        case CAMERA_SENSOR_FIT_VERT:
          sensor_size = GfVec2f(camera->sensor_y * ratio, camera->sensor_y);
          break;
        case CAMERA_SENSOR_FIT_HOR:
          sensor_size = GfVec2f(camera->sensor_x, camera->sensor_x / ratio);
          break;
        case CAMERA_SENSOR_FIT_AUTO:
          if (ratio > 1.0f) {
            sensor_size = GfVec2f(camera->sensor_x, camera->sensor_x / ratio);
          }
          else {
            sensor_size = GfVec2f(camera->sensor_x * ratio, camera->sensor_x);
          }
          break;
        default:
          sensor_size = GfVec2f(camera->sensor_x, camera->sensor_y);
          break;
      }
      sensor_size = GfVec2f(sensor_size[0] * t_size[0], sensor_size[1] * t_size[1]);
      break;

    case CAM_ORTHO:
      focal_length = 0.0f;
      switch (camera->sensor_fit) {
        case CAMERA_SENSOR_FIT_VERT:
          ortho_size = GfVec2f(camera->ortho_scale * ratio, camera->ortho_scale);
          break;
        case CAMERA_SENSOR_FIT_HOR:
          ortho_size = GfVec2f(camera->ortho_scale, camera->ortho_scale / ratio);
          break;
        case CAMERA_SENSOR_FIT_AUTO:
          if (ratio > 1.0f) {
            ortho_size = GfVec2f(camera->ortho_scale, camera->ortho_scale / ratio);
          }
          else {
            ortho_size = GfVec2f(camera->ortho_scale * ratio, camera->ortho_scale);
          }
          break;
        default:
          ortho_size = GfVec2f(camera->ortho_scale, camera->ortho_scale);
          break;
      }
      ortho_size = GfVec2f(ortho_size[0] * t_size[0], ortho_size[1] * t_size[1]);
      break;

    case CAM_PANO:
      /* TODO: Recheck parameters for PANO camera */
      focal_length = camera->lens;

      switch (camera->sensor_fit) {
        case CAMERA_SENSOR_FIT_VERT:
          sensor_size = GfVec2f(camera->sensor_y * ratio, camera->sensor_y);
          break;
        case CAMERA_SENSOR_FIT_HOR:
          sensor_size = GfVec2f(camera->sensor_x, camera->sensor_x / ratio);
          break;
        case CAMERA_SENSOR_FIT_AUTO:
          if (ratio > 1.0f) {
            sensor_size = GfVec2f(camera->sensor_x, camera->sensor_x / ratio);
          }
          else {
            sensor_size = GfVec2f(camera->sensor_x * ratio, camera->sensor_x);
          }
          break;
        default:
          sensor_size = GfVec2f(camera->sensor_x, camera->sensor_y);
          break;
      }
      sensor_size = GfVec2f(sensor_size[0] * t_size[0], sensor_size[1] * t_size[1]);
      /* A break was missing here; without it execution falls through to the
       * default branch below and overwrites sensor_size. */
      break;

    default:
      focal_length = camera->lens;
      sensor_size = GfVec2f(camera->sensor_y * ratio, camera->sensor_y);
  }
}

CameraData::CameraData(BL::Context &b_context)
{
  // This constant was found experimentally; there is no matching option in
  // context.space_data or context.region_data.
  float VIEWPORT_SENSOR_SIZE = 72.0;

  BL::SpaceView3D space_data = (BL::SpaceView3D)b_context.space_data();
  BL::RegionView3D region_data = b_context.region_data();

  GfVec2i res(b_context.region().width(), b_context.region().height());
  float ratio = (float)res[0] / res[1];
  transform = gf_matrix_from_transform((float(*)[4])region_data.view_matrix().data).GetInverse();

  switch (region_data.view_perspective()) {
    case BL::RegionView3D::view_perspective_PERSP: {
      mode = CAM_PERSP;
      clip_range = GfRange1f(space_data.clip_start(), space_data.clip_end());
      lens_shift = GfVec2f(0.0, 0.0);
      focal_length = space_data.lens();

      if (ratio > 1.0) {
        sensor_size = GfVec2f(VIEWPORT_SENSOR_SIZE, VIEWPORT_SENSOR_SIZE / ratio);
      }
      else {
        sensor_size = GfVec2f(VIEWPORT_SENSOR_SIZE * ratio, VIEWPORT_SENSOR_SIZE);
      }
      break;
    }

    case BL::RegionView3D::view_perspective_ORTHO: {
      mode = CAM_ORTHO;
      lens_shift = GfVec2f(0.0f, 0.0f);

      float o_size = region_data.view_distance() * VIEWPORT_SENSOR_SIZE / space_data.lens();
      float o_depth = space_data.clip_end();

      clip_range = GfRange1f(-o_depth * 0.5, o_depth * 0.5);

      if (ratio > 1.0f) {
        ortho_size = GfVec2f(o_size, o_size / ratio);
      }
      else {
        ortho_size = GfVec2f(o_size * ratio, o_size);
      }
      break;
    }

    case BL::RegionView3D::view_perspective_CAMERA: {
      BL::Object camera_obj = space_data.camera();

      GfMatrix4d mat = transform;
      *this = CameraData((Object *)camera_obj.ptr.data, res, GfVec4f(0, 0, 1, 1));
      transform = mat;

      // This formula was taken from the previous plugin, with the corresponding comment.
      // See blender/intern/cycles/blender/blender_camera.cpp:blender_camera_from_view (look
      // for 1.41421f).
      float zoom = 4.0 / pow((pow(2.0, 0.5) + region_data.view_camera_zoom() / 50.0), 2);

      // Update lens_shift to account for viewport zoom and view_camera_offset;
      // view_camera_offset should be multiplied by 2.
      lens_shift = GfVec2f((lens_shift[0] + region_data.view_camera_offset()[0] * 2) / zoom,
                           (lens_shift[1] + region_data.view_camera_offset()[1] * 2) / zoom);

      if (mode == BL::Camera::type_ORTHO) {
        ortho_size *= zoom;
      }
      else {
        sensor_size *= zoom;
      }
      break;
    }

    default:
      break;
  }
}

GfCamera CameraData::gf_camera(GfVec4f tile)
{
  float t_pos[2] = {tile[0], tile[1]}, t_size[2] = {tile[2], tile[3]};

  GfCamera gf_camera = GfCamera();

  gf_camera.SetClippingRange(clip_range);

  float l_shift[2] = {(lens_shift[0] + t_pos[0] + t_size[0] * 0.5f - 0.5f) / t_size[0],
                      (lens_shift[1] + t_pos[1] + t_size[1] * 0.5f - 0.5f) / t_size[1]};

  switch (mode) {
    case CAM_PERSP:
    case CAM_PANO: {
      /* TODO: store panoramic camera settings */
      gf_camera.SetProjection(GfCamera::Projection::Perspective);
      gf_camera.SetFocalLength(focal_length);

      float s_size[2] = {sensor_size[0] * t_size[0], sensor_size[1] * t_size[1]};

      gf_camera.SetHorizontalAperture(s_size[0]);
      gf_camera.SetVerticalAperture(s_size[1]);

      gf_camera.SetHorizontalApertureOffset(l_shift[0] * s_size[0]);
      gf_camera.SetVerticalApertureOffset(l_shift[1] * s_size[1]);
      break;
    }
    case CAM_ORTHO: {
      gf_camera.SetProjection(GfCamera::Projection::Orthographic);

      // Use tenths of a world unit according to the USD docs:
      // https://graphics.pixar.com/usd/docs/api/class_gf_camera.html
      float o_size[2] = {ortho_size[0] * t_size[0] * 10, ortho_size[1] * t_size[1] * 10};

      gf_camera.SetHorizontalAperture(o_size[0]);
      gf_camera.SetVerticalAperture(o_size[1]);

      gf_camera.SetHorizontalApertureOffset(l_shift[0] * o_size[0]);
      gf_camera.SetVerticalApertureOffset(l_shift[1] * o_size[1]);
      break;
    }
    default:
      break;
  }

  gf_camera.SetTransform(transform);
  return gf_camera;
}

GfCamera CameraData::gf_camera()
{
  return gf_camera(GfVec4f(0, 0, 1, 1));
}

} // namespace blender::render::hydra
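A note on the tile remapping above: it is easiest to check with concrete numbers. Below is a minimal sketch of the same lens-shift arithmetic in plain C++, independent of the Blender and USD types; the tile values are made up for illustration.

#include <cstdio>

// Same remap as in CameraData: shift the aperture so that a tile of the
// full frame renders with the correct off-center projection.
int main()
{
  const float shift[2] = {0.0f, 0.0f};   // camera lens shift
  const float t_pos[2] = {0.0f, 0.0f};   // tile origin: lower-left quarter
  const float t_size[2] = {0.5f, 0.5f};  // tile covers half of each axis

  float remapped[2];
  for (int i = 0; i < 2; i++) {
    remapped[i] = shift[i] / t_size[i] + (t_pos[i] + t_size[i] * 0.5f - 0.5f) / t_size[i];
  }
  // Prints "-0.5 -0.5": the quarter tile looks at the lower-left of the frame.
  printf("%g %g\n", remapped[0], remapped[1]);
  return 0;
}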
source/blender/render/hydra/camera.h (new file, 37 lines)
@@ -0,0 +1,37 @@
/* SPDX-License-Identifier: Apache-2.0
 * Copyright 2011-2022 Blender Foundation */

#pragma once

#include <map>

#include <pxr/base/gf/camera.h>
#include <pxr/base/gf/vec2f.h>

#include "MEM_guardedalloc.h"
#include "RNA_blender_cpp.h"

#include "DNA_object_types.h"

namespace blender::render::hydra {

class CameraData {
 public:
  CameraData(BL::Context &b_context);
  CameraData(Object *camera_obj, pxr::GfVec2i res, pxr::GfVec4f tile);

  pxr::GfCamera gf_camera();
  pxr::GfCamera gf_camera(pxr::GfVec4f tile);

 private:
  int mode;
  pxr::GfRange1f clip_range;
  float focal_length;
  pxr::GfVec2f sensor_size;
  pxr::GfMatrix4d transform;
  pxr::GfVec2f lens_shift;
  pxr::GfVec2f ortho_size;
  std::tuple<float, float, int> dof_data;
};

} // namespace blender::render::hydra
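For orientation, here is a sketch of how the new CameraData API is driven. It mirrors the actual call site in FinalEngine::render() further down in this diff, but treated standalone the surrounding names (b_scene, res, freeCameraDelegate) are assumptions.

// Hypothetical call site: build a GfCamera from the scene's active camera
// and hand it to Hydra's free-camera delegate.
using namespace blender::render::hydra;

CameraData cam_data((Object *)b_scene.camera().ptr.data,
                    res,                       /* render resolution */
                    pxr::GfVec4f(0, 0, 1, 1)); /* full frame, no tiling */
pxr::GfCamera gf_camera = cam_data.gf_camera();
freeCameraDelegate->SetCamera(gf_camera);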
source/blender/render/hydra/finalEngine.cc
@@ -11,8 +11,8 @@
#include "glog/logging.h"

#include "finalEngine.h"
#include "camera.h"
#include "utils.h"
#include "sceneDelegate/scene.h"

using namespace std;
using namespace pxr;

@@ -32,13 +32,13 @@ void FinalEngine::sync(BL::Depsgraph &b_depsgraph, BL::Context &b_context, pxr::HdRenderSettingsMap &renderSettings)

void FinalEngine::render(BL::Depsgraph &b_depsgraph)
{
  SceneExport sceneExport(b_depsgraph);
  auto resolution = sceneExport.resolution();
  int width = resolution.first, height = resolution.second;

  GfCamera gfCamera = sceneExport.gfCamera();
  BL::Scene b_scene = b_depsgraph.scene();
  BL::ViewLayer b_view_layer = b_depsgraph.view_layer();
  string sceneName = b_scene.name(), layerName = b_view_layer.name();
  GfVec2i res = get_resolution(b_scene.render());
  GfCamera gfCamera = CameraData((Object *)b_scene.camera().ptr.data, res, GfVec4f(0, 0, 1, 1)).gf_camera();
  freeCameraDelegate->SetCamera(gfCamera);
  renderTaskDelegate->SetCameraAndViewport(freeCameraDelegate->GetCameraId(), GfVec4d(0, 0, width, height));
  renderTaskDelegate->SetCameraAndViewport(freeCameraDelegate->GetCameraId(), GfVec4d(0, 0, res[0], res[1]));
  renderTaskDelegate->SetRendererAov(HdAovTokens->color);

  HdTaskSharedPtrVector tasks = renderTaskDelegate->GetTasks();

@@ -47,9 +47,9 @@ void FinalEngine::render(BL::Depsgraph &b_depsgraph)
  chrono::milliseconds elapsedTime;

  float percentDone = 0.0;
  string sceneName = sceneExport.sceneName(), layerName = sceneExport.layerName();

  map<string, vector<float>> renderImages{{"Combined", vector<float>(width * height * 4)}}; // 4 - number of channels
  map<string, vector<float>> renderImages{
      {"Combined", vector<float>(res[0] * res[1] * 4)}}; // 4 - number of channels
  vector<float> &pixels = renderImages["Combined"];

  {
@@ -68,21 +68,21 @@ void FinalEngine::render(BL::Depsgraph &b_depsgraph)
    elapsedTime = chrono::duration_cast<chrono::milliseconds>(timeCurrent - timeBegin);

    notifyStatus(percentDone / 100.0, sceneName + ": " + layerName,
                 "Render Time: " + formatDuration(elapsedTime) + " | Done: " + to_string(int(percentDone)) + "%");
                 "Render Time: " + format_duration(elapsedTime) + " | Done: " + to_string(int(percentDone)) + "%");

    if (renderTaskDelegate->IsConverged()) {
      break;
    }

    renderTaskDelegate->GetRendererAovData(HdAovTokens->color, pixels.data());
    updateRenderResult(renderImages, layerName, width, height);
    updateRenderResult(renderImages, layerName, res[0], res[1]);
  }

  renderTaskDelegate->GetRendererAovData(HdAovTokens->color, pixels.data());
  updateRenderResult(renderImages, layerName, width, height);
  updateRenderResult(renderImages, layerName, res[0], res[1]);
}

void FinalEngine::getResolution(BL::RenderSettings b_render, int &width, int &height)
GfVec2i FinalEngine::get_resolution(BL::RenderSettings b_render)
{
  float border_w = 1.0, border_h = 1.0;
  if (b_render.use_border()) {
@@ -90,8 +90,8 @@ void FinalEngine::getResolution(BL::RenderSettings b_render, int &width, int &height)
    border_h = b_render.border_max_y() - b_render.border_min_y();
  }

  width = int(b_render.resolution_x() * border_w * b_render.resolution_percentage() / 100);
  height = int(b_render.resolution_y() * border_h * b_render.resolution_percentage() / 100);
  return GfVec2i(int(b_render.resolution_x() * border_w * b_render.resolution_percentage() / 100),
                 int(b_render.resolution_y() * border_h * b_render.resolution_percentage() / 100));
}

void FinalEngine::updateRenderResult(map<string, vector<float>>& renderImages, const string &layerName, int width, int height)
@@ -117,14 +117,13 @@ void FinalEngine::notifyStatus(float progress, const string &title, const string &info)

void FinalEngineGL::render(BL::Depsgraph &b_depsgraph)
{
  SceneExport sceneExport(b_depsgraph);
  auto resolution = sceneExport.resolution();
  int width = resolution.first, height = resolution.second;

  GfCamera gfCamera = sceneExport.gfCamera();
  BL::Scene b_scene = b_depsgraph.scene();
  BL::ViewLayer b_view_layer = b_depsgraph.view_layer();
  string sceneName = b_scene.name(), layerName = b_view_layer.name();
  GfVec2i res = get_resolution(b_scene.render());
  GfCamera gfCamera = CameraData((Object *)b_scene.camera().ptr.data, res, GfVec4f(0, 0, 1, 1)).gf_camera();
  freeCameraDelegate->SetCamera(gfCamera);
  renderTaskDelegate->SetCameraAndViewport(freeCameraDelegate->GetCameraId(),
                                           GfVec4d(0, 0, width, height));
  renderTaskDelegate->SetCameraAndViewport(freeCameraDelegate->GetCameraId(), GfVec4d(0, 0, res[0], res[1]));

  HdTaskSharedPtrVector tasks = renderTaskDelegate->GetTasks();

@@ -132,10 +131,9 @@ void FinalEngineGL::render(BL::Depsgraph &b_depsgraph)
  chrono::milliseconds elapsedTime;

  float percentDone = 0.0;
  string sceneName = sceneExport.sceneName(), layerName = sceneExport.layerName();

  map<string, vector<float>> renderImages{
      {"Combined", vector<float>(width * height * 4)}}; // 4 - number of channels
      {"Combined", vector<float>(res[0] * res[1] * 4)}}; // 4 - number of channels
  vector<float> &pixels = renderImages["Combined"];

  GLuint FramebufferName = 0;
@@ -150,7 +148,7 @@ void FinalEngineGL::render(BL::Depsgraph &b_depsgraph)
  glBindTexture(GL_TEXTURE_2D, renderedTexture);

  // Give an empty image to OpenGL ( the last "0" )
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, 0);
  glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, res[0], res[1], 0, GL_RGBA, GL_FLOAT, 0);

  // Poor filtering. Needed !
  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
@@ -181,7 +179,7 @@ void FinalEngineGL::render(BL::Depsgraph &b_depsgraph)

  notifyStatus(percentDone / 100.0,
               sceneName + ": " + layerName,
               "Render Time: " + formatDuration(elapsedTime) +
               "Render Time: " + format_duration(elapsedTime) +
                   " | Done: " + to_string(int(percentDone)) + "%");

  if (renderTaskDelegate->IsConverged()) {
@@ -189,11 +187,11 @@ void FinalEngineGL::render(BL::Depsgraph &b_depsgraph)
  }

  glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_FLOAT, pixels.data());
  updateRenderResult(renderImages, layerName, width, height);
  updateRenderResult(renderImages, layerName, res[0], res[1]);
}

glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_FLOAT, pixels.data());
updateRenderResult(renderImages, layerName, width, height);
updateRenderResult(renderImages, layerName, res[0], res[1]);
}

} // namespace blender::render::hydra

source/blender/render/hydra/finalEngine.h
@@ -16,7 +16,7 @@ public:
  virtual void render(BL::Depsgraph &b_depsgraph);

 protected:
  void getResolution(BL::RenderSettings b_render, int &width, int &height);
  pxr::GfVec2i get_resolution(BL::RenderSettings b_render);
  void updateRenderResult(std::map<std::string, std::vector<float>> &render_images, const std::string &layerName, int width, int height);
  void notifyStatus(float progress, const std::string &title, const std::string &info);
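To make the get_resolution() arithmetic above concrete, here is a self-contained sketch with made-up values: a 1920x1080 scene at 50% resolution, with a render border covering the middle half of each axis.

#include <cassert>

int main()
{
  const int resolution_x = 1920, resolution_y = 1080;
  const int resolution_percentage = 50;
  const float border_w = 0.75f - 0.25f;  // border_max_x - border_min_x
  const float border_h = 0.75f - 0.25f;  // border_max_y - border_min_y

  // Same expression as get_resolution(): scale by border, then percentage.
  const int width = int(resolution_x * border_w * resolution_percentage / 100);
  const int height = int(resolution_y * border_h * resolution_percentage / 100);
  assert(width == 480 && height == 270);
  return 0;
}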
source/blender/render/hydra/sceneDelegate/material.cc
@@ -31,6 +31,8 @@ std::string MaterialData::name()

void MaterialData::export_mtlx()
{
  /* Calls the Python function hydra.export_mtlx(). */

  PyObject *module, *dict, *func, *result;

  PyGILState_STATE gstate;
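The hunk above only shows the declarations, so for context here is a hedged sketch of the usual CPython calling pattern those declarations suggest. The module and function names come from the comment in the hunk; the helper name and error handling are assumptions, not the PR's actual implementation.

#include <Python.h>

/* Hypothetical helper: call hydra.export_mtlx() from C++. */
static PyObject *call_export_mtlx()
{
  /* Acquire the GIL; Blender may call into this from a non-Python thread. */
  PyGILState_STATE gstate = PyGILState_Ensure();

  PyObject *result = nullptr;
  PyObject *module = PyImport_ImportModule("hydra");
  if (module) {
    PyObject *dict = PyModule_GetDict(module);                   /* borrowed */
    PyObject *func = PyDict_GetItemString(dict, "export_mtlx");  /* borrowed */
    if (func && PyCallable_Check(func)) {
      result = PyObject_CallObject(func, nullptr);  /* no arguments */
    }
    Py_DECREF(module);
  }

  PyGILState_Release(gstate);
  return result;  /* caller owns the reference; nullptr on error */
}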
source/blender/render/hydra/sceneDelegate/object.cc
@@ -4,10 +4,13 @@
#include <pxr/base/vt/array.h>
#include <pxr/base/gf/vec2f.h>
#include <pxr/imaging/hd/light.h>
#include <pxr/imaging/hd/camera.h>
#include <pxr/imaging/hd/tokens.h>
#include <pxr/usd/usdLux/tokens.h>

#include "DNA_light_types.h"
#include "DNA_camera_types.h"

#include "BKE_object.h"
#include "BKE_lib_id.h"
#include "BKE_material.h"
@@ -17,6 +20,7 @@
#include "BKE_layer.h"

#include "object.h"
#include "../utils.h"

PXR_NAMESPACE_OPEN_SCOPE
TF_DEFINE_PUBLIC_TOKENS(HdBlenderTokens, HD_BLENDER_TOKENS);
@@ -58,10 +62,6 @@ ObjectData::ObjectData(Object *object)
      set_as_light();
      break;

    case OB_CAMERA:
      set_as_camera();
      break;

    default:
      break;
  }
@@ -81,7 +81,7 @@ int ObjectData::type()

TfToken ObjectData::prim_type()
{
  TfToken ret;
  TfToken ret = HdBlenderTokens->empty;
  Light *light;
  switch (object->type) {
    case OB_MESH:
@@ -90,6 +90,9 @@ TfToken ObjectData::prim_type()
    case OB_CURVES:
    case OB_CURVES_LEGACY:
    case OB_MBALL:
      if (!has_data(HdTokens->points)) {
        break;
      }
      ret = HdPrimTypeTokens->mesh;
      break;

@@ -127,29 +130,16 @@ TfToken ObjectData::prim_type()
      }
      break;

    case OB_CAMERA:
      ret = HdPrimTypeTokens->camera;
      break;

    default:
      break;
  }

  if (ret == HdPrimTypeTokens->mesh && !has_data(HdTokens->points)) {
    ret = HdBlenderTokens->empty;
  }

  return ret;
}

GfMatrix4d ObjectData::transform()
{
  float *m = (float *)object->object_to_world;
  return GfMatrix4d(
      m[0], m[1], m[2], m[3],
      m[4], m[5], m[6], m[7],
      m[8], m[9], m[10], m[11],
      m[12], m[13], m[14], m[15]);
  return gf_matrix_from_transform(object->object_to_world);
}

Material *ObjectData::material()
@@ -319,8 +309,4 @@ void ObjectData::set_as_light()
  }
}

void ObjectData::set_as_camera()
{
}

} // namespace blender::render::hydra

source/blender/render/hydra/sceneDelegate/object.h
@@ -55,7 +55,6 @@ public:
  void set_as_meshable();
  void set_mesh(Mesh *mesh);
  void set_as_light();
  void set_as_camera();
};

using ObjectDataMap = std::map<pxr::SdfPath, ObjectData>;
source/blender/render/hydra/sceneDelegate/scene.cc (deleted file)
@@ -1,59 +0,0 @@
/* SPDX-License-Identifier: Apache-2.0
 * Copyright 2011-2022 Blender Foundation */

#include "scene.h"
#include "object.h"

using namespace pxr;

namespace blender::render::hydra {

GfCamera SceneExport::gfCamera(BL::Object &b_cameraObj)
{
  BL::Camera &b_camera = (BL::Camera &)b_cameraObj.data();
  auto res = resolution();
  float ratio = (float)res.first / res.second;

  ObjectData obj_data((Object *)b_cameraObj.ptr.data);
  GfCamera gfCamera;
  gfCamera.SetClippingRange(GfRange1f(b_camera.clip_start(), b_camera.clip_end()));
  gfCamera.SetHorizontalAperture(b_camera.sensor_width());
  gfCamera.SetVerticalAperture(b_camera.sensor_width() / ratio);
  gfCamera.SetFocalLength(b_camera.lens());
  gfCamera.SetTransform(obj_data.transform());

  return gfCamera;
}

GfCamera SceneExport::gfCamera()
{
  BL::Object b_cameraObj = b_scene.camera();
  return gfCamera(b_cameraObj);
}

std::pair<int, int> SceneExport::resolution()
{
  BL::RenderSettings b_render = b_scene.render();

  float border_w = 1.0, border_h = 1.0;
  if (b_render.use_border()) {
    border_w = b_render.border_max_x() - b_render.border_min_x();
    border_h = b_render.border_max_y() - b_render.border_min_y();
  }

  return std::make_pair<int, int>(
      int(b_render.resolution_x() * border_w * b_render.resolution_percentage() / 100),
      int(b_render.resolution_y() * border_h * b_render.resolution_percentage() / 100));
}

std::string SceneExport::sceneName()
{
  return b_scene.name();
}

std::string SceneExport::layerName()
{
  return b_depsgraph.view_layer().name();
}

} // namespace blender::render::hydra
source/blender/render/hydra/sceneDelegate/scene.h (deleted file)
@@ -1,31 +0,0 @@
/* SPDX-License-Identifier: Apache-2.0
 * Copyright 2011-2022 Blender Foundation */

#pragma once

#include <pxr/base/gf/camera.h>

#include "MEM_guardedalloc.h"
#include "RNA_blender_cpp.h"

namespace blender::render::hydra {

class SceneExport
{
 public:
  SceneExport(BL::Depsgraph &b_depsgraph)
      : b_depsgraph(b_depsgraph)
      , b_scene(b_depsgraph.scene())
  {}
  pxr::GfCamera gfCamera();
  pxr::GfCamera gfCamera(BL::Object &cameraObj);
  std::pair<int, int> resolution();
  std::string sceneName();
  std::string layerName();

 private:
  BL::Depsgraph &b_depsgraph;
  BL::Scene b_scene;
};

} // namespace blender::render::hydra
source/blender/render/hydra/utils.cc
@@ -11,14 +11,26 @@
#include "BLI_string.h"
#include "BLI_path_util.h"

#include "DNA_camera_types.h"

#include "utils.h"

using namespace pxr;
using namespace std;
using namespace pxr;

namespace blender::render::hydra {

string formatDuration(chrono::milliseconds millisecs)
GfMatrix4d gf_matrix_from_transform(float m[4][4])
{
  return GfMatrix4d(
      m[0][0], m[0][1], m[0][2], m[0][3],
      m[1][0], m[1][1], m[1][2], m[1][3],
      m[2][0], m[2][1], m[2][2], m[2][3],
      m[3][0], m[3][1], m[3][2], m[3][3]);
}

string format_duration(chrono::milliseconds millisecs)
{
  stringstream ss;
  bool neg = millisecs < 0ms;
source/blender/render/hydra/utils.h
@@ -6,12 +6,15 @@
#include <chrono>
#include <string>

#include <pxr/base/gf/matrix4d.h>

#include "BKE_image.h"
#include "BKE_image_save.h"

namespace blender::render::hydra {

std::string formatDuration(std::chrono::milliseconds secs);
pxr::GfMatrix4d gf_matrix_from_transform(float m[4][4]);
std::string format_duration(std::chrono::milliseconds secs);
std::string cache_image(Main *bmain,
                        Scene *scene,
                        Image *image,
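A small usage sketch for the new gf_matrix_from_transform() helper declared above; the include path is an assumption, and the matrix is the identity purely for illustration.

#include <pxr/base/gf/matrix4d.h>

#include "utils.h"  /* assumed include path for the helper */

void example()
{
  /* Layout matches Blender's object_to_world: row-major float[4][4]. */
  float m[4][4] = {{1, 0, 0, 0},
                   {0, 1, 0, 0},
                   {0, 0, 1, 0},
                   {0, 0, 0, 1}};
  pxr::GfMatrix4d gf = blender::render::hydra::gf_matrix_from_transform(m);
  (void)gf;  /* element order is preserved: gf[i][j] == m[i][j] */
}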
source/blender/render/hydra/viewportEngine.cc
@@ -8,10 +8,12 @@
#include <pxr/usd/usdGeom/camera.h>

#include "BLI_math_matrix.h"
#include "DNA_camera_types.h"

#include "glog/logging.h"

#include "viewportEngine.h"
#include "camera.h"
#include "utils.h"

using namespace std;
@@ -19,321 +21,24 @@ using namespace pxr;

namespace blender::render::hydra {

struct CameraData {
  static CameraData init_from_camera(BL::Camera &b_camera, float transform[4][4], float ratio, float border[2][2]);
  static CameraData init_from_context(BL::Context &b_context);

  pxr::GfCamera export_gf(float tile[4]);

  BL::Camera::type_enum mode;
  float clip_range[2];
  float focal_length = 0.0;
  float sensor_size[2];
  float transform[4][4];
  float lens_shift[2];
  float ortho_size[2];
  tuple<float, float, int> dof_data;
};

struct ViewSettings {
  ViewSettings(BL::Context &b_context);

  int get_width();
  int get_height();
  int width();
  int height();

  pxr::GfCamera export_camera();
  GfCamera gf_camera();

  CameraData camera_data;

  int screen_width;
  int screen_height;
  int border[2][2];
  GfVec4i border;
};

CameraData CameraData::init_from_camera(BL::Camera &b_camera, float transform[4][4], float ratio, float border[2][2])
{
  float pos[2] = {border[0][0], border[0][1]};
  float size[2] = {border[1][0], border[1][1]};

  CameraData data = CameraData();

  copy_m4_m4(data.transform, transform);

  data.clip_range[0] = b_camera.clip_start();
  data.clip_range[1] = b_camera.clip_end();
  data.mode = b_camera.type();

  if (b_camera.dof().use_dof()) {
    float focus_distance;
    if (!b_camera.dof().focus_object()) {
      focus_distance = b_camera.dof().focus_distance();
    }
    else {
      float obj_pos[] = {b_camera.dof().focus_object().matrix_world()[3],
                         b_camera.dof().focus_object().matrix_world()[7],
                         b_camera.dof().focus_object().matrix_world()[11]};

      float camera_pos[] = {transform[0][3],
                            transform[1][3],
                            transform[2][3]};

      focus_distance = sqrt(pow((obj_pos[0] - camera_pos[0]), 2) +
                            pow((obj_pos[1] - camera_pos[1]), 2) +
                            pow((obj_pos[2] - camera_pos[2]), 2));
    }

    data.dof_data = tuple(max(focus_distance, 0.001f),
                          b_camera.dof().aperture_fstop(),
                          b_camera.dof().aperture_blades());
  }

  if (b_camera.sensor_fit() == BL::Camera::sensor_fit_VERTICAL) {
    data.lens_shift[0] = b_camera.shift_x() / ratio;
    data.lens_shift[1] = b_camera.shift_y();
  }
  else if ((b_camera.sensor_fit() == BL::Camera::sensor_fit_HORIZONTAL)) {
    data.lens_shift[0] = b_camera.shift_x();
    data.lens_shift[1] = b_camera.shift_y() * ratio;
  }
  else if ((b_camera.sensor_fit() == BL::Camera::sensor_fit_AUTO)) {
    if (ratio > 1.0f) {
      data.lens_shift[0] = b_camera.shift_x();
      data.lens_shift[1] = b_camera.shift_y() * ratio;
    }
    else {
      data.lens_shift[0] = b_camera.shift_x() / ratio;
      data.lens_shift[1] = b_camera.shift_y();
    }
  }
  else {
    data.lens_shift[0] = b_camera.shift_x();
    data.lens_shift[1] = b_camera.shift_y();
  }

  data.lens_shift[0] = data.lens_shift[0] / size[0] + (pos[0] + size[0] * 0.5 - 0.5) / size[0];
  data.lens_shift[1] = data.lens_shift[1] / size[1] + (pos[1] + size[1] * 0.5 - 0.5) / size[1];

  if (b_camera.type() == BL::Camera::type_PERSP) {
    data.focal_length = b_camera.lens();

    if (b_camera.sensor_fit() == BL::Camera::sensor_fit_VERTICAL) {
      data.sensor_size[0] = b_camera.sensor_height() * ratio;
      data.sensor_size[1] = b_camera.sensor_height();
    }
    else if (b_camera.sensor_fit() == BL::Camera::sensor_fit_HORIZONTAL) {
      data.sensor_size[0] = b_camera.sensor_width();
      data.sensor_size[1] = b_camera.sensor_width() / ratio;
    }
    else {
      if (ratio > 1.0f) {
        data.sensor_size[0] = b_camera.sensor_width();
        data.sensor_size[1] = b_camera.sensor_width() / ratio;
      }
      else {
        data.sensor_size[0] = b_camera.sensor_width() * ratio;
        data.sensor_size[1] = b_camera.sensor_width();
      }
    }
    data.sensor_size[0] = data.sensor_size[0] * size[0];
    data.sensor_size[1] = data.sensor_size[1] * size[1];
  }
  else if (b_camera.type() == BL::Camera::type_ORTHO) {
    if (b_camera.sensor_fit() == BL::Camera::sensor_fit_VERTICAL) {
      data.ortho_size[0] = b_camera.ortho_scale() * ratio;
      data.ortho_size[1] = b_camera.ortho_scale();
    }
    else if (b_camera.sensor_fit() == BL::Camera::sensor_fit_HORIZONTAL) {
      data.ortho_size[0] = b_camera.ortho_scale();
      data.ortho_size[1] = b_camera.ortho_scale() / ratio;
    }
    else {
      if (ratio > 1.0f) {
        data.ortho_size[0] = b_camera.ortho_scale();
        data.ortho_size[1] = b_camera.ortho_scale() / ratio;
      }
      else {
        data.ortho_size[0] = b_camera.ortho_scale() * ratio;
        data.ortho_size[1] = b_camera.ortho_scale();
      }
    }

    data.ortho_size[0] = data.ortho_size[0] * size[0];
    data.ortho_size[1] = data.ortho_size[1] * size[1];

    data.clip_range[0] = b_camera.clip_start();
    data.clip_range[1] = b_camera.clip_end();
  }
  else if (b_camera.type() == BL::Camera::type_PANO) {
    // TODO: Recheck parameters for PANO camera
    data.focal_length = b_camera.lens();
    if (b_camera.sensor_fit() == BL::Camera::sensor_fit_VERTICAL) {
      data.sensor_size[0] = b_camera.sensor_height() * ratio;
      data.sensor_size[1] = b_camera.sensor_height();
    }
    else if (b_camera.sensor_fit() == BL::Camera::sensor_fit_HORIZONTAL) {
      data.sensor_size[0] = b_camera.sensor_height();
      data.sensor_size[1] = b_camera.sensor_height() / ratio;
    }
    else {
      if (ratio > 1.0f) {
        data.sensor_size[0] = b_camera.sensor_width();
        data.sensor_size[1] = b_camera.sensor_width() / ratio;
      }
      else {
        data.sensor_size[0] = b_camera.sensor_width() * ratio;
        data.sensor_size[1] = b_camera.sensor_width();
      }
    }
    data.sensor_size[0] = data.sensor_size[0] * size[0];
    data.sensor_size[1] = data.sensor_size[1] * size[1];
  }
  else {
    data.focal_length = b_camera.lens();
    data.sensor_size[0] = b_camera.sensor_height() * ratio;
    data.sensor_size[1] = b_camera.sensor_height();
  }

  return data;
}

CameraData CameraData::init_from_context(BL::Context &b_context)
{
  // This constant was found experimentally; there is no matching option in
  // context.space_data or context.region_data.
  float VIEWPORT_SENSOR_SIZE = 72.0;

  BL::SpaceView3D space_data = (BL::SpaceView3D)b_context.space_data();

  CameraData data;
  float ratio = (float)b_context.region().width() / (float)b_context.region().height();
  if (b_context.region_data().view_perspective() == BL::RegionView3D::view_perspective_PERSP) {
    data = CameraData();
    data.mode = BL::Camera::type_PERSP;
    data.clip_range[0] = space_data.clip_start();
    data.clip_range[1] = space_data.clip_end();
    data.lens_shift[0] = 0.0;
    data.lens_shift[1] = 0.0;
    data.focal_length = space_data.lens();

    if (ratio > 1.0) {
      data.sensor_size[0] = VIEWPORT_SENSOR_SIZE;
      data.sensor_size[1] = VIEWPORT_SENSOR_SIZE / ratio;
    }
    else {
      data.sensor_size[0] = VIEWPORT_SENSOR_SIZE * ratio;
      data.sensor_size[1] = VIEWPORT_SENSOR_SIZE;
    }

    invert_m4_m4(data.transform, (float(*)[4])b_context.region_data().view_matrix().data);
  }
  else if (b_context.region_data().view_perspective() == BL::RegionView3D::view_perspective_ORTHO) {
    data = CameraData();
    data.mode = BL::Camera::type_ORTHO;
    data.lens_shift[0] = 0.0f;
    data.lens_shift[1] = 0.0f;

    float ortho_size = b_context.region_data().view_distance() * VIEWPORT_SENSOR_SIZE / space_data.lens();
    float ortho_depth = space_data.clip_end();

    data.clip_range[0] = -ortho_depth * 0.5;
    data.clip_range[1] = ortho_depth * 0.5;

    if (ratio > 1.0f) {
      data.ortho_size[0] = ortho_size;
      data.ortho_size[1] = ortho_size / ratio;
    }
    else {
      data.ortho_size[0] = ortho_size * ratio;
      data.ortho_size[1] = ortho_size;
    }

    invert_m4_m4(data.transform, (float(*)[4])b_context.region_data().view_matrix().data);
  }
  else if (b_context.region_data().view_perspective() == BL::RegionView3D::view_perspective_CAMERA) {
    BL::Object camera_obj = space_data.camera();

    float border[2][2] = {{0, 0}, {1, 1}};
    float inverted_transform[4][4];
    invert_m4_m4(inverted_transform, (float(*)[4])b_context.region_data().view_matrix().data);

    data = CameraData::init_from_camera((BL::Camera &)camera_obj.data(), inverted_transform, ratio, border);

    // This formula was taken from the previous plugin, with the corresponding comment.
    // See blender/intern/cycles/blender/blender_camera.cpp:blender_camera_from_view (look for 1.41421f).
    float zoom = 4.0 / pow((pow(2.0, 0.5) + b_context.region_data().view_camera_zoom() / 50.0), 2);

    // Update lens_shift to account for viewport zoom and view_camera_offset;
    // view_camera_offset should be multiplied by 2.
    data.lens_shift[0] = (data.lens_shift[0] + b_context.region_data().view_camera_offset()[0] * 2) / zoom;
    data.lens_shift[1] = (data.lens_shift[1] + b_context.region_data().view_camera_offset()[1] * 2) / zoom;

    if (data.mode == BL::Camera::type_ORTHO) {
      data.ortho_size[0] *= zoom;
      data.ortho_size[1] *= zoom;
    }
    else {
      data.sensor_size[0] *= zoom;
      data.sensor_size[1] *= zoom;
    }
  }

  return data;
}

pxr::GfCamera CameraData::export_gf(float tile[4])
{
  float tile_pos[2] = {tile[0], tile[1]}, tile_size[2] = {tile[2], tile[3]};

  pxr::GfCamera gf_camera = pxr::GfCamera();

  gf_camera.SetClippingRange(pxr::GfRange1f(this->clip_range[0], this->clip_range[1]));

  vector<float> lens_shift = {(float)(this->lens_shift[0] + tile_pos[0] + tile_size[0] * 0.5 - 0.5) / tile_size[0],
                              (float)(this->lens_shift[1] + tile_pos[1] + tile_size[1] * 0.5 - 0.5) / tile_size[1]};

  if (this->mode == BL::Camera::type_PERSP) {
    gf_camera.SetProjection(pxr::GfCamera::Projection::Perspective);
    gf_camera.SetFocalLength(this->focal_length);

    vector<float> sensor_size = {this->sensor_size[0] * tile_size[0], this->sensor_size[1] * tile_size[1]};

    gf_camera.SetHorizontalAperture(sensor_size[0]);
    gf_camera.SetVerticalAperture(sensor_size[1]);

    gf_camera.SetHorizontalApertureOffset(lens_shift[0] * sensor_size[0]);
    gf_camera.SetVerticalApertureOffset(lens_shift[1] * sensor_size[1]);
  }
  else if (this->mode == BL::Camera::type_ORTHO) {
    gf_camera.SetProjection(pxr::GfCamera::Projection::Orthographic);

    // Use tenths of a world unit according to the USD docs:
    // https://graphics.pixar.com/usd/docs/api/class_gf_camera.html
    float ortho_size[2] = {this->ortho_size[0] * tile_size[0] * 10,
                           this->ortho_size[1] * tile_size[1] * 10};

    gf_camera.SetHorizontalAperture(ortho_size[0]);
    gf_camera.SetVerticalAperture(ortho_size[1]);

    gf_camera.SetHorizontalApertureOffset(lens_shift[0] * this->ortho_size[0] * tile_size[0] * 10);
    gf_camera.SetVerticalApertureOffset(lens_shift[1] * this->ortho_size[1] * tile_size[1] * 10);
  }
  else if (this->mode == BL::Camera::type_PANO) {
    // TODO: store panoramic camera settings
  }

  double transform_d[4][4];
  for (int i = 0; i < 4; i++) {
    for (int j = 0; j < 4; j++) {
      transform_d[i][j] = (double)transform[i][j];
    }
  }
  gf_camera.SetTransform(pxr::GfMatrix4d(transform_d));

  return gf_camera;
}

ViewSettings::ViewSettings(BL::Context &b_context)
    : camera_data(b_context)
{
  camera_data = CameraData::init_from_context(b_context);

  screen_width = b_context.region().width();
  screen_height = b_context.region().height();

@@ -405,27 +110,24 @@ ViewSettings::ViewSettings(BL::Context &b_context)
    }
  }

  border[0][0] = x1;
  border[0][1] = y1;
  border[1][0] = x2 - x1;
  border[1][1] = y2 - y1;
  border = GfVec4i(x1, y1, x2 - x1, y2 - y1);
}

int ViewSettings::get_width()
int ViewSettings::width()
{
  return border[1][0];
  return border[2];
}

int ViewSettings::get_height()
int ViewSettings::height()
{
  return border[1][1];
  return border[3];
}

GfCamera ViewSettings::export_camera()
GfCamera ViewSettings::gf_camera()
{
  float tile[4] = {(float)border[0][0] / screen_width, (float)border[0][1] / screen_height,
                   (float)border[1][0] / screen_width, (float)border[1][1] / screen_height};
  return camera_data.export_gf(tile);
  return camera_data.gf_camera(GfVec4f(
      (float)border[0] / screen_width, (float)border[1] / screen_height,
      (float)border[2] / screen_width, (float)border[3] / screen_height));
}

GLTexture::GLTexture()
@@ -443,7 +145,7 @@ GLTexture::~GLTexture()
  }
}

void GLTexture::setBuffer(pxr::HdRenderBuffer *buffer)
void GLTexture::setBuffer(HdRenderBuffer *buffer)
{
  if (!textureId) {
    create(buffer);
@@ -463,7 +165,7 @@ void GLTexture::setBuffer(pxr::HdRenderBuffer *buffer)
  buffer->Unmap();
}

void GLTexture::create(pxr::HdRenderBuffer *buffer)
void GLTexture::create(HdRenderBuffer *buffer)
{
  width = buffer->GetWidth();
  height = buffer->GetHeight();
@@ -538,7 +240,7 @@ void GLTexture::draw(GLfloat x, GLfloat y)
  glDeleteVertexArrays(1, &vertex_array);
}

void ViewportEngine::sync(BL::Depsgraph &b_depsgraph, BL::Context &b_context, pxr::HdRenderSettingsMap &renderSettings)
void ViewportEngine::sync(BL::Depsgraph &b_depsgraph, BL::Context &b_context, HdRenderSettingsMap &renderSettings)
{
  if (!sceneDelegate) {
    sceneDelegate = std::make_unique<BlenderSceneDelegate>(renderIndex.get(),
@@ -554,16 +256,16 @@ void ViewportEngine::sync(BL::Depsgraph &b_depsgraph, BL::Context &b_context, pxr::HdRenderSettingsMap &renderSettings)
void ViewportEngine::viewDraw(BL::Depsgraph &b_depsgraph, BL::Context &b_context)
{
  ViewSettings viewSettings(b_context);
  if (viewSettings.get_width() * viewSettings.get_height() == 0) {
  if (viewSettings.width() * viewSettings.height() == 0) {
    return;
  };

  BL::Scene b_scene = b_depsgraph.scene_eval();
  GfCamera gfCamera = viewSettings.export_camera();
  GfCamera gfCamera = viewSettings.gf_camera();

  freeCameraDelegate->SetCamera(gfCamera);
  renderTaskDelegate->SetCameraAndViewport(freeCameraDelegate->GetCameraId(),
      GfVec4d(viewSettings.border[0][0], viewSettings.border[0][1], viewSettings.border[1][0], viewSettings.border[1][1]));
      GfVec4d(viewSettings.border[0], viewSettings.border[1], viewSettings.border[2], viewSettings.border[3]));

  if (!b_engine.bl_use_gpu_context()) {
    renderTaskDelegate->SetRendererAov(HdAovTokens->color);
@@ -584,7 +286,7 @@ void ViewportEngine::viewDraw(BL::Depsgraph &b_depsgraph, BL::Context &b_context)

  if (!b_engine.bl_use_gpu_context()) {
    texture.setBuffer(renderTaskDelegate->GetRendererAov(HdAovTokens->color));
    texture.draw((GLfloat)viewSettings.border[0][0], (GLfloat)viewSettings.border[0][1]);
    texture.draw((GLfloat)viewSettings.border[0], (GLfloat)viewSettings.border[1]);
  }
}

@@ -593,7 +295,7 @@ void ViewportEngine::viewDraw(BL::Depsgraph &b_depsgraph, BL::Context &b_context)
  chrono::time_point<chrono::steady_clock> timeCurrent = chrono::steady_clock::now();
  chrono::milliseconds elapsedTime = chrono::duration_cast<chrono::milliseconds>(timeCurrent - timeBegin);

  string formattedTime = formatDuration(elapsedTime);
  string formattedTime = format_duration(elapsedTime);

  if (!renderTaskDelegate->IsConverged()) {
    notifyStatus("Time: " + formattedTime + " | Done: " + to_string(int(getRendererPercentDone())) + "%",
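The viewport zoom formula used above (and in the new camera.cc) is easy to sanity-check numerically. A standalone sketch with illustrative values: view_camera_zoom of 0 gives a factor of 2.0, and a factor of 1.0 corresponds to a zoom value of roughly 29.29.

#include <cmath>
#include <cstdio>

int main()
{
  for (float view_camera_zoom : {0.0f, 29.29f, 50.0f}) {
    // Same expression as the code: 4 / (sqrt(2) + zoom/50)^2.
    double zoom = 4.0 / pow(sqrt(2.0) + view_camera_zoom / 50.0, 2);
    printf("view_camera_zoom=%.2f -> zoom=%.3f\n", view_camera_zoom, zoom);
  }
  return 0;
}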