Hydra: Use BKE_camera_params API to simplify camera code and share more with rest of Blender #114370

Merged
Brecht Van Lommel merged 20 commits from DagerD/blender:hydra-camera-refactoring into main 2023-11-15 19:03:31 +01:00
8 changed files with 135 additions and 348 deletions

View File

@ -150,7 +150,6 @@ set(SRC
if(WITH_HYDRA)
list(APPEND SRC
hydra/camera.cc
hydra/curves.cc
hydra/hydra_scene_delegate.cc
hydra/id.cc
@ -165,7 +164,6 @@ if(WITH_HYDRA)
hydra/volume_modifier.cc
hydra/world.cc
hydra/camera.h
hydra/curves.h
hydra/hydra_scene_delegate.h
hydra/id.h

View File

@ -1,284 +0,0 @@
/* SPDX-FileCopyrightText: 2011-2022 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#include "camera.h"
#include "DNA_camera_types.h"
#include "DNA_object_types.h"
#include "DNA_screen_types.h"
#include "DNA_view3d_types.h"
#include "hydra/object.h"
namespace blender::io::hydra {
/* Build camera data from a 3D viewport region.
 * Covers the three viewport projection modes: perspective (RV3D_PERSP),
 * orthographic (RV3D_ORTHO) and looking through a camera object (RV3D_CAMOB). */
CameraData::CameraData(const View3D *v3d, const ARegion *region)
{
const RegionView3D *region_data = (const RegionView3D *)region->regiondata;
/* TODO: refactor to use the BKE_camera_params API. */
/* The viewport has no real sensor; use twice the default camera sensor width. */
float VIEWPORT_SENSOR_SIZE = DEFAULT_SENSOR_WIDTH * 2.0f;
pxr::GfVec2i res(region->winx, region->winy);
/* Region aspect ratio (width / height), used to fit the sensor to the view. */
float ratio = float(res[0]) / res[1];
/* viewmat is world-to-view; invert it to get the camera transform. */
transform_ = gf_matrix_from_transform(region_data->viewmat).GetInverse();
switch (region_data->persp) {
case RV3D_PERSP: {
mode_ = CAM_PERSP;
clip_range_ = pxr::GfRange1f(v3d->clip_start, v3d->clip_end);
lens_shift_ = pxr::GfVec2f(0.0, 0.0);
focal_length_ = v3d->lens;
/* Fit the fixed viewport sensor to the region aspect ratio. */
if (ratio > 1.0) {
sensor_size_ = pxr::GfVec2f(VIEWPORT_SENSOR_SIZE, VIEWPORT_SENSOR_SIZE / ratio);
}
else {
sensor_size_ = pxr::GfVec2f(VIEWPORT_SENSOR_SIZE * ratio, VIEWPORT_SENSOR_SIZE);
}
break;
}
case RV3D_ORTHO: {
mode_ = CAM_ORTHO;
lens_shift_ = pxr::GfVec2f(0.0f, 0.0f);
/* Ortho extent derived from the view distance and viewport lens. */
float o_size = region_data->dist * VIEWPORT_SENSOR_SIZE / v3d->lens;
float o_depth = v3d->clip_end;
/* Center the clip range around the view plane. */
clip_range_ = pxr::GfRange1f(-o_depth * 0.5, o_depth * 0.5);
if (ratio > 1.0f) {
ortho_size_ = pxr::GfVec2f(o_size, o_size / ratio);
}
else {
ortho_size_ = pxr::GfVec2f(o_size * ratio, o_size);
}
break;
}
case RV3D_CAMOB: {
/* Start from the scene camera's settings, but keep the viewport transform
 * (the view may be offset/zoomed relative to the camera). */
pxr::GfMatrix4d mat = transform_;
*this = CameraData(v3d->camera, res, pxr::GfVec4f(0, 0, 1, 1));
transform_ = mat;
/* This formula was taken from the previous plugin together with its original
 * comment. See blender/intern/cycles/blender/blender_camera.cpp:
 * blender_camera_from_view (look for 1.41421f). */
float zoom = 4.0 / pow((pow(2.0, 0.5) + region_data->camzoom / 50.0), 2);
/* Updating l_shift due to viewport zoom and view_camera_offset;
 * view_camera_offset should be multiplied by 2. */
lens_shift_ = pxr::GfVec2f((lens_shift_[0] + region_data->camdx * 2) / zoom,
(lens_shift_[1] + region_data->camdy * 2) / zoom);
/* Zoom scales whichever aperture the camera mode uses. */
if (mode_ == CAM_ORTHO) {
ortho_size_ *= zoom;
}
else {
sensor_size_ *= zoom;
}
break;
}
default:
break;
}
}
/* Build camera data from a Blender camera object.
 * `res` is the render resolution in pixels; `tile` is (x, y, width, height) in
 * normalized 0..1 coordinates, used to restrict the camera to a render border. */
CameraData::CameraData(const Object *camera_obj, pxr::GfVec2i res, pxr::GfVec4f tile)
{
const Camera *camera = (const Camera *)camera_obj->data;
float t_pos[2] = {tile[0], tile[1]};
float t_size[2] = {tile[2], tile[3]};
transform_ = gf_matrix_from_transform(camera_obj->object_to_world);
clip_range_ = pxr::GfRange1f(camera->clip_start, camera->clip_end);
mode_ = camera->type;
/* Depth of field: resolve the focus distance either from an explicit value or
 * from the distance to the focus object. */
if (camera->dof.flag & CAM_DOF_ENABLED) {
float focus_distance;
if (!camera->dof.focus_object) {
focus_distance = camera->dof.focus_distance;
}
else {
/* Translation column of the focus object's world matrix. */
pxr::GfVec3f obj_pos(camera->dof.focus_object->object_to_world[0][3],
camera->dof.focus_object->object_to_world[1][3],
camera->dof.focus_object->object_to_world[2][3]);
pxr::GfVec3f cam_pos(transform_[0][3], transform_[1][3], transform_[2][3]);
focus_distance = (obj_pos - cam_pos).GetLength();
}
/* (focus distance, f-stop, aperture blade count); clamp distance away from 0. */
dof_data_ = std::tuple(
std::max(focus_distance, 0.001f), camera->dof.aperture_fstop, camera->dof.aperture_blades);
}
float ratio = float(res[0]) / res[1];
/* Lens shift is expressed relative to the fitted sensor axis, so scale the
 * axis that is derived from the other one by the aspect ratio. */
switch (camera->sensor_fit) {
case CAMERA_SENSOR_FIT_VERT:
lens_shift_ = pxr::GfVec2f(camera->shiftx / ratio, camera->shifty);
break;
case CAMERA_SENSOR_FIT_HOR:
lens_shift_ = pxr::GfVec2f(camera->shiftx, camera->shifty * ratio);
break;
case CAMERA_SENSOR_FIT_AUTO:
if (ratio > 1.0f) {
lens_shift_ = pxr::GfVec2f(camera->shiftx, camera->shifty * ratio);
}
else {
lens_shift_ = pxr::GfVec2f(camera->shiftx / ratio, camera->shifty);
}
break;
default:
lens_shift_ = pxr::GfVec2f(camera->shiftx, camera->shifty);
break;
}
/* Re-center the shift onto the tile so the border region fills the frame. */
lens_shift_ = pxr::GfVec2f(
lens_shift_[0] / t_size[0] + (t_pos[0] + t_size[0] * 0.5 - 0.5) / t_size[0],
lens_shift_[1] / t_size[1] + (t_pos[1] + t_size[1] * 0.5 - 0.5) / t_size[1]);
switch (camera->type) {
case CAM_PERSP: {
focal_length_ = camera->lens;
/* Fit the sensor rectangle to the output aspect ratio. */
switch (camera->sensor_fit) {
case CAMERA_SENSOR_FIT_VERT:
sensor_size_ = pxr::GfVec2f(camera->sensor_y * ratio, camera->sensor_y);
break;
case CAMERA_SENSOR_FIT_HOR:
sensor_size_ = pxr::GfVec2f(camera->sensor_x, camera->sensor_x / ratio);
break;
case CAMERA_SENSOR_FIT_AUTO:
if (ratio > 1.0f) {
sensor_size_ = pxr::GfVec2f(camera->sensor_x, camera->sensor_x / ratio);
}
else {
sensor_size_ = pxr::GfVec2f(camera->sensor_x * ratio, camera->sensor_x);
}
break;
default:
sensor_size_ = pxr::GfVec2f(camera->sensor_x, camera->sensor_y);
break;
}
/* Crop the sensor to the tile extents. */
sensor_size_ = pxr::GfVec2f(sensor_size_[0] * t_size[0], sensor_size_[1] * t_size[1]);
break;
}
case CAM_ORTHO: {
focal_length_ = 0.0f;
/* Fit the orthographic extent to the output aspect ratio. */
switch (camera->sensor_fit) {
case CAMERA_SENSOR_FIT_VERT:
ortho_size_ = pxr::GfVec2f(camera->ortho_scale * ratio, camera->ortho_scale);
break;
case CAMERA_SENSOR_FIT_HOR:
ortho_size_ = pxr::GfVec2f(camera->ortho_scale, camera->ortho_scale / ratio);
break;
case CAMERA_SENSOR_FIT_AUTO:
if (ratio > 1.0f) {
ortho_size_ = pxr::GfVec2f(camera->ortho_scale, camera->ortho_scale / ratio);
}
else {
ortho_size_ = pxr::GfVec2f(camera->ortho_scale * ratio, camera->ortho_scale);
}
break;
default:
ortho_size_ = pxr::GfVec2f(camera->ortho_scale, camera->ortho_scale);
break;
}
/* Crop the ortho extent to the tile extents. */
ortho_size_ = pxr::GfVec2f(ortho_size_[0] * t_size[0], ortho_size_[1] * t_size[1]);
break;
}
case CAM_PANO: {
/* TODO: Recheck parameters for PANO camera. Currently treated like a
 * perspective camera. */
focal_length_ = camera->lens;
switch (camera->sensor_fit) {
case CAMERA_SENSOR_FIT_VERT:
sensor_size_ = pxr::GfVec2f(camera->sensor_y * ratio, camera->sensor_y);
break;
case CAMERA_SENSOR_FIT_HOR:
sensor_size_ = pxr::GfVec2f(camera->sensor_x, camera->sensor_x / ratio);
break;
case CAMERA_SENSOR_FIT_AUTO:
if (ratio > 1.0f) {
sensor_size_ = pxr::GfVec2f(camera->sensor_x, camera->sensor_x / ratio);
}
else {
sensor_size_ = pxr::GfVec2f(camera->sensor_x * ratio, camera->sensor_x);
}
break;
default:
sensor_size_ = pxr::GfVec2f(camera->sensor_x, camera->sensor_y);
break;
}
sensor_size_ = pxr::GfVec2f(sensor_size_[0] * t_size[0], sensor_size_[1] * t_size[1]);
break;
}
default: {
/* Unknown camera type: fall back to a vertically-fitted sensor. */
focal_length_ = camera->lens;
sensor_size_ = pxr::GfVec2f(camera->sensor_y * ratio, camera->sensor_y);
break;
}
}
}
/* Convenience overload: build the camera for the full frame (no tile/border). */
pxr::GfCamera CameraData::gf_camera()
{
const pxr::GfVec4f full_tile(0.0f, 0.0f, 1.0f, 1.0f);
return gf_camera(full_tile);
}
/* Convert the stored camera data into a USD GfCamera, restricted to `tile`
 * ((x, y, width, height) in normalized 0..1 coordinates). */
pxr::GfCamera CameraData::gf_camera(pxr::GfVec4f tile)
{
float t_pos[2] = {tile[0], tile[1]}, t_size[2] = {tile[2], tile[3]};
pxr::GfCamera gf_camera = pxr::GfCamera();
gf_camera.SetClippingRange(clip_range_);
/* Re-center the lens shift onto the tile so the tile fills the frame. */
float l_shift[2] = {(lens_shift_[0] + t_pos[0] + t_size[0] * 0.5f - 0.5f) / t_size[0],
(lens_shift_[1] + t_pos[1] + t_size[1] * 0.5f - 0.5f) / t_size[1]};
switch (mode_) {
case CAM_PERSP:
case CAM_PANO: {
/* TODO: store panoramic camera settings; panoramic is exported as a plain
 * perspective projection for now. */
gf_camera.SetProjection(pxr::GfCamera::Projection::Perspective);
gf_camera.SetFocalLength(focal_length_);
/* Crop the sensor to the tile; aperture offsets are in aperture units. */
float s_size[2] = {sensor_size_[0] * t_size[0], sensor_size_[1] * t_size[1]};
gf_camera.SetHorizontalAperture(s_size[0]);
gf_camera.SetVerticalAperture(s_size[1]);
gf_camera.SetHorizontalApertureOffset(l_shift[0] * s_size[0]);
gf_camera.SetVerticalApertureOffset(l_shift[1] * s_size[1]);
break;
}
case CAM_ORTHO: {
gf_camera.SetProjection(pxr::GfCamera::Projection::Orthographic);
/* Use tenths of a world unit according to USD docs
 * https://graphics.pixar.com/usd/docs/api/class_gf_camera.html */
float o_size[2] = {ortho_size_[0] * t_size[0] * 10, ortho_size_[1] * t_size[1] * 10};
gf_camera.SetHorizontalAperture(o_size[0]);
gf_camera.SetVerticalAperture(o_size[1]);
gf_camera.SetHorizontalApertureOffset(l_shift[0] * o_size[0]);
gf_camera.SetVerticalApertureOffset(l_shift[1] * o_size[1]);
break;
}
default:
break;
}
gf_camera.SetTransform(transform_);
return gf_camera;
}
} // namespace blender::io::hydra

View File

@ -1,37 +0,0 @@
/* SPDX-FileCopyrightText: 2011-2022 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include <tuple>
#include <pxr/base/gf/camera.h>
#include <pxr/base/gf/vec2f.h>
struct ARegion;
struct Object;
struct View3D;
namespace blender::io::hydra {
/* Camera state extracted from a Blender viewport or camera object, convertible
 * to a USD GfCamera. */
class CameraData {
private:
int mode_;                        /* Camera type: CAM_PERSP, CAM_ORTHO or CAM_PANO. */
pxr::GfRange1f clip_range_;       /* Near/far clipping distances. */
float focal_length_;              /* Lens focal length; 0 for orthographic. */
pxr::GfVec2f sensor_size_;        /* Sensor width/height (perspective/pano). */
pxr::GfMatrix4d transform_;       /* Camera-to-world transform. */
pxr::GfVec2f lens_shift_;         /* Normalized horizontal/vertical lens shift. */
pxr::GfVec2f ortho_size_;         /* Orthographic extent (orthographic only). */
std::tuple<float, float, int> dof_data_;  /* (focus distance, f-stop, blade count). */
public:
/* Build from a viewport region or from a camera object with resolution/tile. */
CameraData(const View3D *v3d, const ARegion *region);
CameraData(const Object *camera_obj, pxr::GfVec2i res, pxr::GfVec4f tile);
/* Convert to a USD camera, optionally restricted to a normalized tile. */
pxr::GfCamera gf_camera();
pxr::GfCamera gf_camera(pxr::GfVec4f tile);
};
} // namespace blender::io::hydra

View File

@ -74,6 +74,7 @@ set(LIB
)
set(SRC
camera.cc
engine.cc
final_engine.cc
light_tasks_delegate.cc
@ -82,6 +83,7 @@ set(SRC
render_task_delegate.cc
viewport_engine.cc
camera.h
engine.h
final_engine.h
light_tasks_delegate.h

View File

@ -0,0 +1,88 @@
/* SPDX-FileCopyrightText: 2011-2022 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#include "camera.h"
#include "BKE_camera.h"
#include "DNA_camera_types.h"
#include "DNA_screen_types.h"
#include "DNA_view3d_types.h"
#include "hydra/object.h"
namespace blender::render::hydra {
/* Convert Blender CameraParams into a USD GfCamera.
 * `res` is the output resolution in pixels; `border` is (x, y, width, height)
 * in normalized 0..1 coordinates, restricting the camera to a render border. */
static pxr::GfCamera gf_camera(const CameraParams &params,
const pxr::GfVec2i &res,
const pxr::GfVec4f &border)
{
pxr::GfCamera camera;
camera.SetProjection(params.is_ortho ? pxr::GfCamera::Projection::Orthographic :
pxr::GfCamera::Projection::Perspective);
camera.SetClippingRange(pxr::GfRange1f(params.clip_start, params.clip_end));
camera.SetFocalLength(params.lens);
pxr::GfVec2f b_pos(border[0], border[1]), b_size(border[2], border[3]);
/* Sensor size along the fitted axis; the other axis is scaled by the
 * resolution aspect ratio below. */
float sensor_size = BKE_camera_sensor_size(params.sensor_fit, params.sensor_x, params.sensor_y);
pxr::GfVec2f sensor_scale = (BKE_camera_sensor_fit(params.sensor_fit, res[0], res[1]) ==
CAMERA_SENSOR_FIT_HOR) ?
pxr::GfVec2f(1.0f, float(res[1]) / res[0]) :
pxr::GfVec2f(float(res[0]) / res[1], 1.0f);
/* Aperture: ortho extent or sensor size, fitted to the aspect ratio, cropped
 * to the border and scaled by the (viewport) zoom. */
pxr::GfVec2f aperture = pxr::GfVec2f((params.is_ortho) ? params.ortho_scale : sensor_size);
aperture = pxr::GfCompMult(aperture, sensor_scale);
aperture = pxr::GfCompMult(aperture, b_size);
aperture *= params.zoom;
if (params.is_ortho) {
/* Use tenths of a world unit according to USD docs
 * https://graphics.pixar.com/usd/docs/api/class_gf_camera.html */
aperture *= 10.0f;
}
camera.SetHorizontalAperture(aperture[0]);
camera.SetVerticalAperture(aperture[1]);
/* Lens shift: undo the sensor fit scaling, add the view offset, then
 * re-center onto the border so the border region fills the frame. */
pxr::GfVec2f lens_shift = pxr::GfVec2f(params.shiftx, params.shifty);
lens_shift = pxr::GfCompDiv(lens_shift, sensor_scale);
lens_shift += pxr::GfVec2f(params.offsetx, params.offsety);
lens_shift += b_pos + b_size * 0.5f - pxr::GfVec2f(0.5f);
lens_shift = pxr::GfCompDiv(lens_shift, b_size);
/* GfCamera aperture offsets are expressed in aperture units. */
camera.SetHorizontalApertureOffset(lens_shift[0] * aperture[0]);
camera.SetVerticalApertureOffset(lens_shift[1] * aperture[1]);
return camera;
}
/* Build a GfCamera for a 3D viewport view. Camera parameters are derived via
 * the BKE_camera_params API, so the viewport/camera-view differences are
 * handled by BKE_camera_params_from_view3d.
 * NOTE(review): lines below between the parameter setup and
 * BKE_camera_params_init are review-comment text captured by the page scrape,
 * not part of the source file. */
pxr::GfCamera gf_camera(const Depsgraph *depsgraph,
const View3D *v3d,
const ARegion *region,
const pxr::GfVec4f &border)
{
const RegionView3D *region_data = (const RegionView3D *)region->regiondata;
CameraParams params;
brecht marked this conversation as resolved Outdated

There is still a lot of duplicated logic here, BKE_camera_params_from_view3d already abstracts most of the differences.

This whole switch could probably be replaced by this, but have not tested:

  const float fit_width = (region_data->persp == RV3D_CAMOB) ? rd->xasp * rd->xsch : region->winx;
  const float fit_height = (region_data->persp == RV3D_CAMOB) ? rd->yasp * rd->ysch : region->winy;
  const int sensor_fit = BKE_camera_sensor_fit(camera_params.sensor_fit, fit_width, fit_height);
  const pxr::GfVec2f sensor_fit_scale = (sensor_fit == CAMERA_SENSOR_FIT_HOR) ?
                                            pxr::GfVec2f(1.0f, ratio) :
                                            pxr::GfVec2f(1.0f / ratio, 1.0f);

  const float camera_sensor_size = BKE_camera_sensor_size(
      sensor_fit, camera_params.sensor_x, camera_params.sensor_y);
  aperture = pxr::GfVec2f((camera_params.is_ortho) ? camera_params.ortho_scale : 
                                                     camera_sensor_size);
  aperture = pxr::GfCompMult(aperture, sensor_fit_scale);
  aperture *= camera_params.zoom;

  lens_shift = pxr::GfVec2f(camera_params.shiftx, camera_params.shifty);
  lens_shift = pxr::GfCompMult(lens_shift, sensor_fit_scale);
  lens_shift += pxr::GfVec2f(camera_params.offsetx, camera_params.offsety) * 2;
  lens_shift /= camera_params.zoom;
There is still a lot of duplicated logic here, `BKE_camera_params_from_view3d` already abstracts most of the differences. This whole switch could probably be replaced by this, but have not tested: ``` const float fit_width = (region_data->persp == RV3D_CAMOB) ? rd->xasp * rd->xsch : region->winx; const float fit_height = (region_data->persp == RV3D_CAMOB) ? rd->yasp * rd->ysch : region->winy; const int sensor_fit = BKE_camera_sensor_fit(camera_params.sensor_fit, fit_width, fit_height); const pxr::GfVec2f sensor_fit_scale = (sensor_fit == CAMERA_SENSOR_FIT_HOR) ? pxr::GfVec2f(1.0f, ratio) : pxr::GfVec2f(1.0f / ratio, 1.0f); const float camera_sensor_size = BKE_camera_sensor_size( sensor_fit, camera_params.sensor_x, camera_params.sensor_y); aperture = pxr::GfVec2f((camera_params.is_ortho) ? camera_params.ortho_scale : camera_sensor_size); aperture = pxr::GfCompMult(aperture, sensor_fit_scale); aperture *= camera_params.zoom; lens_shift = pxr::GfVec2f(camera_params.shiftx, camera_params.shifty); lens_shift = pxr::GfCompMult(lens_shift, sensor_fit_scale); lens_shift += pxr::GfVec2f(camera_params.offsetx, camera_params.offsety) * 2; lens_shift /= camera_params.zoom; ```
BKE_camera_params_init(&params);
BKE_camera_params_from_view3d(&params, depsgraph, v3d, region_data);
pxr::GfCamera camera = gf_camera(params, pxr::GfVec2i(region->winx, region->winy), border);
/* viewmat is world-to-view; invert it to get the camera transform. */
camera.SetTransform(io::hydra::gf_matrix_from_transform(region_data->viewmat).GetInverse());
return camera;
}
/* Build a GfCamera from a Blender camera object for the given resolution and
 * render border, using the shared BKE_camera_params API. */
pxr::GfCamera gf_camera(const Object *camera_obj,
const pxr::GfVec2i &res,
const pxr::GfVec4f &border)
{
/* Extract camera settings (lens, sensor, clipping, shift) from the object. */
CameraParams camera_params;
BKE_camera_params_init(&camera_params);
BKE_camera_params_from_object(&camera_params, camera_obj);
/* Convert to a USD camera, then place it with the object's world matrix. */
pxr::GfCamera result = gf_camera(camera_params, res, border);
result.SetTransform(io::hydra::gf_matrix_from_transform(camera_obj->object_to_world));
return result;
}
} // namespace blender::render::hydra

View File

@ -0,0 +1,27 @@
/* SPDX-FileCopyrightText: 2011-2022 Blender Authors
*
* SPDX-License-Identifier: GPL-2.0-or-later */
#pragma once
#include <pxr/base/gf/camera.h>
#include <pxr/base/gf/vec2f.h>
struct ARegion;
struct Object;
struct View3D;
struct Depsgraph;
struct RenderData;
namespace blender::render::hydra {
/* Build a USD GfCamera matching a 3D viewport view.
 * `border` is (x, y, width, height) in normalized 0..1 coordinates. */
pxr::GfCamera gf_camera(const Depsgraph *depsgraph,
const View3D *v3d,
const ARegion *region,
const pxr::GfVec4f &border);
/* Build a USD GfCamera from a Blender camera object at resolution `res`. */
pxr::GfCamera gf_camera(const Object *camera_obj,
const pxr::GfVec2i &res,
const pxr::GfVec4f &border);
}  // namespace blender::render::hydra

View File

@ -3,6 +3,7 @@
* SPDX-License-Identifier: GPL-2.0-or-later */
#include "final_engine.h"
#include "camera.h"
#include <pxr/imaging/hd/light.h>
#include <pxr/imaging/hd/renderBuffer.h>
@ -20,8 +21,6 @@
#include "RE_engine.h"
#include "hydra/camera.h"
namespace blender::render::hydra {
void FinalEngine::render()
@ -42,8 +41,8 @@ void FinalEngine::render()
pxr::GfVec2i image_res(r.xsch * r.size / 100, r.ysch * r.size / 100);
int width = image_res[0] * border[2];
int height = image_res[1] * border[3];
pxr::GfCamera camera =
io::hydra::CameraData(scene_->camera, image_res, pxr::GfVec4f(0, 0, 1, 1)).gf_camera(border);
pxr::GfCamera camera = gf_camera(scene_->camera, image_res, border);
free_camera_delegate_->SetCamera(camera);
render_task_delegate_->set_viewport(pxr::GfVec4d(0, 0, width, height));

View File

@ -3,6 +3,7 @@
* SPDX-License-Identifier: GPL-2.0-or-later */
#include "viewport_engine.h"
#include "camera.h"
#include <pxr/base/gf/camera.h>
#include <pxr/imaging/glf/drawTarget.h>
@ -28,27 +29,21 @@
#include "RE_engine.h"
#include "hydra/camera.h"
namespace blender::render::hydra {
struct ViewSettings {
int screen_width;
int screen_height;
pxr::GfVec4i border;
pxr::GfCamera camera;
ViewSettings(bContext *context);
int width();
int height();
pxr::GfCamera gf_camera();
io::hydra::CameraData camera_data;
int screen_width;
int screen_height;
pxr::GfVec4i border;
};
ViewSettings::ViewSettings(bContext *context)
: camera_data(CTX_wm_view3d(context), CTX_wm_region(context))
{
View3D *view3d = CTX_wm_view3d(context);
RegionView3D *region_data = static_cast<RegionView3D *>(CTX_wm_region_data(context));
@ -121,6 +116,14 @@ ViewSettings::ViewSettings(bContext *context)
}
border = pxr::GfVec4i(x1, y1, x2, y2);
camera = gf_camera(CTX_data_ensure_evaluated_depsgraph(context),
view3d,
region,
pxr::GfVec4f(float(border[0]) / screen_width,
float(border[1]) / screen_height,
float(width()) / screen_width,
float(height()) / screen_height));
}
int ViewSettings::width()
@ -133,14 +136,6 @@ int ViewSettings::height()
return border[3] - border[1];
}
pxr::GfCamera ViewSettings::gf_camera()
{
return camera_data.gf_camera(pxr::GfVec4f(float(border[0]) / screen_width,
float(border[1]) / screen_height,
float(width()) / screen_width,
float(height()) / screen_height));
}
DrawTexture::DrawTexture()
{
float coords[8] = {0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0};
@ -215,8 +210,7 @@ void ViewportEngine::render()
return;
};
pxr::GfCamera gf_camera = view_settings.gf_camera();
free_camera_delegate_->SetCamera(gf_camera);
free_camera_delegate_->SetCamera(view_settings.camera);
pxr::GfVec4d viewport(0.0, 0.0, view_settings.width(), view_settings.height());
render_task_delegate_->set_viewport(viewport);