use sensor size when calculating dof rather than hard-coded values.

2012-08-09 15:59:32 +00:00
parent 863aee6caf
commit 2f2560eb49
3 changed files with 27 additions and 31 deletions

View File

@@ -57,8 +57,8 @@ void *BKE_camera_add(const char *name)
     cam = BKE_libblock_alloc(&G.main->camera, ID_CA, name);
     cam->lens = 35.0f;
-    cam->sensor_x = 32.0f;
-    cam->sensor_y = 18.0f;
+    cam->sensor_x = DEFAULT_SENSOR_WIDTH;
+    cam->sensor_y = DEFAULT_SENSOR_HEIGHT;
     cam->clipsta = 0.1f;
     cam->clipend = 100.0f;
     cam->drawsize = 0.5f;
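
Note: the hunk above only swaps the literal defaults for named constants, so the default camera is not meant to change. A minimal sketch of what those macros are assumed to expand to (the names come from the diff; the header location and exact values are inferred from the 32.0f / 18.0f literals they replace):

/* Assumed definitions; DNA_camera_types.h is the expected home of camera DNA
 * defaults. Values inferred from the literals replaced in BKE_camera_add(). */
#define DEFAULT_SENSOR_WIDTH   32.0f
#define DEFAULT_SENSOR_HEIGHT  18.0f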

View File

@@ -22,6 +22,7 @@
#include "COM_ConvertDepthToRadiusOperation.h"
#include "BLI_math.h"
#include "BKE_camera.h"
#include "DNA_camera_types.h"
ConvertDepthToRadiusOperation::ConvertDepthToRadiusOperation() : NodeOperation()
@@ -37,41 +38,35 @@ ConvertDepthToRadiusOperation::ConvertDepthToRadiusOperation() : NodeOperation()
 float ConvertDepthToRadiusOperation::determineFocalDistance()
 {
-    if (this->m_cameraObject == NULL || this->m_cameraObject->type != OB_CAMERA) {
-        return 10.0f;
-    }
-    else {
+    if (this->m_cameraObject && this->m_cameraObject->type == OB_CAMERA) {
         Camera *camera = (Camera *)this->m_cameraObject->data;
         this->m_cam_lens = camera->lens;
-        if (camera->dof_ob) {
-            /* too simple, better to return the distance on the view axis only
-             * return len_v3v3(ob->obmat[3], cam->dof_ob->obmat[3]); */
-            float mat[4][4], imat[4][4], obmat[4][4];
-            copy_m4_m4(obmat, this->m_cameraObject->obmat);
-            normalize_m4(obmat);
-            invert_m4_m4(imat, obmat);
-            mult_m4_m4m4(mat, imat, camera->dof_ob->obmat);
-            return fabsf(mat[3][2]);
-        }
-        else {
-            return camera->YF_dofdist;
-        }
+        return BKE_camera_object_dof_distance(this->m_cameraObject);
     }
+    else {
+        return 10.0f;
+    }
 }

 void ConvertDepthToRadiusOperation::initExecution()
 {
+    float cam_sensor = DEFAULT_SENSOR_WIDTH;
+    Camera *camera = NULL;
+
+    if (this->m_cameraObject && this->m_cameraObject->type == OB_CAMERA) {
+        camera = (Camera *)this->m_cameraObject->data;
+        cam_sensor = BKE_camera_sensor_size(camera->sensor_fit, camera->sensor_x, camera->sensor_y);
+    }
+
     this->m_inputOperation = this->getInputSocketReader(0);
     float focalDistance = determineFocalDistance();
     if (focalDistance == 0.0f) focalDistance = 1e10f; /* if the dof is 0.0 then set it be be far away */
-    this->m_inverseFocalDistance = 1.f / focalDistance;
+    this->m_inverseFocalDistance = 1.0f / focalDistance;
     this->m_aspect = (this->getWidth() > this->getHeight()) ? (this->getHeight() / (float)this->getWidth()) : (this->getWidth() / (float)this->getHeight());
-    this->m_aperture = 0.5f * (this->m_cam_lens / (this->m_aspect * 32.0f)) / this->m_fStop;
-    float minsz = MIN2(getWidth(), getHeight());
-    this->m_dof_sp = (float)minsz / (16.f / this->m_cam_lens); // <- == aspect * MIN2(img->x, img->y) / tan(0.5f * fov);
+    this->m_aperture = 0.5f * (this->m_cam_lens / (this->m_aspect * cam_sensor)) / this->m_fStop;
+    float minsz = min(getWidth(), getHeight());
+    this->m_dof_sp = (float)minsz / ((cam_sensor / 2.0f) / this->m_cam_lens); // <- == aspect * MIN2(img->x, img->y) / tan(0.5f * fov);

     if (this->m_blurPostOperation) {
         m_blurPostOperation->setSigma(min(m_aperture * 128.0f, this->m_maxRadius));
     }
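
The cam_sensor value used above comes from BKE_camera_sensor_size(), which the diff only calls. As a rough illustration of the selection it is expected to perform (a sketch, not Blender's code; the CAMERA_SENSOR_FIT_* constants below are written out as assumptions), the camera's fit mode decides whether the horizontal or vertical sensor dimension drives the DOF math:

/* Sketch only: pick the sensor dimension the camera is set to fit against.
 * AUTO and horizontal fit use the sensor width; only an explicit vertical
 * fit uses the sensor height. */
enum {
    CAMERA_SENSOR_FIT_AUTO = 0,
    CAMERA_SENSOR_FIT_HOR  = 1,
    CAMERA_SENSOR_FIT_VERT = 2
};

static float camera_sensor_size_sketch(int sensor_fit, float sensor_x, float sensor_y)
{
    if (sensor_fit == CAMERA_SENSOR_FIT_VERT) {
        return sensor_y;
    }
    return sensor_x;
}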

View File

@@ -254,6 +254,7 @@ static void defocus_blur(bNode *node, CompBuf *new, CompBuf *img, CompBuf *zbuf,
     BokehCoeffs BKH[8]; // bokeh shape data, here never > 8 pts.
     float bkh_b[4] = {0}; // shape 2D bound
     float cam_fdist=1, cam_invfdist=1, cam_lens=35;
+    float cam_sensor = DEFAULT_SENSOR_WIDTH;
     float dof_sp, maxfgc, bk_hn_theta=0, inradsq=0;
     int y, len_bkh=0, ydone = FALSE;
     float aspect, aperture;
@@ -268,17 +269,17 @@ static void defocus_blur(bNode *node, CompBuf *new, CompBuf *img, CompBuf *zbuf,
         Camera* cam = (Camera*)camob->data;
         cam_lens = cam->lens;
         cam_fdist = BKE_camera_object_dof_distance(camob);
-        if (cam_fdist==0.0f) cam_fdist = 1e10f; /* if the dof is 0.0 then set it be be far away */
-        cam_invfdist = 1.f/cam_fdist;
+        cam_sensor = BKE_camera_sensor_size(cam->sensor_fit, cam->sensor_x, cam->sensor_y);
+        if (cam_fdist == 0.0f) cam_fdist = 1e10f; /* if the dof is 0.0 then set it be be far away */
+        cam_invfdist = 1.f / cam_fdist;
     }

     // guess work here.. best match with raytraced result
     minsz = MIN2(img->x, img->y);
-    dof_sp = (float)minsz / (16.f / cam_lens); // <- == aspect * MIN2(img->x, img->y) / tan(0.5f * fov);
+    dof_sp = (float)minsz / ((cam_sensor / 2.0f) / cam_lens); // <- == aspect * MIN2(img->x, img->y) / tan(0.5f * fov);

     // aperture
     aspect = (img->x > img->y) ? (img->y / (float)img->x) : (img->x / (float)img->y);
-    aperture = 0.5f*(cam_lens / (aspect*32.f)) / nqd->fstop;
+    aperture = 0.5f * (cam_lens / (aspect * cam_sensor)) / nqd->fstop;

     // if not disk, make bokeh coefficients and other needed data
     if (nqd->bktype!=0) {
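
As a sanity check on the two formula changes, the new expressions reduce to the old hard-coded ones whenever the sensor is the 32mm default: aspect * cam_sensor matches the old aspect * 32.f, and (cam_sensor / 2.0f) / cam_lens matches the old 16.f / cam_lens. A small standalone C sketch (the helper names are illustrative, not Blender API) that demonstrates this:

#include <assert.h>
#include <math.h>
#include <stdio.h>

/* Illustrative constant; assumed to match DEFAULT_SENSOR_WIDTH and the
 * previously hard-coded 32.0f. */
#define SENSOR_DEFAULT 32.0f

/* New-style aperture term from the diff, wrapped in a hypothetical helper. */
static float aperture_term(float lens, float aspect, float sensor, float fstop)
{
    return 0.5f * (lens / (aspect * sensor)) / fstop;
}

/* New-style dof_sp term from the diff. */
static float dof_sp_term(float minsz, float lens, float sensor)
{
    return minsz / ((sensor / 2.0f) / lens);
}

int main(void)
{
    const float lens = 35.0f, aspect = 540.0f / 960.0f, fstop = 2.8f, minsz = 540.0f;

    /* With the default sensor width the new code reproduces the old constants. */
    assert(fabsf(aperture_term(lens, aspect, SENSOR_DEFAULT, fstop) -
                 0.5f * (lens / (aspect * 32.0f)) / fstop) < 1e-6f);
    assert(fabsf(dof_sp_term(minsz, lens, SENSOR_DEFAULT) -
                 minsz / (16.0f / lens)) < 1e-3f);

    /* A non-default sensor (e.g. full-frame 36mm) now changes the blur radius. */
    printf("aperture @32mm: %g\n", aperture_term(lens, aspect, 32.0f, fstop));
    printf("aperture @36mm: %g\n", aperture_term(lens, aspect, 36.0f, fstop));
    return 0;
}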