Compare commits

38 Commits

SHA1 Message Date
e769961f32 order lines correctly 2021-03-04 18:18:50 +01:00
e8b29dab58 Merge branch 'master' into temp-derived-node-tree-refactor 2021-03-04 18:17:25 +01:00
d059fe522b cleanup 2021-03-04 18:06:56 +01:00
97156bbce5 rename files 2021-03-04 18:03:11 +01:00
2fb26211db rename new type to derived node tree 2021-03-04 18:01:49 +01:00
238b05ce30 Merge branch 'master' into temp-derived-node-tree-refactor 2021-03-04 17:54:36 +01:00
51d8c2ce75 fix building mf network 2021-03-04 17:14:35 +01:00
49049a3e6f simplify getting input values for socket 2021-03-04 16:55:44 +01:00
ee5952cf11 simplify naming 2021-03-04 16:41:19 +01:00
aa31d02b3b cleanup 2021-03-04 16:39:34 +01:00
1a778c2008 make node members private 2021-03-04 16:38:10 +01:00
7b6d8ff6dd simplify sockets 2021-03-04 16:33:33 +01:00
59a55de6f9 bring back support for muted nodes 2021-03-04 16:16:40 +01:00
4aa0105aac move function to xxx socket 2021-03-04 15:51:42 +01:00
65b892fa88 convert types from unlinked sockets 2021-03-04 15:42:29 +01:00
4780ea4bbd Merge branch 'master' into temp-derived-node-tree-refactor 2021-03-04 15:30:43 +01:00
2a879a4666 Merge branch 'master' into temp-derived-node-tree-refactor 2021-03-04 15:18:13 +01:00
4fbf19f3fd remove old derived node tree 2021-03-04 10:10:59 +01:00
3ed0843df9 remove all uses of old derived node tree 2021-03-04 10:08:42 +01:00
f94b3c886c remove remaining derived tree references from MOD_nodes.cc 2021-03-03 17:56:25 +01:00
ad5775452b Merge branch 'master' into temp-derived-node-tree-refactor 2021-03-03 16:37:55 +01:00
422901585b cleanup naming 2021-03-03 16:37:38 +01:00
940704beb3 remove unused function 2021-03-03 16:27:46 +01:00
a1653184c0 cleanup context 2021-03-03 16:24:05 +01:00
4658bd0342 cleanup 2021-03-03 16:12:08 +01:00
5d0b263c04 cleanup 2021-03-03 16:09:28 +01:00
30d71a7405 initial somewhat working function 2021-03-03 11:42:57 +01:00
Still does not work correctly with the pebbles demo file, because the math nodes don't work yet.
0d15b30eb1 simplify importing types 2021-03-03 09:31:16 +01:00
1252f72606 prelude 2021-03-03 09:22:06 +01:00
0b0aba3cee support constructing context 2021-03-03 09:17:45 +01:00
3a64bed4e4 more constructors 2021-03-03 09:13:37 +01:00
1cd47ba37e operator bool 2021-03-03 09:03:56 +01:00
8986cb9ef8 more accessors 2021-03-03 09:01:30 +01:00
fd3d5ae32e add conversion/comparison operators and hash functions 2021-03-03 08:54:21 +01:00
a40b4d5439 construct contexts 2021-03-03 08:33:14 +01:00
a88e504fbd cleanup 2021-03-03 08:23:41 +01:00
cae115835f destructor 2021-03-03 08:17:49 +01:00
50fda06ef5 initial xxx node tree 2021-03-03 08:13:17 +01:00
925 changed files with 13802 additions and 21961 deletions

View File

@@ -161,7 +161,6 @@ PenaltyBreakString: 1000000
# "^\s+[A-Z][A-Z0-9_]+\s*\([^\n]*\)\n\s*\{"
ForEachMacros:
- BEGIN_ANIMFILTER_SUBCHANNELS
- BKE_pbvh_vertex_iter_begin
- BLI_FOREACH_SPARSE_RANGE
- BLI_SMALLSTACK_ITER_BEGIN
- BMO_ITER

View File

@@ -506,7 +506,7 @@ check_descriptions: .FORCE
#
source_archive: .FORCE
python3 ./build_files/utils/make_source_archive.py
./build_files/utils/make_source_archive.sh
INKSCAPE_BIN?="inkscape"
icons: .FORCE

View File

@@ -66,7 +66,7 @@ endif()
if(XCODE_VERSION)
# Construct SDKs path ourselves, because xcode-select path could be ambiguous.
# Both /Applications/Xcode.app/Contents/Developer or /Applications/Xcode.app would be allowed.
set(XCODE_SDK_DIR ${XCODE_DEVELOPER_DIR}/Platforms/MacOSX.platform/Developer/SDKs)
set(XCODE_SDK_DIR ${XCODE_DEVELOPER_DIR}/Platforms/MacOSX.platform//Developer/SDKs)
# Detect SDK version to use
if(NOT DEFINED OSX_SYSTEM)

View File

@@ -1,198 +0,0 @@
#!/usr/bin/env python3
import dataclasses
import os
import re
import subprocess
from pathlib import Path
from typing import Iterable, TextIO

# This script can run from any location,
# output is created in the $CWD
#
# NOTE: while the Python part of this script is portable,
# it relies on external commands typically found on GNU/Linux.
# Support for other platforms could be added by moving GNU `tar` & `md5sum` use to Python.

SKIP_NAMES = {
    ".gitignore",
    ".gitmodules",
    ".arcconfig",
}


def main() -> None:
    output_dir = Path(".").absolute()
    blender_srcdir = Path(__file__).absolute().parent.parent.parent
    print(f"Source dir: {blender_srcdir}")

    version = parse_blender_version(blender_srcdir)
    manifest = output_dir / f"blender-{version}-manifest.txt"
    tarball = output_dir / f"blender-{version}.tar.xz"

    os.chdir(blender_srcdir)
    create_manifest(version, manifest)
    create_tarball(version, tarball, manifest)
    create_checksum_file(tarball)
    cleanup(manifest)
    print("Done!")


@dataclasses.dataclass
class BlenderVersion:
    version: int  # 293 for 2.93.1
    patch: int  # 1 for 2.93.1
    cycle: str  # 'alpha', 'beta', 'release', maybe others.

    @property
    def is_release(self) -> bool:
        return self.cycle == "release"

    def __str__(self) -> str:
        """Convert to version string.

        >>> str(BlenderVersion(293, 1, "alpha"))
        '2.93.1-alpha'
        >>> str(BlenderVersion(327, 0, "release"))
        '3.27.0'
        """
        version_major = self.version // 100
        version_minor = self.version % 100
        as_string = f"{version_major}.{version_minor}.{self.patch}"
        if self.is_release:
            return as_string
        return f"{as_string}-{self.cycle}"


def parse_blender_version(blender_srcdir: Path) -> BlenderVersion:
    version_path = blender_srcdir / "source/blender/blenkernel/BKE_blender_version.h"

    version_info = {}
    line_re = re.compile(r"^#define (BLENDER_VERSION[A-Z_]*)\s+([0-9a-z]+)$")

    with version_path.open(encoding="utf-8") as version_file:
        for line in version_file:
            match = line_re.match(line.strip())
            if not match:
                continue
            version_info[match.group(1)] = match.group(2)

    return BlenderVersion(
        int(version_info["BLENDER_VERSION"]),
        int(version_info["BLENDER_VERSION_PATCH"]),
        version_info["BLENDER_VERSION_CYCLE"],
    )


### Manifest creation


def create_manifest(version: BlenderVersion, outpath: Path) -> None:
    print(f'Building manifest of files: "{outpath}"...', end="", flush=True)
    with outpath.open("w", encoding="utf-8") as outfile:
        main_files_to_manifest(outfile)
        submodules_to_manifest(version, outfile)
    print("OK")


def main_files_to_manifest(outfile: TextIO) -> None:
    for path in git_ls_files():
        print(path, file=outfile)


def submodules_to_manifest(version: BlenderVersion, outfile: TextIO) -> None:
    skip_addon_contrib = version.is_release
    for line in git_command("submodule"):
        submodule = line.split()[1]

        # Don't use native slashes as GIT for MS-Windows outputs forward slashes.
        if skip_addon_contrib and submodule == "release/scripts/addons_contrib":
            continue

        for path in git_ls_files(Path(submodule)):
            print(path, file=outfile)


def create_tarball(version: BlenderVersion, tarball: Path, manifest: Path) -> None:
    print(f'Creating archive: "{tarball}" ...', end="", flush=True)

    # Requires GNU `tar`, since `--transform` is used.
    command = [
        "tar",
        "--transform",
        f"s,^,blender-{version}/,g",
        "--use-compress-program=xz -9",
        "--create",
        f"--file={tarball}",
        f"--files-from={manifest}",
        # Without owner/group args, extracting the files as root will
        # use ownership from the tar archive:
        "--owner=0",
        "--group=0",
    ]

    subprocess.run(command, check=True, timeout=300)
    print("OK")


def create_checksum_file(tarball: Path) -> None:
    md5_path = tarball.with_name(tarball.name + ".md5sum")
    print(f'Creating checksum: "{md5_path}" ...', end="", flush=True)
    command = [
        "md5sum",
        # The name is enough, as the tarball resides in the same dir as the MD5
        # file, and that's the current working directory.
        tarball.name,
    ]
    md5_cmd = subprocess.run(
        command, stdout=subprocess.PIPE, check=True, text=True, timeout=300
    )
    with md5_path.open("w", encoding="utf-8") as outfile:
        outfile.write(md5_cmd.stdout)
    print("OK")


def cleanup(manifest: Path) -> None:
    print("Cleaning up ...", end="", flush=True)
    if manifest.exists():
        manifest.unlink()
    print("OK")


## Low-level commands


def git_ls_files(directory: Path = Path(".")) -> Iterable[Path]:
    """Generator, yields lines of output from 'git ls-files'.

    Only lines that are actually files (so no directories, sockets, etc.) are
    returned, and never one from SKIP_NAMES.
    """
    for line in git_command("-C", str(directory), "ls-files"):
        path = directory / line
        if not path.is_file() or path.name in SKIP_NAMES:
            continue
        yield path


def git_command(*cli_args) -> Iterable[str]:
    """Generator, yields lines of output from a Git command."""
    command = ("git", *cli_args)

    # import shlex
    # print(">", " ".join(shlex.quote(arg) for arg in command))

    git = subprocess.run(
        command, stdout=subprocess.PIPE, check=True, text=True, timeout=30
    )
    for line in git.stdout.split("\n"):
        if line:
            yield line


if __name__ == "__main__":
    import doctest

    if doctest.testmod().failed:
        raise SystemExit("ERROR: Self-test failed, refusing to run")

    main()
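
A quick illustration (not part of the diff) of how the version parsing above works: the regex maps the `#define BLENDER_VERSION*` macros to a dict that feeds BlenderVersion. A minimal, self-contained sketch; the header contents below are made-up stand-ins for BKE_blender_version.h:

import re

line_re = re.compile(r"^#define (BLENDER_VERSION[A-Z_]*)\s+([0-9a-z]+)$")
# Hypothetical header snippet, standing in for BKE_blender_version.h:
sample_header = """
#define BLENDER_VERSION 293
#define BLENDER_VERSION_PATCH 0
#define BLENDER_VERSION_CYCLE alpha
"""

version_info = {}
for line in sample_header.splitlines():
    match = line_re.match(line.strip())
    if match:
        version_info[match.group(1)] = match.group(2)

print(version_info)
# {'BLENDER_VERSION': '293', 'BLENDER_VERSION_PATCH': '0', 'BLENDER_VERSION_CYCLE': 'alpha'}
# Feeding these into BlenderVersion(293, 0, "alpha") and calling str() yields '2.93.0-alpha'.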

View File

@@ -0,0 +1,82 @@
#!/bin/sh
# This script can run from any location,
# output is created in the $CWD
BASE_DIR="$PWD"
blender_srcdir=$(dirname -- $0)/../..
blender_version=$(grep "BLENDER_VERSION\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender_version.h" | awk '{print $3}')
blender_version_patch=$(grep "BLENDER_VERSION_PATCH\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender_version.h" | awk '{print $3}')
blender_version_cycle=$(grep "BLENDER_VERSION_CYCLE\s" "$blender_srcdir/source/blender/blenkernel/BKE_blender_version.h" | awk '{print $3}')
VERSION=$(expr $blender_version / 100).$(expr $blender_version % 100).$blender_version_patch
if [ "$blender_version_cycle" = "release" ] ; then
SUBMODULE_EXCLUDE="^\(release/scripts/addons_contrib\)$"
else
VERSION=$VERSION-$blender_version_cycle
SUBMODULE_EXCLUDE="^$" # dummy regex
fi
MANIFEST="blender-$VERSION-manifest.txt"
TARBALL="blender-$VERSION.tar.xz"
cd "$blender_srcdir"
# not so nice, but works
FILTER_FILES_PY=\
"import os, sys; "\
"[print(l[:-1]) for l in sys.stdin.readlines() "\
"if os.path.isfile(l[:-1]) "\
"if os.path.basename(l[:-1]) not in {"\
"'.gitignore', "\
"'.gitmodules', "\
"'.arcconfig', "\
"}"\
"]"
# Build master list
echo -n "Building manifest of files: \"$BASE_DIR/$MANIFEST\" ..."
git ls-files | python3 -c "$FILTER_FILES_PY" > $BASE_DIR/$MANIFEST
# Enumerate submodules
for lcv in $(git submodule | awk '{print $2}' | grep -v "$SUBMODULE_EXCLUDE"); do
  cd "$BASE_DIR"
  cd "$blender_srcdir/$lcv"
  git ls-files | python3 -c "$FILTER_FILES_PY" | awk '$0="'"$lcv"/'"$0' >> $BASE_DIR/$MANIFEST
  cd "$BASE_DIR"
done
echo "OK"
# Create the tarball
#
# Without owner/group args, extracting the files as root will
# use ownership from the tar archive.
cd "$blender_srcdir"
echo -n "Creating archive: \"$BASE_DIR/$TARBALL\" ..."
tar \
  --transform "s,^,blender-$VERSION/,g" \
  --use-compress-program="xz -9" \
  --create \
  --file="$BASE_DIR/$TARBALL" \
  --files-from="$BASE_DIR/$MANIFEST" \
  --owner=0 \
  --group=0
echo "OK"
# Create checksum file
cd "$BASE_DIR"
echo -n "Creating checksum: \"$BASE_DIR/$TARBALL.md5sum\" ..."
md5sum "$TARBALL" > "$TARBALL.md5sum"
echo "OK"
# Cleanup
echo -n "Cleaning up ..."
rm "$BASE_DIR/$MANIFEST"
echo "OK"
echo "Done!"

View File

@@ -32,7 +32,9 @@ if(MSVC_CLANG AND WITH_OPENMP AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "9.0.1
endif()
# Exporting functions from the blender binary gives linker warnings on Apple arm64 systems.
# Silence them here.
# For now and until Apple arm64 is officially supported, these will just be silenced here.
# TODO (sebbas): Check if official arm64 devices give linker warnings without this block.
if(APPLE AND ("${CMAKE_OSX_ARCHITECTURES}" STREQUAL "arm64"))
if(CMAKE_COMPILER_IS_GNUCXX OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
string(APPEND CMAKE_C_FLAGS " -fvisibility=hidden")

View File

@@ -1,4 +1,4 @@
/*
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
@@ -38,13 +38,8 @@
#endif
#if defined(_MSC_VER)
# include <Windows.h>
# include <VersionHelpers.h> /* This needs to be included after Windows.h. */
# include <io.h>
# if !defined(ENABLE_VIRTUAL_TERMINAL_PROCESSING)
# define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x0004
# endif
# include <windows.h>
#endif
/* For printing timestamp. */
@@ -233,9 +228,6 @@ enum eCLogColor {
#define COLOR_LEN (COLOR_RESET + 1)
static const char *clg_color_table[COLOR_LEN] = {NULL};
#ifdef _WIN32
static DWORD clg_previous_console_mode = 0;
#endif
static void clg_color_table_init(bool use_color)
{
@@ -303,27 +295,19 @@ static enum eCLogColor clg_severity_to_color(enum CLG_Severity severity)
* - `foo` exact match of `foo`.
* - `foo.bar` exact match for `foo.bar`
* - `foo.*` match for `foo` & `foo.bar` & `foo.bar.baz`
* - `*bar*` match for `foo.bar` & `baz.bar` & `foo.barbaz`
* - `*` matches everything.
*/
static bool clg_ctx_filter_check(CLogContext *ctx, const char *identifier)
{
const size_t identifier_len = strlen(identifier);
const int identifier_len = strlen(identifier);
for (uint i = 0; i < 2; i++) {
const CLG_IDFilter *flt = ctx->filters[i];
while (flt != NULL) {
const size_t len = strlen(flt->match);
const int len = strlen(flt->match);
if (STREQ(flt->match, "*") || ((len == identifier_len) && (STREQ(identifier, flt->match)))) {
return (bool)i;
}
if (flt->match[0] == '*' && flt->match[len - 1] == '*') {
char *match = MEM_callocN(sizeof(char) * len - 1, __func__);
memcpy(match, flt->match + 1, len - 2);
if (strstr(identifier, match) != NULL) {
return (bool)i;
}
}
else if ((len >= 2) && (STREQLEN(".*", &flt->match[len - 2], 2))) {
if ((len >= 2) && (STREQLEN(".*", &flt->match[len - 2], 2))) {
if (((identifier_len == len - 2) && STREQLEN(identifier, flt->match, len - 2)) ||
((identifier_len >= len - 1) && STREQLEN(identifier, flt->match, len - 1))) {
return (bool)i;
@@ -564,22 +548,13 @@ static void CLG_ctx_output_set(CLogContext *ctx, void *file_handle)
#if defined(__unix__) || defined(__APPLE__)
ctx->use_color = isatty(ctx->output);
#elif defined(WIN32)
/* As of Windows 10 build 18298 all the standard consoles support color
* like the Linux terminal does, but it needs to be turned on.
* To turn on colors we need to enable virtual terminal processing by passing the flag
* ENABLE_VIRTUAL_TERMINAL_PROCESSING into SetConsoleMode.
* If the system doesn't support virtual terminal processing it will fail silently and the flag
* will not be set. */
/* Windows Terminal supports color like the Linux terminals do, while the standard console does
* not; the way to tell the two apart is to look at the `WT_SESSION` environment variable, which
* will only be defined for Windows Terminal. */
GetConsoleMode(GetStdHandle(STD_OUTPUT_HANDLE), &clg_previous_console_mode);
ctx->use_color = 0;
if (IsWindows10OrGreater() && isatty(ctx->output)) {
DWORD mode = clg_previous_console_mode | ENABLE_VIRTUAL_TERMINAL_PROCESSING;
if (SetConsoleMode(GetStdHandle(STD_OUTPUT_HANDLE), mode)) {
ctx->use_color = 1;
}
}
/* #getenv is used here rather than #BLI_getenv since it would be a bad level call
* and there are no benefits for using it in this context. */
ctx->use_color = isatty(ctx->output) && getenv("WT_SESSION");
#endif
}
@@ -663,9 +638,6 @@ static CLogContext *CLG_ctx_init(void)
static void CLG_ctx_free(CLogContext *ctx)
{
#if defined(WIN32)
SetConsoleMode(GetStdHandle(STD_OUTPUT_HANDLE), clg_previous_console_mode);
#endif
while (ctx->types != NULL) {
CLG_LogType *item = ctx->types;
ctx->types = item->next;

View File

@@ -27,8 +27,8 @@ BVHOptiX::BVHOptiX(const BVHParams &params_,
Device *device)
: BVH(params_, geometry_, objects_),
traversable_handle(0),
as_data(device, params_.top_level ? "optix tlas" : "optix blas", false),
motion_transform_data(device, "optix motion transform", false)
as_data(device, params_.top_level ? "optix tlas" : "optix blas"),
motion_transform_data(device, "optix motion transform")
{
}

View File

@@ -854,7 +854,7 @@ CUDADevice::CUDAMem *CUDADevice::generic_alloc(device_memory &mem, size_t pitch_
void *shared_pointer = 0;
if (mem_alloc_result != CUDA_SUCCESS && can_map_host && mem.type != MEM_DEVICE_ONLY) {
if (mem_alloc_result != CUDA_SUCCESS && can_map_host) {
if (mem.shared_pointer) {
/* Another device already allocated host memory. */
mem_alloc_result = CUDA_SUCCESS;
@@ -877,14 +877,8 @@ CUDADevice::CUDAMem *CUDADevice::generic_alloc(device_memory &mem, size_t pitch_
}
if (mem_alloc_result != CUDA_SUCCESS) {
if (mem.type == MEM_DEVICE_ONLY) {
status = " failed, out of device memory";
set_error("System is out of GPU memory");
}
else {
status = " failed, out of device and host memory";
set_error("System is out of GPU and shared host memory");
}
status = " failed, out of device and host memory";
set_error("System is out of GPU and shared host memory");
}
if (mem.name) {

View File

@@ -396,7 +396,8 @@ class CPUDevice : public Device {
<< string_human_readable_size(mem.memory_size()) << ")";
}
if (mem.type == MEM_DEVICE_ONLY || !mem.host_pointer) {
if (mem.type == MEM_DEVICE_ONLY) {
assert(!mem.host_pointer);
size_t alignment = MIN_ALIGNMENT_CPU_DATA_TYPES;
void *data = util_aligned_malloc(mem.memory_size(), alignment);
mem.device_pointer = (device_ptr)data;
@@ -458,7 +459,7 @@ class CPUDevice : public Device {
tex_free((device_texture &)mem);
}
else if (mem.device_pointer) {
if (mem.type == MEM_DEVICE_ONLY || !mem.host_pointer) {
if (mem.type == MEM_DEVICE_ONLY) {
util_aligned_free((void *)mem.device_pointer);
}
mem.device_pointer = 0;

View File

@@ -171,8 +171,7 @@ class DenoisingTask {
bool gpu_temporary_mem;
DenoiseBuffers(Device *device)
: mem(device, "denoising pixel buffer"),
temporary_mem(device, "denoising temporary mem", true)
: mem(device, "denoising pixel buffer"), temporary_mem(device, "denoising temporary mem")
{
}
} buffer;

View File

@@ -270,8 +270,8 @@ class device_memory {
template<typename T> class device_only_memory : public device_memory {
public:
device_only_memory(Device *device, const char *name, bool allow_host_memory_fallback = false)
: device_memory(device, name, allow_host_memory_fallback ? MEM_READ_WRITE : MEM_DEVICE_ONLY)
device_only_memory(Device *device, const char *name)
: device_memory(device, name, MEM_DEVICE_ONLY)
{
data_type = device_type_traits<T>::data_type;
data_elements = max(device_type_traits<T>::num_elements, 1);

View File

@@ -197,8 +197,8 @@ class OptiXDevice : public CUDADevice {
OptiXDevice(DeviceInfo &info_, Stats &stats_, Profiler &profiler_, bool background_)
: CUDADevice(info_, stats_, profiler_, background_),
sbt_data(this, "__sbt", MEM_READ_ONLY),
launch_params(this, "__params", false),
denoiser_state(this, "__denoiser_state", true)
launch_params(this, "__params"),
denoiser_state(this, "__denoiser_state")
{
// Store number of CUDA streams in device info
info.cpu_threads = DebugFlags().optix.cuda_streams;
@@ -878,8 +878,8 @@ class OptiXDevice : public CUDADevice {
device_ptr input_ptr = rtile.buffer + pixel_offset;
// Copy tile data into a common buffer if necessary
device_only_memory<float> input(this, "denoiser input", true);
device_vector<TileInfo> tile_info_mem(this, "denoiser tile info", MEM_READ_ONLY);
device_only_memory<float> input(this, "denoiser input");
device_vector<TileInfo> tile_info_mem(this, "denoiser tile info", MEM_READ_WRITE);
bool contiguous_memory = true;
for (int i = 0; i < RenderTileNeighbors::SIZE; i++) {
@@ -924,7 +924,7 @@ class OptiXDevice : public CUDADevice {
}
# if OPTIX_DENOISER_NO_PIXEL_STRIDE
device_only_memory<float> input_rgb(this, "denoiser input rgb", true);
device_only_memory<float> input_rgb(this, "denoiser input rgb");
input_rgb.alloc_to_device(rect_size.x * rect_size.y * 3 * task.denoising.input_passes);
void *input_args[] = {&input_rgb.device_pointer,
@@ -1146,13 +1146,6 @@ class OptiXDevice : public CUDADevice {
const OptixBuildInput &build_input,
uint16_t num_motion_steps)
{
/* Allocate and build acceleration structures only one at a time, to prevent parallel builds
* from running out of memory (since both original and compacted acceleration structure memory
* may be allocated at the same time for the duration of this function). The builds would
* otherwise happen on the same CUDA stream anyway. */
static thread_mutex mutex;
thread_scoped_lock lock(mutex);
const CUDAContextScope scope(cuContext);
// Compute memory usage
@@ -1177,12 +1170,11 @@ class OptiXDevice : public CUDADevice {
optixAccelComputeMemoryUsage(context, &options, &build_input, 1, &sizes));
// Allocate required output buffers
device_only_memory<char> temp_mem(this, "optix temp as build mem", true);
device_only_memory<char> temp_mem(this, "optix temp as build mem");
temp_mem.alloc_to_device(align_up(sizes.tempSizeInBytes, 8) + 8);
if (!temp_mem.device_pointer)
return false; // Make sure temporary memory allocation succeeded
// Acceleration structure memory has to be allocated on the device (not allowed to be on host)
device_only_memory<char> &out_data = bvh->as_data;
if (operation == OPTIX_BUILD_OPERATION_BUILD) {
assert(out_data.device == this);
@@ -1230,7 +1222,7 @@ class OptiXDevice : public CUDADevice {
// There is no point compacting if the size does not change
if (compacted_size < sizes.outputSizeInBytes) {
device_only_memory<char> compacted_data(this, "optix compacted as", false);
device_only_memory<char> compacted_data(this, "optix compacted as");
compacted_data.alloc_to_device(compacted_size);
if (!compacted_data.device_pointer)
// Do not compact if memory allocation for compacted acceleration structure fails
@@ -1250,7 +1242,6 @@ class OptiXDevice : public CUDADevice {
std::swap(out_data.device_size, compacted_data.device_size);
std::swap(out_data.device_pointer, compacted_data.device_pointer);
// Original acceleration structure memory is freed when 'compacted_data' goes out of scope
}
}

View File

@@ -45,16 +45,7 @@ DeviceTask::DeviceTask(Type type_)
shader_filter(0),
shader_x(0),
shader_w(0),
buffers(nullptr),
tile_types(0),
denoising_from_render(false),
pass_stride(0),
frame_stride(0),
target_pass_stride(0),
pass_denoising_data(0),
pass_denoising_clean(0),
need_finish_queue(false),
integrator_branched(false)
buffers(nullptr)
{
last_update_time = time_dt();
}

View File

@@ -154,7 +154,7 @@ void NodeType::register_input(ustring name,
int struct_offset,
const void *default_value,
const NodeEnum *enum_values,
const NodeType *node_type,
const NodeType **node_type,
int flags,
int extra_flags)
{

View File

@@ -87,7 +87,7 @@ struct SocketType {
int struct_offset;
const void *default_value;
const NodeEnum *enum_values;
const NodeType *node_type;
const NodeType **node_type;
int flags;
ustring ui_name;
SocketModifiedFlags modified_flag_bit;
@@ -115,7 +115,7 @@ struct NodeType {
int struct_offset,
const void *default_value,
const NodeEnum *enum_values = NULL,
const NodeType *node_type = NULL,
const NodeType **node_type = NULL,
int flags = 0,
int extra_flags = 0);
void register_output(ustring name, ustring ui_name, SocketType::Type type);
@@ -140,38 +140,27 @@ struct NodeType {
static unordered_map<ustring, NodeType, ustringHash> &types();
};
/* Node Definition Macros
*
* Note: we use an accessor to get node types to ensure correct static
* initialization order. */
/* Node Definition Macros */
#define NODE_DECLARE \
static const NodeType *get_node_type(); \
template<typename T> static const NodeType *register_type(); \
static Node *create(const NodeType *type);
static Node *create(const NodeType *type); \
static const NodeType *node_type;
#define NODE_DEFINE(structname) \
const NodeType *structname::node_type = structname::register_type<structname>(); \
Node *structname::create(const NodeType *) \
{ \
return new structname(); \
} \
const NodeType *structname::get_node_type() \
{ \
static const NodeType *node_type = register_type<structname>(); \
return node_type; \
} \
template<typename T> const NodeType *structname::register_type()
#define NODE_ABSTRACT_DECLARE \
template<typename T> static const NodeType *register_base_type(); \
static const NodeType *get_node_base_type();
static const NodeType *node_base_type;
#define NODE_ABSTRACT_DEFINE(structname) \
const NodeType *structname::get_node_base_type() \
{ \
static const NodeType *node_base_type = register_base_type<structname>(); \
return node_base_type; \
} \
const NodeType *structname::node_base_type = structname::register_base_type<structname>(); \
template<typename T> const NodeType *structname::register_base_type()
/* Sock Definition Macros */

View File

@@ -200,7 +200,7 @@ void xml_read_node(XMLReader &reader, Node *node, xml_node xml_node)
map<ustring, Node *>::iterator it = reader.node_map.find(value);
if (it != reader.node_map.end()) {
Node *value_node = it->second;
if (value_node->is_a(socket.node_type))
if (value_node->is_a(*(socket.node_type)))
node->set(socket, it->second);
}
break;
@@ -215,7 +215,7 @@ void xml_read_node(XMLReader &reader, Node *node, xml_node xml_node)
map<ustring, Node *>::iterator it = reader.node_map.find(ustring(tokens[i]));
if (it != reader.node_map.end()) {
Node *value_node = it->second;
value[i] = (value_node->is_a(socket.node_type)) ? value_node : NULL;
value[i] = (value_node->is_a(*(socket.node_type))) ? value_node : NULL;
}
else {
value[i] = NULL;

View File

@@ -195,31 +195,108 @@ ccl_device float2 regular_polygon_sample(float corners, float rotation, float u,
ccl_device float3 ensure_valid_reflection(float3 Ng, float3 I, float3 N)
{
float3 R;
float NI = dot(N, I);
float NgR, threshold;
float3 R = 2 * dot(N, I) * N - I;
/* Check if the incident ray is coming from behind normal N. */
if (NI > 0) {
/* Normal reflection */
R = (2 * NI) * N - I;
NgR = dot(Ng, R);
/* Reflection rays may always be at least as shallow as the incoming ray. */
float threshold = min(0.9f * dot(Ng, I), 0.01f);
if (dot(Ng, R) >= threshold) {
return N;
}
/* Reflection rays may always be at least as shallow as the incoming ray. */
threshold = min(0.9f * dot(Ng, I), 0.01f);
if (NgR >= threshold) {
return N;
/* Form coordinate system with Ng as the Z axis and N inside the X-Z-plane.
* The X axis is found by normalizing the component of N that's orthogonal to Ng.
* The Y axis isn't actually needed.
*/
float NdotNg = dot(N, Ng);
float3 X = normalize(N - NdotNg * Ng);
/* Keep math expressions. */
/* clang-format off */
/* Calculate N.z and N.x in the local coordinate system.
*
* The goal of this computation is to find a N' that is rotated towards Ng just enough
* to lift R' above the threshold (here called t), therefore dot(R', Ng) = t.
*
* According to the standard reflection equation,
* this means that we want dot(2*dot(N', I)*N' - I, Ng) = t.
*
* Since the Z axis of our local coordinate system is Ng, dot(x, Ng) is just x.z, so we get
* 2*dot(N', I)*N'.z - I.z = t.
*
* The rotation is simple to express in the coordinate system we formed -
* since N lies in the X-Z-plane, we know that N' will also lie in the X-Z-plane,
* so N'.y = 0 and therefore dot(N', I) = N'.x*I.x + N'.z*I.z .
*
* Furthermore, we want N' to be normalized, so N'.x = sqrt(1 - N'.z^2).
*
* With these simplifications,
* we get the final equation 2*(sqrt(1 - N'.z^2)*I.x + N'.z*I.z)*N'.z - I.z = t.
*
* The only unknown here is N'.z, so we can solve for that.
*
* The equation has four solutions in general:
*
* N'.z = +-sqrt(0.5*(+-sqrt(I.x^2*(I.x^2 + I.z^2 - t^2)) + t*I.z + I.x^2 + I.z^2)/(I.x^2 + I.z^2))
* We can simplify this expression a bit by grouping terms:
*
* a = I.x^2 + I.z^2
* b = sqrt(I.x^2 * (a - t^2))
* c = I.z*t + a
* N'.z = +-sqrt(0.5*(+-b + c)/a)
*
* Two solutions can immediately be discarded because they're negative so N' would lie in the
* lower hemisphere.
*/
/* clang-format on */
float Ix = dot(I, X), Iz = dot(I, Ng);
float Ix2 = sqr(Ix), Iz2 = sqr(Iz);
float a = Ix2 + Iz2;
float b = safe_sqrtf(Ix2 * (a - sqr(threshold)));
float c = Iz * threshold + a;
/* Evaluate both solutions.
* In many cases one can be immediately discarded (if N'.z would be imaginary or larger than
* one), so check for that first. If no option is viable (might happen in extreme cases like N
* being in the wrong hemisphere), give up and return Ng. */
float fac = 0.5f / a;
float N1_z2 = fac * (b + c), N2_z2 = fac * (-b + c);
bool valid1 = (N1_z2 > 1e-5f) && (N1_z2 <= (1.0f + 1e-5f));
bool valid2 = (N2_z2 > 1e-5f) && (N2_z2 <= (1.0f + 1e-5f));
float2 N_new;
if (valid1 && valid2) {
/* If both are possible, do the expensive reflection-based check. */
float2 N1 = make_float2(safe_sqrtf(1.0f - N1_z2), safe_sqrtf(N1_z2));
float2 N2 = make_float2(safe_sqrtf(1.0f - N2_z2), safe_sqrtf(N2_z2));
float R1 = 2 * (N1.x * Ix + N1.y * Iz) * N1.y - Iz;
float R2 = 2 * (N2.x * Ix + N2.y * Iz) * N2.y - Iz;
valid1 = (R1 >= 1e-5f);
valid2 = (R2 >= 1e-5f);
if (valid1 && valid2) {
/* If both solutions are valid, return the one with the shallower reflection since it will be
* closer to the input (if the original reflection wasn't shallow, we would not be in this
* part of the function). */
N_new = (R1 < R2) ? N1 : N2;
}
else {
/* If only one reflection is valid (= positive), pick that one. */
N_new = (R1 > R2) ? N1 : N2;
}
}
else if (valid1 || valid2) {
/* Only one solution passes the N'.z criterion, so pick that one. */
float Nz2 = valid1 ? N1_z2 : N2_z2;
N_new = make_float2(safe_sqrtf(1.0f - Nz2), safe_sqrtf(Nz2));
}
else {
/* Bad incident */
R = -I;
NgR = dot(Ng, R);
threshold = 0.01f;
return Ng;
}
R = R + Ng * (threshold - NgR); /* Lift the reflection above the threshold. */
return normalize(I * len(R) + R * len(I)); /* Find a bisector. */
return N_new.x * X + N_new.y * Ng;
}
CCL_NAMESPACE_END
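
As a numeric sanity check (plain Python, not the kernel code), the closed-form solution derived in the comments above can be exercised on a toy input to confirm that the adjusted normal puts the reflection right at the threshold. The helper names and vector values below are made up, and the corner case where both candidate reflections come out negative is simplified to just returning Ng.

import math

def dot(a, b):
    return sum(x * y for x, y in zip(a, b))

def normalize(v):
    length = math.sqrt(dot(v, v))
    return tuple(x / length for x in v)

def ensure_valid_reflection(Ng, I, N):
    R = tuple(2 * dot(N, I) * n - i for n, i in zip(N, I))
    threshold = min(0.9 * dot(Ng, I), 0.01)
    if dot(Ng, R) >= threshold:
        return N
    # Local frame: Z = Ng, X = component of N orthogonal to Ng.
    X = normalize(tuple(n - dot(N, Ng) * g for n, g in zip(N, Ng)))
    Ix, Iz = dot(I, X), dot(I, Ng)
    a = Ix * Ix + Iz * Iz
    b = math.sqrt(max(0.0, Ix * Ix * (a - threshold * threshold)))
    c = Iz * threshold + a
    best = None
    for Nz2 in (0.5 * (b + c) / a, 0.5 * (-b + c) / a):
        if not (1e-5 < Nz2 <= 1.0 + 1e-5):
            continue  # imaginary or > 1, not a usable normal
        Nx, Nz = math.sqrt(max(0.0, 1.0 - Nz2)), math.sqrt(Nz2)
        Rz = 2 * (Nx * Ix + Nz * Iz) * Nz - Iz  # dot(R', Ng) for this candidate
        if Rz >= 1e-5 and (best is None or Rz < best[0]):
            best = (Rz, Nx, Nz)  # keep the shallower valid reflection
    if best is None:
        return Ng  # give up (the kernel handles this corner case slightly differently)
    _, Nx, Nz = best
    return tuple(Nx * x + Nz * g for x, g in zip(X, Ng))

Ng = (0.0, 0.0, 1.0)
I = normalize((0.6, 0.0, 0.1))    # grazing incident direction (made up)
N = normalize((0.9, 0.0, 0.05))   # shading normal whose reflection dips below the surface
N_new = ensure_valid_reflection(Ng, I, N)
R_new = tuple(2 * dot(N_new, I) * n - i for n, i in zip(N_new, I))
print(round(dot(Ng, R_new), 6), round(min(0.9 * dot(Ng, I), 0.01), 6))  # both ~0.01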

View File

@@ -175,8 +175,7 @@ ccl_device_inline void kernel_volume_step_init(KernelGlobals *kg,
const float object_step_size,
float t,
float *step_size,
float *step_shade_offset,
float *steps_offset)
float *step_offset)
{
const int max_steps = kernel_data.integrator.volume_max_steps;
float step = min(object_step_size, t);
@@ -187,14 +186,7 @@ ccl_device_inline void kernel_volume_step_init(KernelGlobals *kg,
}
*step_size = step;
/* Perform shading at this offset within a step, to integrate over
* the entire step segment. */
*step_shade_offset = path_state_rng_1D_hash(kg, state, 0x1e31d8a4);
/* Shift starting point of all segment by this random amount to avoid
* banding artifacts from the volume bounding shape. */
*steps_offset = path_state_rng_1D_hash(kg, state, 0x3d22c7b3);
*step_offset = path_state_rng_1D_hash(kg, state, 0x1e31d8a4) * step;
}
/* Volume Shadows
@@ -228,15 +220,10 @@ ccl_device void kernel_volume_shadow_heterogeneous(KernelGlobals *kg,
float3 tp = *throughput;
const float tp_eps = 1e-6f; /* todo: this is likely not the right value */
/* Prepare for stepping.
* For shadows we do not offset all segments, since the starting point is
* already a random distance inside the volume. It also appears to create
* banding artifacts for unknown reasons. */
/* prepare for stepping */
int max_steps = kernel_data.integrator.volume_max_steps;
float step_size, step_shade_offset, unused;
kernel_volume_step_init(
kg, state, object_step_size, ray->t, &step_size, &step_shade_offset, &unused);
const float steps_offset = 1.0f;
float step_offset, step_size;
kernel_volume_step_init(kg, state, object_step_size, ray->t, &step_size, &step_offset);
/* compute extinction at the start */
float t = 0.0f;
@@ -245,17 +232,23 @@ ccl_device void kernel_volume_shadow_heterogeneous(KernelGlobals *kg,
for (int i = 0; i < max_steps; i++) {
/* advance to new position */
float new_t = min(ray->t, (i + steps_offset) * step_size);
float dt = new_t - t;
float new_t = min(ray->t, (i + 1) * step_size);
float3 new_P = ray->P + ray->D * (t + dt * step_shade_offset);
/* use random position inside this segment to sample shader, adjust
* for last step that is shorter than other steps. */
if (new_t == ray->t) {
step_offset *= (new_t - t) / step_size;
}
float3 new_P = ray->P + ray->D * (t + step_offset);
float3 sigma_t = zero_float3();
/* compute attenuation over segment */
if (volume_shader_extinction_sample(kg, sd, state, new_P, &sigma_t)) {
/* Compute expf() only for every Nth step, to save some calculations
* because exp(a)*exp(b) = exp(a+b), also do a quick tp_eps check then. */
sum += (-sigma_t * dt);
sum += (-sigma_t * (new_t - t));
if ((i & 0x07) == 0) { /* ToDo: Other interval? */
tp = *throughput * exp3(sum);
@@ -574,12 +567,10 @@ kernel_volume_integrate_heterogeneous_distance(KernelGlobals *kg,
float3 tp = *throughput;
const float tp_eps = 1e-6f; /* todo: this is likely not the right value */
/* Prepare for stepping.
* Using a different step offset for the first step avoids banding artifacts. */
/* prepare for stepping */
int max_steps = kernel_data.integrator.volume_max_steps;
float step_size, step_shade_offset, steps_offset;
kernel_volume_step_init(
kg, state, object_step_size, ray->t, &step_size, &step_shade_offset, &steps_offset);
float step_offset, step_size;
kernel_volume_step_init(kg, state, object_step_size, ray->t, &step_size, &step_offset);
/* compute coefficients at the start */
float t = 0.0f;
@@ -593,10 +584,16 @@ kernel_volume_integrate_heterogeneous_distance(KernelGlobals *kg,
for (int i = 0; i < max_steps; i++) {
/* advance to new position */
float new_t = min(ray->t, (i + steps_offset) * step_size);
float new_t = min(ray->t, (i + 1) * step_size);
float dt = new_t - t;
float3 new_P = ray->P + ray->D * (t + dt * step_shade_offset);
/* use random position inside this segment to sample shader,
* for last shorter step we remap it to fit within the segment. */
if (new_t == ray->t) {
step_offset *= (new_t - t) / step_size;
}
float3 new_P = ray->P + ray->D * (t + step_offset);
VolumeShaderCoefficients coeff ccl_optional_struct_init;
/* compute segment */
@@ -774,12 +771,11 @@ ccl_device void kernel_volume_decoupled_record(KernelGlobals *kg,
/* prepare for volume stepping */
int max_steps;
float step_size, step_shade_offset, steps_offset;
float step_size, step_offset;
if (object_step_size != FLT_MAX) {
max_steps = kernel_data.integrator.volume_max_steps;
kernel_volume_step_init(
kg, state, object_step_size, ray->t, &step_size, &step_shade_offset, &steps_offset);
kernel_volume_step_init(kg, state, object_step_size, ray->t, &step_size, &step_offset);
# ifdef __KERNEL_CPU__
/* NOTE: For the branched path tracing it's possible to have direct
@@ -806,8 +802,7 @@ ccl_device void kernel_volume_decoupled_record(KernelGlobals *kg,
else {
max_steps = 1;
step_size = ray->t;
step_shade_offset = 0.0f;
steps_offset = 1.0f;
step_offset = 0.0f;
segment->steps = &segment->stack_step;
}
@@ -826,10 +821,16 @@ ccl_device void kernel_volume_decoupled_record(KernelGlobals *kg,
for (int i = 0; i < max_steps; i++, step++) {
/* advance to new position */
float new_t = min(ray->t, (i + steps_offset) * step_size);
float new_t = min(ray->t, (i + 1) * step_size);
float dt = new_t - t;
float3 new_P = ray->P + ray->D * (t + dt * step_shade_offset);
/* use random position inside this segment to sample shader,
* for last shorter step we remap it to fit within the segment. */
if (new_t == ray->t) {
step_offset *= (new_t - t) / step_size;
}
float3 new_P = ray->P + ray->D * (t + step_offset);
VolumeShaderCoefficients coeff ccl_optional_struct_init;
/* compute segment */
@@ -887,7 +888,7 @@ ccl_device void kernel_volume_decoupled_record(KernelGlobals *kg,
step->accum_transmittance = accum_transmittance;
step->cdf_distance = cdf_distance;
step->t = new_t;
step->shade_t = t + dt * step_shade_offset;
step->shade_t = t + step_offset;
/* stop if at the end of the volume */
t = new_t;
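
A toy 1D illustration (not kernel code) of the two step-sampling schemes on either side of the diff above; random.random() stands in for path_state_rng_1D_hash, and only the shading positions along a ray of length 1 are computed.

import random

random.seed(7)
ray_t, step_size, max_steps = 1.0, 0.3, 8

# Scheme A: one random offset per ray, rescaled on the final, shorter segment.
step_offset = random.random() * step_size
t, positions_a = 0.0, []
for i in range(max_steps):
    new_t = min(ray_t, (i + 1) * step_size)
    offset = step_offset * (new_t - t) / step_size if new_t == ray_t else step_offset
    positions_a.append(t + offset)
    t = new_t
    if new_t == ray_t:
        break

# Scheme B: segment boundaries sit at (i + steps_offset) * step_size, and shading
# happens at a fixed fraction (step_shade_offset) of each segment's actual length dt.
step_shade_offset, steps_offset = random.random(), random.random()
t, positions_b = 0.0, []
for i in range(max_steps):
    new_t = min(ray_t, (i + steps_offset) * step_size)
    dt = new_t - t
    positions_b.append(t + dt * step_shade_offset)
    t = new_t
    if new_t == ray_t:
        break

print([round(p, 3) for p in positions_a])
print([round(p, 3) for p in positions_b])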

View File

@@ -68,8 +68,8 @@ ccl_device T kernel_tex_image_interp_bicubic(const TextureInfo &info, float x, f
x = (x * info.width) - 0.5f;
y = (y * info.height) - 0.5f;
float px = floorf(x);
float py = floorf(y);
float px = floor(x);
float py = floor(y);
float fx = x - px;
float fy = y - py;
@@ -95,9 +95,9 @@ ccl_device T kernel_tex_image_interp_tricubic(const TextureInfo &info, float x,
y = (y * info.height) - 0.5f;
z = (z * info.depth) - 0.5f;
float px = floorf(x);
float py = floorf(y);
float pz = floorf(z);
float px = floor(x);
float py = floor(y);
float pz = floor(z);
float fx = x - px;
float fy = y - py;
float fz = z - pz;
@@ -127,9 +127,9 @@ ccl_device T kernel_tex_image_interp_tricubic(const TextureInfo &info, float x,
template<typename T, typename S>
ccl_device T kernel_tex_image_interp_tricubic_nanovdb(S &s, float x, float y, float z)
{
float px = floorf(x);
float py = floorf(y);
float pz = floorf(z);
float px = floor(x);
float py = floor(y);
float pz = floor(z);
float fx = x - px;
float fy = y - py;
float fz = z - pz;

View File

@@ -23,7 +23,7 @@ shader node_attribute(string bump_offset = "center",
output float Fac = 0.0,
output float Alpha = 0.0)
{
float data[4] = {0.0, 0.0, 0.0, 0.0};
float data[4];
getattribute(name, data);
Color = color(data[0], data[1], data[2]);
Vector = point(Color);

View File

@@ -42,23 +42,23 @@ shader node_light_path(output float IsCameraRay = 0.0,
getattribute("path:ray_length", RayLength);
int ray_depth = 0;
int ray_depth;
getattribute("path:ray_depth", ray_depth);
RayDepth = (float)ray_depth;
int diffuse_depth = 0;
int diffuse_depth;
getattribute("path:diffuse_depth", diffuse_depth);
DiffuseDepth = (float)diffuse_depth;
int glossy_depth = 0;
int glossy_depth;
getattribute("path:glossy_depth", glossy_depth);
GlossyDepth = (float)glossy_depth;
int transparent_depth = 0;
int transparent_depth;
getattribute("path:transparent_depth", transparent_depth);
TransparentDepth = (float)transparent_depth;
int transmission_depth = 0;
int transmission_depth;
getattribute("path:transmission_depth", transmission_depth);
TransmissionDepth = (float)transmission_depth;
}

View File

@@ -31,7 +31,7 @@ shader node_normal_map(normal NormalIn = N,
vector tangent;
vector ninterp;
float tangent_sign;
float is_smooth = 0.0;
float is_smooth;
getattribute("geom:is_smooth", is_smooth);
if (!is_smooth) {

View File

@@ -22,7 +22,7 @@ shader node_tangent(normal NormalIn = N,
string axis = "z",
output normal Tangent = normalize(dPdu))
{
vector T = vector(0.0, 0.0, 0.0);
vector T;
if (direction_type == "uv_map") {
getattribute(attr_name, T);

View File

@@ -84,30 +84,67 @@ closure color principled_hair(normal N,
closure color henyey_greenstein(float g) BUILTIN;
closure color absorption() BUILTIN;
normal ensure_valid_reflection(normal Ng, normal I, normal N)
normal ensure_valid_reflection(normal Ng, vector I, normal N)
{
/* The implementation here mirrors the one in kernel_montecarlo.h,
* check there for an explanation of the algorithm. */
vector R;
float NI = dot(N, I);
float NgR, threshold;
if (NI > 0) {
R = (2 * NI) * N - I;
NgR = dot(Ng, R);
threshold = min(0.9 * dot(Ng, I), 0.01);
if (NgR >= threshold) {
return N;
float sqr(float x)
{
return x * x;
}
vector R = 2 * dot(N, I) * N - I;
float threshold = min(0.9 * dot(Ng, I), 0.01);
if (dot(Ng, R) >= threshold) {
return N;
}
float NdotNg = dot(N, Ng);
vector X = normalize(N - NdotNg * Ng);
float Ix = dot(I, X), Iz = dot(I, Ng);
float Ix2 = sqr(Ix), Iz2 = sqr(Iz);
float a = Ix2 + Iz2;
float b = sqrt(Ix2 * (a - sqr(threshold)));
float c = Iz * threshold + a;
float fac = 0.5 / a;
float N1_z2 = fac * (b + c), N2_z2 = fac * (-b + c);
int valid1 = (N1_z2 > 1e-5) && (N1_z2 <= (1.0 + 1e-5));
int valid2 = (N2_z2 > 1e-5) && (N2_z2 <= (1.0 + 1e-5));
float N_new_x, N_new_z;
if (valid1 && valid2) {
float N1_x = sqrt(1.0 - N1_z2), N1_z = sqrt(N1_z2);
float N2_x = sqrt(1.0 - N2_z2), N2_z = sqrt(N2_z2);
float R1 = 2 * (N1_x * Ix + N1_z * Iz) * N1_z - Iz;
float R2 = 2 * (N2_x * Ix + N2_z * Iz) * N2_z - Iz;
valid1 = (R1 >= 1e-5);
valid2 = (R2 >= 1e-5);
if (valid1 && valid2) {
N_new_x = (R1 < R2) ? N1_x : N2_x;
N_new_z = (R1 < R2) ? N1_z : N2_z;
}
else {
N_new_x = (R1 > R2) ? N1_x : N2_x;
N_new_z = (R1 > R2) ? N1_z : N2_z;
}
}
else if (valid1 || valid2) {
float Nz2 = valid1 ? N1_z2 : N2_z2;
N_new_x = sqrt(1.0 - Nz2);
N_new_z = sqrt(Nz2);
}
else {
R = -I;
NgR = dot(Ng, R);
threshold = 0.01;
return Ng;
}
R = R + Ng * (threshold - NgR);
return normalize(I * length(R) + R * length(I));
return N_new_x * X + N_new_z * Ng;
}
#endif /* CCL_STDOSL_H */

View File

@@ -402,16 +402,16 @@ static void add_uvs(AlembicProcedural *proc,
}
const ISampleSelector iss = ISampleSelector(time);
const IV2fGeomParam::Sample sample = uvs.getExpandedValue(iss);
const IV2fGeomParam::Sample uvsample = uvs.getIndexedValue(iss);
if (!uvsample.valid()) {
continue;
}
const array<int3> *triangles =
cached_data.triangles.data_for_time_no_check(time).get_data_or_null();
const array<int3> *triangles_loops =
cached_data.triangles_loops.data_for_time_no_check(time).get_data_or_null();
const array<int3> *triangles = cached_data.triangles.data_for_time_no_check(time);
const array<int3> *triangles_loops = cached_data.triangles_loops.data_for_time_no_check(time);
if (!triangles || !triangles_loops) {
continue;
@@ -458,8 +458,7 @@ static void add_normals(const Int32ArraySamplePtr face_indices,
*normals.getTimeSampling());
attr.std = ATTR_STD_VERTEX_NORMAL;
const array<float3> *vertices =
cached_data.vertices.data_for_time_no_check(time).get_data_or_null();
const array<float3> *vertices = cached_data.vertices.data_for_time_no_check(time);
if (!vertices) {
return;
@@ -494,8 +493,7 @@ static void add_normals(const Int32ArraySamplePtr face_indices,
*normals.getTimeSampling());
attr.std = ATTR_STD_VERTEX_NORMAL;
const array<float3> *vertices =
cached_data.vertices.data_for_time_no_check(time).get_data_or_null();
const array<float3> *vertices = cached_data.vertices.data_for_time_no_check(time);
if (!vertices) {
return;
@@ -599,7 +597,7 @@ NODE_DEFINE(AlembicObject)
NodeType *type = NodeType::add("alembic_object", create);
SOCKET_STRING(path, "Alembic Path", ustring());
SOCKET_NODE_ARRAY(used_shaders, "Used Shaders", Shader::get_node_type());
SOCKET_NODE_ARRAY(used_shaders, "Used Shaders", &Shader::node_type);
SOCKET_INT(subd_max_level, "Max Subdivision Level", 1);
SOCKET_FLOAT(subd_dicing_rate, "Subdivision Dicing Rate", 1.0f);
@@ -609,7 +607,7 @@ NODE_DEFINE(AlembicObject)
return type;
}
AlembicObject::AlembicObject() : Node(get_node_type())
AlembicObject::AlembicObject() : Node(node_type)
{
schema_type = INVALID;
}
@@ -719,15 +717,11 @@ void AlembicObject::read_face_sets(SchemaType &schema,
void AlembicObject::load_all_data(AlembicProcedural *proc,
IPolyMeshSchema &schema,
float scale,
Progress &progress)
{
cached_data.clear();
/* Only load data for the original Geometry. */
if (instance_of) {
return;
}
const TimeSamplingPtr time_sampling = schema.getTimeSampling();
cached_data.set_time_sampling(*time_sampling);
@@ -786,18 +780,22 @@ void AlembicObject::load_all_data(AlembicProcedural *proc,
add_uvs(proc, uvs, cached_data, progress);
}
if (progress.get_cancel()) {
return;
}
setup_transform_cache(scale);
data_loaded = true;
}
void AlembicObject::load_all_data(AlembicProcedural *proc, ISubDSchema &schema, Progress &progress)
void AlembicObject::load_all_data(AlembicProcedural *proc,
ISubDSchema &schema,
float scale,
Progress &progress)
{
cached_data.clear();
/* Only load data for the original Geometry. */
if (instance_of) {
return;
}
AttributeRequestSet requested_attributes = get_requested_attributes();
const TimeSamplingPtr time_sampling = schema.getTimeSampling();
@@ -922,21 +920,19 @@ void AlembicObject::load_all_data(AlembicProcedural *proc, ISubDSchema &schema,
return;
}
setup_transform_cache(scale);
data_loaded = true;
}
void AlembicObject::load_all_data(AlembicProcedural *proc,
const ICurvesSchema &schema,
float scale,
Progress &progress,
float default_radius)
{
cached_data.clear();
/* Only load data for the original Geometry. */
if (instance_of) {
return;
}
const TimeSamplingPtr time_sampling = schema.getTimeSampling();
cached_data.set_time_sampling(*time_sampling);
@@ -1011,6 +1007,8 @@ void AlembicObject::load_all_data(AlembicProcedural *proc,
// TODO(@kevindietrich): attributes, need example files
setup_transform_cache(scale);
data_loaded = true;
}
@@ -1019,14 +1017,6 @@ void AlembicObject::setup_transform_cache(float scale)
cached_data.transforms.clear();
cached_data.transforms.invalidate_last_loaded_time();
if (scale == 0.0f) {
scale = 1.0f;
}
if (xform_time_sampling) {
cached_data.transforms.set_time_sampling(*xform_time_sampling);
}
if (xform_samples.size() == 0) {
Transform tfm = transform_scale(make_float3(scale));
cached_data.transforms.add_data(tfm, 0.0);
@@ -1113,10 +1103,9 @@ void AlembicObject::read_attribute(const ICompoundProperty &arb_geom_params,
attribute.element = ATTR_ELEMENT_CORNER;
attribute.type_desc = TypeFloat2;
const array<int3> *triangles =
cached_data.triangles.data_for_time_no_check(time).get_data_or_null();
const array<int3> *triangles_loops =
cached_data.triangles_loops.data_for_time_no_check(time).get_data_or_null();
const array<int3> *triangles = cached_data.triangles.data_for_time_no_check(time);
const array<int3> *triangles_loops = cached_data.triangles_loops.data_for_time_no_check(
time);
if (!triangles || !triangles_loops) {
return;
@@ -1169,8 +1158,7 @@ void AlembicObject::read_attribute(const ICompoundProperty &arb_geom_params,
attribute.element = ATTR_ELEMENT_CORNER_BYTE;
attribute.type_desc = TypeRGBA;
const array<int3> *triangles =
cached_data.triangles.data_for_time_no_check(time).get_data_or_null();
const array<int3> *triangles = cached_data.triangles.data_for_time_no_check(time);
if (!triangles) {
return;
@@ -1226,8 +1214,7 @@ void AlembicObject::read_attribute(const ICompoundProperty &arb_geom_params,
attribute.element = ATTR_ELEMENT_CORNER_BYTE;
attribute.type_desc = TypeRGBA;
const array<int3> *triangles =
cached_data.triangles.data_for_time_no_check(time).get_data_or_null();
const array<int3> *triangles = cached_data.triangles.data_for_time_no_check(time);
if (!triangles) {
return;
@@ -1266,7 +1253,7 @@ static void update_attributes(AttributeSet &attributes, CachedData &cached_data,
set<Attribute *> cached_attributes;
for (CachedData::CachedAttribute &attribute : cached_data.attributes) {
const array<char> *attr_data = attribute.data.data_for_time(frame_time).get_data_or_null();
const array<char> *attr_data = attribute.data.data_for_time(frame_time);
Attribute *attr = nullptr;
if (attribute.std != ATTR_STD_NONE) {
@@ -1291,7 +1278,6 @@ static void update_attributes(AttributeSet &attributes, CachedData &cached_data,
}
memcpy(attr->data(), attr_data->data(), attr_data->size());
attr->modified = true;
}
/* remove any attributes not in cached_attributes */
@@ -1299,7 +1285,6 @@ static void update_attributes(AttributeSet &attributes, CachedData &cached_data,
for (it = attributes.attributes.begin(); it != attributes.attributes.end();) {
if (cached_attributes.find(&(*it)) == cached_attributes.end()) {
attributes.attributes.erase(it++);
attributes.modified = true;
continue;
}
@@ -1320,12 +1305,12 @@ NODE_DEFINE(AlembicProcedural)
SOCKET_FLOAT(default_radius, "Default Radius", 0.01f);
SOCKET_FLOAT(scale, "Scale", 1.0f);
SOCKET_NODE_ARRAY(objects, "Objects", AlembicObject::get_node_type());
SOCKET_NODE_ARRAY(objects, "Objects", &AlembicObject::node_type);
return type;
}
AlembicProcedural::AlembicProcedural() : Procedural(get_node_type())
AlembicProcedural::AlembicProcedural() : Procedural(node_type)
{
objects_loaded = false;
scene_ = nullptr;
@@ -1373,16 +1358,11 @@ void AlembicProcedural::generate(Scene *scene, Progress &progress)
}
bool need_shader_updates = false;
bool need_data_updates = false;
/* Check for changes in shaders (newly requested attributes). */
foreach (Node *object_node, objects) {
AlembicObject *object = static_cast<AlembicObject *>(object_node);
if (object->is_modified()) {
need_data_updates = true;
}
/* Check for changes in shaders (e.g. newly requested attributes). */
foreach (Node *shader_node, object->get_used_shaders()) {
Shader *shader = static_cast<Shader *>(shader_node);
@@ -1393,7 +1373,7 @@ void AlembicProcedural::generate(Scene *scene, Progress &progress)
}
}
if (!is_modified() && !need_shader_updates && !need_data_updates) {
if (!is_modified() && !need_shader_updates) {
return;
}
@@ -1417,8 +1397,6 @@ void AlembicProcedural::generate(Scene *scene, Progress &progress)
const chrono_t frame_time = (chrono_t)((frame - frame_offset) / frame_rate);
build_caches(progress);
foreach (Node *node, objects) {
AlembicObject *object = static_cast<AlembicObject *>(node);
@@ -1427,19 +1405,19 @@ void AlembicProcedural::generate(Scene *scene, Progress &progress)
}
/* skip constant objects */
if (object->is_constant() && !object->is_modified() && !object->need_shader_update &&
!scale_is_modified()) {
if (object->has_data_loaded() && object->is_constant() && !object->is_modified() &&
!object->need_shader_update && !scale_is_modified()) {
continue;
}
if (object->schema_type == AlembicObject::POLY_MESH) {
read_mesh(object, frame_time);
read_mesh(scene, object, frame_time, progress);
}
else if (object->schema_type == AlembicObject::CURVES) {
read_curves(object, frame_time);
read_curves(scene, object, frame_time, progress);
}
else if (object->schema_type == AlembicObject::SUBD) {
read_subd(object, frame_time);
read_subd(scene, object, frame_time, progress);
}
object->clear_modified();
@@ -1493,7 +1471,7 @@ void AlembicProcedural::load_objects(Progress &progress)
IObject root = archive.getTop();
for (size_t i = 0; i < root.getNumChildren(); ++i) {
walk_hierarchy(root, root.getChildHeader(i), {}, object_map, progress);
walk_hierarchy(root, root.getChildHeader(i), nullptr, object_map, progress);
}
/* Create nodes in the scene. */
@@ -1502,24 +1480,22 @@ void AlembicProcedural::load_objects(Progress &progress)
Geometry *geometry = nullptr;
if (!abc_object->instance_of) {
if (abc_object->schema_type == AlembicObject::CURVES) {
geometry = scene_->create_node<Hair>();
}
else if (abc_object->schema_type == AlembicObject::POLY_MESH ||
abc_object->schema_type == AlembicObject::SUBD) {
geometry = scene_->create_node<Mesh>();
}
else {
continue;
}
geometry->set_owner(this);
geometry->name = abc_object->iobject.getName();
array<Node *> used_shaders = abc_object->get_used_shaders();
geometry->set_used_shaders(used_shaders);
if (abc_object->schema_type == AlembicObject::CURVES) {
geometry = scene_->create_node<Hair>();
}
else if (abc_object->schema_type == AlembicObject::POLY_MESH ||
abc_object->schema_type == AlembicObject::SUBD) {
geometry = scene_->create_node<Mesh>();
}
else {
continue;
}
geometry->set_owner(this);
geometry->name = abc_object->iobject.getName();
array<Node *> used_shaders = abc_object->get_used_shaders();
geometry->set_used_shaders(used_shaders);
Object *object = scene_->create_node<Object>();
object->set_owner(this);
@@ -1528,44 +1504,43 @@ void AlembicProcedural::load_objects(Progress &progress)
abc_object->set_object(object);
}
/* Share geometries between instances. */
foreach (Node *node, objects) {
AlembicObject *abc_object = static_cast<AlembicObject *>(node);
if (abc_object->instance_of) {
abc_object->get_object()->set_geometry(
abc_object->instance_of->get_object()->get_geometry());
abc_object->schema_type = abc_object->instance_of->schema_type;
}
}
}
void AlembicProcedural::read_mesh(AlembicObject *abc_object, Abc::chrono_t frame_time)
void AlembicProcedural::read_mesh(Scene *scene,
AlembicObject *abc_object,
Abc::chrono_t frame_time,
Progress &progress)
{
IPolyMesh polymesh(abc_object->iobject, Alembic::Abc::kWrapExisting);
Mesh *mesh = static_cast<Mesh *>(abc_object->get_object()->get_geometry());
CachedData &cached_data = abc_object->get_cached_data();
IPolyMeshSchema schema = polymesh.getSchema();
if (!abc_object->has_data_loaded()) {
abc_object->load_all_data(this, schema, scale, progress);
}
else {
if (abc_object->need_shader_update) {
abc_object->update_shader_attributes(schema.getArbGeomParams(), progress);
}
if (scale_is_modified()) {
abc_object->setup_transform_cache(scale);
}
}
/* update sockets */
Object *object = abc_object->get_object();
cached_data.transforms.copy_to_socket(frame_time, object, object->get_tfm_socket());
if (object->is_modified()) {
object->tag_update(scene_);
}
/* Only update sockets for the original Geometry. */
if (abc_object->instance_of) {
return;
}
Mesh *mesh = static_cast<Mesh *>(object->get_geometry());
cached_data.vertices.copy_to_socket(frame_time, mesh, mesh->get_verts_socket());
cached_data.shader.copy_to_socket(frame_time, mesh, mesh->get_shader_socket());
array<int3> *triangle_data = cached_data.triangles.data_for_time(frame_time).get_data_or_null();
array<int3> *triangle_data = cached_data.triangles.data_for_time(frame_time);
if (triangle_data) {
array<int> triangles;
array<bool> smooth;
@@ -1591,7 +1566,7 @@ void AlembicProcedural::read_mesh(AlembicObject *abc_object, Abc::chrono_t frame
/* we don't yet support arbitrary attributes, for now add vertex
* coordinates as generated coordinates if requested */
if (mesh->need_attribute(scene_, ATTR_STD_GENERATED)) {
if (mesh->need_attribute(scene, ATTR_STD_GENERATED)) {
Attribute *attr = mesh->attributes.add(ATTR_STD_GENERATED);
memcpy(
attr->data_float3(), mesh->get_verts().data(), sizeof(float3) * mesh->get_verts().size());
@@ -1599,12 +1574,39 @@ void AlembicProcedural::read_mesh(AlembicObject *abc_object, Abc::chrono_t frame
if (mesh->is_modified()) {
bool need_rebuild = mesh->triangles_is_modified();
mesh->tag_update(scene_, need_rebuild);
mesh->tag_update(scene, need_rebuild);
}
}
void AlembicProcedural::read_subd(AlembicObject *abc_object, Abc::chrono_t frame_time)
void AlembicProcedural::read_subd(Scene *scene,
AlembicObject *abc_object,
Abc::chrono_t frame_time,
Progress &progress)
{
ISubD subd_mesh(abc_object->iobject, Alembic::Abc::kWrapExisting);
ISubDSchema schema = subd_mesh.getSchema();
Mesh *mesh = static_cast<Mesh *>(abc_object->get_object()->get_geometry());
/* Alembic is OpenSubDiv compliant, there is no option to set another subdivision type. */
mesh->set_subdivision_type(Mesh::SubdivisionType::SUBDIVISION_CATMULL_CLARK);
if (!abc_object->has_data_loaded()) {
abc_object->load_all_data(this, schema, scale, progress);
}
else {
if (abc_object->need_shader_update) {
abc_object->update_shader_attributes(schema.getArbGeomParams(), progress);
}
if (scale_is_modified()) {
abc_object->setup_transform_cache(scale);
}
}
mesh->set_subd_max_level(abc_object->get_subd_max_level());
mesh->set_subd_dicing_rate(abc_object->get_subd_dicing_rate());
CachedData &cached_data = abc_object->get_cached_data();
if (abc_object->subd_max_level_is_modified() || abc_object->subd_dicing_rate_is_modified()) {
@@ -1612,22 +1614,6 @@ void AlembicProcedural::read_subd(AlembicObject *abc_object, Abc::chrono_t frame
cached_data.invalidate_last_loaded_time();
}
/* Update sockets. */
Object *object = abc_object->get_object();
cached_data.transforms.copy_to_socket(frame_time, object, object->get_tfm_socket());
if (object->is_modified()) {
object->tag_update(scene_);
}
/* Only update sockets for the original Geometry. */
if (abc_object->instance_of) {
return;
}
Mesh *mesh = static_cast<Mesh *>(object->get_geometry());
/* Cycles overwrites the original triangles when computing displacement, so we always have to
* repass the data if something is animated (vertices most likely) to avoid buffer overflows. */
if (!cached_data.is_constant()) {
@@ -1640,10 +1626,10 @@ void AlembicProcedural::read_subd(AlembicObject *abc_object, Abc::chrono_t frame
mesh->clear_non_sockets();
/* Alembic is OpenSubDiv compliant, there is no option to set another subdivision type. */
mesh->set_subdivision_type(Mesh::SubdivisionType::SUBDIVISION_CATMULL_CLARK);
mesh->set_subd_max_level(abc_object->get_subd_max_level());
mesh->set_subd_dicing_rate(abc_object->get_subd_dicing_rate());
/* Update sockets. */
Object *object = abc_object->get_object();
cached_data.transforms.copy_to_socket(frame_time, object, object->get_tfm_socket());
cached_data.vertices.copy_to_socket(frame_time, mesh, mesh->get_verts_socket());
@@ -1680,7 +1666,7 @@ void AlembicProcedural::read_subd(AlembicObject *abc_object, Abc::chrono_t frame
/* we don't yet support arbitrary attributes, for now add vertex
* coordinates as generated coordinates if requested */
if (mesh->need_attribute(scene_, ATTR_STD_GENERATED)) {
if (mesh->need_attribute(scene, ATTR_STD_GENERATED)) {
Attribute *attr = mesh->attributes.add(ATTR_STD_GENERATED);
memcpy(
attr->data_float3(), mesh->get_verts().data(), sizeof(float3) * mesh->get_verts().size());
@@ -1694,12 +1680,30 @@ void AlembicProcedural::read_subd(AlembicObject *abc_object, Abc::chrono_t frame
(mesh->subd_start_corner_is_modified()) ||
(mesh->subd_face_corners_is_modified());
mesh->tag_update(scene_, need_rebuild);
mesh->tag_update(scene, need_rebuild);
}
}
void AlembicProcedural::read_curves(AlembicObject *abc_object, Abc::chrono_t frame_time)
void AlembicProcedural::read_curves(Scene *scene,
AlembicObject *abc_object,
Abc::chrono_t frame_time,
Progress &progress)
{
ICurves curves(abc_object->iobject, Alembic::Abc::kWrapExisting);
Hair *hair = static_cast<Hair *>(abc_object->get_object()->get_geometry());
ICurvesSchema schema = curves.getSchema();
if (!abc_object->has_data_loaded() || default_radius_is_modified() ||
abc_object->radius_scale_is_modified()) {
abc_object->load_all_data(this, schema, scale, progress, default_radius);
}
else {
if (scale_is_modified()) {
abc_object->setup_transform_cache(scale);
}
}
CachedData &cached_data = abc_object->get_cached_data();
/* update sockets */
@@ -1707,17 +1711,6 @@ void AlembicProcedural::read_curves(AlembicObject *abc_object, Abc::chrono_t fra
Object *object = abc_object->get_object();
cached_data.transforms.copy_to_socket(frame_time, object, object->get_tfm_socket());
if (object->is_modified()) {
object->tag_update(scene_);
}
/* Only update sockets for the original Geometry. */
if (abc_object->instance_of) {
return;
}
Hair *hair = static_cast<Hair *>(object->get_geometry());
cached_data.curve_keys.copy_to_socket(frame_time, hair, hair->get_curve_keys_socket());
cached_data.curve_radius.copy_to_socket(frame_time, hair, hair->get_curve_radius_socket());
@@ -1732,7 +1725,7 @@ void AlembicProcedural::read_curves(AlembicObject *abc_object, Abc::chrono_t fra
/* we don't yet support arbitrary attributes, for now add first keys as generated coordinates if
* requested */
if (hair->need_attribute(scene_, ATTR_STD_GENERATED)) {
if (hair->need_attribute(scene, ATTR_STD_GENERATED)) {
Attribute *attr_generated = hair->attributes.add(ATTR_STD_GENERATED);
float3 *generated = attr_generated->data_float3();
@@ -1742,13 +1735,13 @@ void AlembicProcedural::read_curves(AlembicObject *abc_object, Abc::chrono_t fra
}
const bool rebuild = (hair->curve_keys_is_modified() || hair->curve_radius_is_modified());
hair->tag_update(scene_, rebuild);
hair->tag_update(scene, rebuild);
}
void AlembicProcedural::walk_hierarchy(
IObject parent,
const ObjectHeader &header,
MatrixSamplesData matrix_samples_data,
MatrixSampleMap *xform_samples,
const unordered_map<std::string, AlembicObject *> &object_map,
Progress &progress)
{
@@ -1770,7 +1763,7 @@ void AlembicProcedural::walk_hierarchy(
MatrixSampleMap local_xform_samples;
MatrixSampleMap *temp_xform_samples = nullptr;
if (matrix_samples_data.samples == nullptr) {
if (xform_samples == nullptr) {
/* If there are no parent transforms, fill the map directly. */
temp_xform_samples = &concatenated_xform_samples;
}
@@ -1785,13 +1778,11 @@ void AlembicProcedural::walk_hierarchy(
temp_xform_samples->insert({sample_time, sample.getMatrix()});
}
if (matrix_samples_data.samples != nullptr) {
concatenate_xform_samples(
*matrix_samples_data.samples, local_xform_samples, concatenated_xform_samples);
if (xform_samples != nullptr) {
concatenate_xform_samples(*xform_samples, local_xform_samples, concatenated_xform_samples);
}
matrix_samples_data.samples = &concatenated_xform_samples;
matrix_samples_data.time_sampling = ts;
xform_samples = &concatenated_xform_samples;
}
next_object = xform;
@@ -1807,9 +1798,8 @@ void AlembicProcedural::walk_hierarchy(
abc_object->iobject = subd;
abc_object->schema_type = AlembicObject::SUBD;
if (matrix_samples_data.samples) {
abc_object->xform_samples = *matrix_samples_data.samples;
abc_object->xform_time_sampling = matrix_samples_data.time_sampling;
if (xform_samples) {
abc_object->xform_samples = *xform_samples;
}
}
@@ -1826,9 +1816,8 @@ void AlembicProcedural::walk_hierarchy(
abc_object->iobject = mesh;
abc_object->schema_type = AlembicObject::POLY_MESH;
if (matrix_samples_data.samples) {
abc_object->xform_samples = *matrix_samples_data.samples;
abc_object->xform_time_sampling = matrix_samples_data.time_sampling;
if (xform_samples) {
abc_object->xform_samples = *xform_samples;
}
}
@@ -1845,9 +1834,8 @@ void AlembicProcedural::walk_hierarchy(
abc_object->iobject = curves;
abc_object->schema_type = AlembicObject::CURVES;
if (matrix_samples_data.samples) {
abc_object->xform_samples = *matrix_samples_data.samples;
abc_object->xform_time_sampling = matrix_samples_data.time_sampling;
if (xform_samples) {
abc_object->xform_samples = *xform_samples;
}
}
@@ -1856,92 +1844,15 @@ void AlembicProcedural::walk_hierarchy(
else if (IFaceSet::matches(header)) {
// ignore the face set, it will be read along with the data
}
else if (IPoints::matches(header)) {
// unsupported for now
}
else if (INuPatch::matches(header)) {
// unsupported for now
}
else {
// unsupported type for now (Points, NuPatch)
next_object = parent.getChild(header.getName());
if (next_object.isInstanceRoot()) {
unordered_map<std::string, AlembicObject *>::const_iterator iter;
/* Was this object asked to be rendered? */
iter = object_map.find(next_object.getFullName());
if (iter != object_map.end()) {
AlembicObject *abc_object = iter->second;
/* Only try to render an instance if the original object is also rendered. */
iter = object_map.find(next_object.instanceSourcePath());
if (iter != object_map.end()) {
abc_object->iobject = next_object;
abc_object->instance_of = iter->second;
if (matrix_samples_data.samples) {
abc_object->xform_samples = *matrix_samples_data.samples;
abc_object->xform_time_sampling = matrix_samples_data.time_sampling;
}
}
}
}
}
if (next_object.valid()) {
for (size_t i = 0; i < next_object.getNumChildren(); ++i) {
walk_hierarchy(
next_object, next_object.getChildHeader(i), matrix_samples_data, object_map, progress);
}
}
}
void AlembicProcedural::build_caches(Progress &progress)
{
for (Node *node : objects) {
AlembicObject *object = static_cast<AlembicObject *>(node);
if (progress.get_cancel()) {
return;
}
if (object->schema_type == AlembicObject::POLY_MESH) {
if (!object->has_data_loaded()) {
IPolyMesh polymesh(object->iobject, Alembic::Abc::kWrapExisting);
IPolyMeshSchema schema = polymesh.getSchema();
object->load_all_data(this, schema, progress);
}
else if (object->need_shader_update) {
IPolyMesh polymesh(object->iobject, Alembic::Abc::kWrapExisting);
IPolyMeshSchema schema = polymesh.getSchema();
object->update_shader_attributes(schema.getArbGeomParams(), progress);
}
}
else if (object->schema_type == AlembicObject::CURVES) {
if (!object->has_data_loaded() || default_radius_is_modified() ||
object->radius_scale_is_modified()) {
ICurves curves(object->iobject, Alembic::Abc::kWrapExisting);
ICurvesSchema schema = curves.getSchema();
object->load_all_data(this, schema, progress, default_radius);
}
}
else if (object->schema_type == AlembicObject::SUBD) {
if (!object->has_data_loaded()) {
ISubD subd_mesh(object->iobject, Alembic::Abc::kWrapExisting);
ISubDSchema schema = subd_mesh.getSchema();
object->load_all_data(this, schema, progress);
}
else if (object->need_shader_update) {
ISubD subd_mesh(object->iobject, Alembic::Abc::kWrapExisting);
ISubDSchema schema = subd_mesh.getSchema();
object->update_shader_attributes(schema.getArbGeomParams(), progress);
}
}
if (scale_is_modified() || object->get_cached_data().transforms.size() == 0) {
object->setup_transform_cache(scale);
next_object, next_object.getChildHeader(i), xform_samples, object_map, progress);
}
}
}
View File
@@ -38,11 +38,6 @@ class Shader;
using MatrixSampleMap = std::map<Alembic::Abc::chrono_t, Alembic::Abc::M44d>;
struct MatrixSamplesData {
MatrixSampleMap *samples = nullptr;
Alembic::AbcCoreAbstract::TimeSamplingPtr time_sampling;
};
/* Helpers to detect if some type is a ccl::array. */
template<typename> struct is_array : public std::false_type {
};
@@ -50,78 +45,6 @@ template<typename> struct is_array : public std::false_type {
template<typename T> struct is_array<array<T>> : public std::true_type {
};
/* Holds the data for a cache lookup at a given time, as well as information to
* help disambiguate successes or failures to get data from the cache. */
template<typename T> class CacheLookupResult {
enum class State {
NEW_DATA,
ALREADY_LOADED,
NO_DATA_FOR_TIME,
};
T *data;
State state;
protected:
/* Prevent default construction outside of the class: for a valid result, we
* should use the static functions below. */
CacheLookupResult() = default;
public:
static CacheLookupResult new_data(T *data_)
{
CacheLookupResult result;
result.data = data_;
result.state = State::NEW_DATA;
return result;
}
static CacheLookupResult no_data_found_for_time()
{
CacheLookupResult result;
result.data = nullptr;
result.state = State::NO_DATA_FOR_TIME;
return result;
}
static CacheLookupResult already_loaded()
{
CacheLookupResult result;
result.data = nullptr;
result.state = State::ALREADY_LOADED;
return result;
}
/* This should only be called if new data is available. */
const T &get_data() const
{
assert(state == State::NEW_DATA);
assert(data != nullptr);
return *data;
}
T *get_data_or_null() const
{
// data_ should already be null if there is no new data, so there is no need to check
return data;
}
bool has_new_data() const
{
return state == State::NEW_DATA;
}
bool has_already_loaded() const
{
return state == State::ALREADY_LOADED;
}
bool has_no_data_for_time() const
{
return state == State::NO_DATA_FOR_TIME;
}
};
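To make the three states concrete, here is a minimal caller sketch. It mirrors the copy_to_socket() implementation further down in this header and uses only names visible in this diff, apart from the illustrative wrapper itself (copy_if_new and its store/node/socket parameters stand in for a DataStore member, a Cycles Node and its SocketType):

/* Illustrative sketch only, not part of the patch. */
static void copy_if_new(DataStore<array<float3>> &store,
                        double time,
                        Node *node,
                        const SocketType *socket)
{
  CacheLookupResult<array<float3>> result = store.data_for_time(time);
  if (result.has_new_data()) {
    /* Fresh data for this time: copy it into the socket. */
    array<float3> value = result.get_data();
    node->set(*socket, value);
  }
  else if (result.has_already_loaded()) {
    /* The socket already holds the data for this time, nothing to do. */
  }
  else {
    /* No data stored for this time, keep the previous socket value. */
  }
}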
/* Store the data set for an animation at every time point, or at the beginning of the animation
* for constant data.
*
@@ -151,10 +74,10 @@ template<typename T> class DataStore {
/* Get the data for the specified time.
* Return nullptr if there is no data or if the data for this time was already loaded. */
CacheLookupResult<T> data_for_time(double time)
T *data_for_time(double time)
{
if (size() == 0) {
return CacheLookupResult<T>::no_data_found_for_time();
return nullptr;
}
std::pair<size_t, Alembic::Abc::chrono_t> index_pair;
@@ -162,26 +85,26 @@ template<typename T> class DataStore {
DataTimePair &data_pair = data[index_pair.first];
if (last_loaded_time == data_pair.time) {
return CacheLookupResult<T>::already_loaded();
return nullptr;
}
last_loaded_time = data_pair.time;
return CacheLookupResult<T>::new_data(&data_pair.data);
return &data_pair.data;
}
/* Get the data for the specified time, but do not check whether the data was already loaded for
* this time. Return nullptr if there is no data. */
CacheLookupResult<T> data_for_time_no_check(double time)
T *data_for_time_no_check(double time)
{
if (size() == 0) {
return CacheLookupResult<T>::no_data_found_for_time();
return nullptr;
}
std::pair<size_t, Alembic::Abc::chrono_t> index_pair;
index_pair = time_sampling.getNearIndex(time, data.size());
DataTimePair &data_pair = data[index_pair.first];
return CacheLookupResult<T>::new_data(&data_pair.data);
return &data_pair.data;
}
void add_data(T &data_, double time)
@@ -221,15 +144,15 @@ template<typename T> class DataStore {
* data for this time or it was already loaded, do nothing. */
void copy_to_socket(double time, Node *node, const SocketType *socket)
{
CacheLookupResult<T> result = data_for_time(time);
T *data_ = data_for_time(time);
if (!result.has_new_data()) {
if (data_ == nullptr) {
return;
}
/* TODO(kevindietrich): arrays are emptied when passed to the sockets, so for now we copy the
* arrays to avoid reloading the data */
T value = result.get_data();
T value = *data_;
node->set(*socket, value);
}
};
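Read together with the read_subd()/read_curves() hunks earlier in this diff, a per-frame update then reduces to a handful of copy_to_socket() calls against the cached data. Schematically, for a hair object (every call below appears verbatim in the hunks above; only the wrapper function is illustrative):

/* Illustrative sketch only: per-frame socket updates for a hair object. */
static void update_hair_sockets(AlembicObject *abc_object, Abc::chrono_t frame_time)
{
  CachedData &cached_data = abc_object->get_cached_data();
  Object *object = abc_object->get_object();
  Hair *hair = static_cast<Hair *>(object->get_geometry());

  /* Each store copies its data only when it has new data for frame_time. */
  cached_data.transforms.copy_to_socket(frame_time, object, object->get_tfm_socket());
  cached_data.curve_keys.copy_to_socket(frame_time, hair, hair->get_curve_keys_socket());
  cached_data.curve_radius.copy_to_socket(frame_time, hair, hair->get_curve_radius_socket());
}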
@@ -326,12 +249,15 @@ class AlembicObject : public Node {
void load_all_data(AlembicProcedural *proc,
Alembic::AbcGeom::IPolyMeshSchema &schema,
float scale,
Progress &progress);
void load_all_data(AlembicProcedural *proc,
Alembic::AbcGeom::ISubDSchema &schema,
float scale,
Progress &progress);
void load_all_data(AlembicProcedural *proc,
const Alembic::AbcGeom::ICurvesSchema &schema,
float scale,
Progress &progress,
float default_radius);
@@ -348,9 +274,6 @@ class AlembicObject : public Node {
bool need_shader_update = true;
AlembicObject *instance_of = nullptr;
Alembic::AbcCoreAbstract::TimeSamplingPtr xform_time_sampling;
MatrixSampleMap xform_samples;
Alembic::AbcGeom::IObject iobject;
@@ -461,23 +384,30 @@ class AlembicProcedural : public Procedural {
* way for each IObject. */
void walk_hierarchy(Alembic::AbcGeom::IObject parent,
const Alembic::AbcGeom::ObjectHeader &ohead,
MatrixSamplesData matrix_samples_data,
MatrixSampleMap *xform_samples,
const unordered_map<string, AlembicObject *> &object_map,
Progress &progress);
/* Read the data for an IPolyMesh at the specified frame_time. Creates corresponding Geometry and
* Object Nodes in the Cycles scene if none exist yet. */
void read_mesh(AlembicObject *abc_object, Alembic::AbcGeom::Abc::chrono_t frame_time);
void read_mesh(Scene *scene,
AlembicObject *abc_object,
Alembic::AbcGeom::Abc::chrono_t frame_time,
Progress &progress);
/* Read the data for an ICurves at the specified frame_time. Creates corresponding Geometry and
* Object Nodes in the Cycles scene if none exist yet. */
void read_curves(AlembicObject *abc_object, Alembic::AbcGeom::Abc::chrono_t frame_time);
void read_curves(Scene *scene,
AlembicObject *abc_object,
Alembic::AbcGeom::Abc::chrono_t frame_time,
Progress &progress);
/* Read the data for an ISubD at the specified frame_time. Creates corresponding Geometry and
* Object Nodes in the Cycles scene if none exist yet. */
void read_subd(AlembicObject *abc_object, Alembic::AbcGeom::Abc::chrono_t frame_time);
void build_caches(Progress &progress);
void read_subd(Scene *scene,
AlembicObject *abc_object,
Alembic::AbcGeom::Abc::chrono_t frame_time,
Progress &progress);
};
CCL_NAMESPACE_END
View File
@@ -47,12 +47,12 @@ NODE_DEFINE(Background)
SOCKET_FLOAT(volume_step_size, "Volume Step Size", 0.1f);
SOCKET_NODE(shader, "Shader", Shader::get_node_type());
SOCKET_NODE(shader, "Shader", &Shader::node_type);
return type;
}
Background::Background() : Node(get_node_type())
Background::Background() : Node(node_type)
{
shader = NULL;
}
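The Background hunk above is the first of many identical substitutions in the files that follow: one side of this diff reads the static node_type / node_base_type members directly, the other goes through get_node_type() / get_node_base_type() accessors. Presumably the accessor is just a thin wrapper provided by Cycles' NODE_DECLARE machinery; a rough sketch (the class and body below are assumptions for illustration, not taken from the patch):

/* Illustrative only: both spellings are assumed to resolve to the same static NodeType. */
class ExampleNode : public Node {
 public:
  static const NodeType *node_type;      /* accessed directly on one side of the diff */
  static const NodeType *get_node_type() /* accessor used on the other side */
  {
    return node_type;
  }
};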
View File
@@ -163,7 +163,7 @@ NODE_DEFINE(Camera)
return type;
}
Camera::Camera() : Node(get_node_type())
Camera::Camera() : Node(node_type)
{
shutter_table_offset = TABLE_OFFSET_INVALID;
View File
@@ -96,7 +96,7 @@ NODE_DEFINE(Pass)
return type;
}
Pass::Pass() : Node(get_node_type())
Pass::Pass() : Node(node_type)
{
}
@@ -407,7 +407,7 @@ NODE_DEFINE(Film)
return type;
}
Film::Film() : Node(get_node_type())
Film::Film() : Node(node_type)
{
use_light_visibility = false;
filter_table_offset = TABLE_OFFSET_INVALID;
View File
@@ -52,7 +52,7 @@ NODE_ABSTRACT_DEFINE(Geometry)
SOCKET_UINT(motion_steps, "Motion Steps", 3);
SOCKET_BOOLEAN(use_motion_blur, "Use Motion Blur", false);
SOCKET_NODE_ARRAY(used_shaders, "Shaders", Shader::get_node_type());
SOCKET_NODE_ARRAY(used_shaders, "Shaders", &Shader::node_type);
return type;
}
@@ -1367,7 +1367,7 @@ void GeometryManager::device_update_bvh(Device *device,
dscene->data.bvh.use_bvh_steps = (scene->params.num_bvh_time_steps != 0);
dscene->data.bvh.curve_subdivisions = scene->params.curve_subdivisions();
/* The scene handle is set in 'CPUDevice::const_copy_to' and 'OptiXDevice::const_copy_to' */
dscene->data.bvh.scene = 0;
dscene->data.bvh.scene = NULL;
}
/* Set of flags used to help determine what data has been modified or needs reallocation, so we
View File
@@ -283,7 +283,7 @@ void Hair::Curve::cardinal_keys_for_step(const float3 *curve_keys,
NODE_DEFINE(Hair)
{
NodeType *type = NodeType::add("hair", create, NodeType::NONE, Geometry::get_node_base_type());
NodeType *type = NodeType::add("hair", create, NodeType::NONE, Geometry::node_base_type);
SOCKET_POINT_ARRAY(curve_keys, "Curve Keys", array<float3>());
SOCKET_FLOAT_ARRAY(curve_radius, "Curve Radius", array<float>());
@@ -293,7 +293,7 @@ NODE_DEFINE(Hair)
return type;
}
Hair::Hair() : Geometry(get_node_type(), Geometry::HAIR)
Hair::Hair() : Geometry(node_type, Geometry::HAIR)
{
curvekey_offset = 0;
curve_shape = CURVE_RIBBON;
View File
@@ -96,7 +96,7 @@ NODE_DEFINE(Integrator)
return type;
}
Integrator::Integrator() : Node(get_node_type())
Integrator::Integrator() : Node(node_type)
{
}
View File
@@ -151,12 +151,12 @@ NODE_DEFINE(Light)
SOCKET_BOOLEAN(is_portal, "Is Portal", false);
SOCKET_BOOLEAN(is_enabled, "Is Enabled", true);
SOCKET_NODE(shader, "Shader", Shader::get_node_type());
SOCKET_NODE(shader, "Shader", &Shader::node_type);
return type;
}
Light::Light() : Node(get_node_type())
Light::Light() : Node(node_type)
{
}
@@ -609,7 +609,7 @@ void LightManager::device_update_background(Device *device,
Shader *shader = scene->background->get_shader(scene);
int num_suns = 0;
foreach (ShaderNode *node, shader->graph->nodes) {
if (node->type == EnvironmentTextureNode::get_node_type()) {
if (node->type == EnvironmentTextureNode::node_type) {
EnvironmentTextureNode *env = (EnvironmentTextureNode *)node;
ImageMetaData metadata;
if (!env->handle.empty()) {
@@ -618,7 +618,7 @@ void LightManager::device_update_background(Device *device,
environment_res.y = max(environment_res.y, metadata.height);
}
}
if (node->type == SkyTextureNode::get_node_type()) {
if (node->type == SkyTextureNode::node_type) {
SkyTextureNode *sky = (SkyTextureNode *)node;
if (sky->get_sky_type() == NODE_SKY_NISHITA && sky->get_sun_disc()) {
/* Ensure that the input coordinates aren't transformed before they reach the node.
@@ -627,7 +627,7 @@ void LightManager::device_update_background(Device *device,
const ShaderInput *vec_in = sky->input("Vector");
if (vec_in && vec_in->link && vec_in->link->parent) {
ShaderNode *vec_src = vec_in->link->parent;
if ((vec_src->type != TextureCoordinateNode::get_node_type()) ||
if ((vec_src->type != TextureCoordinateNode::node_type) ||
(vec_in->link != vec_src->output("Generated"))) {
environment_res.x = max(environment_res.x, 4096);
environment_res.y = max(environment_res.y, 2048);
View File
@@ -125,7 +125,7 @@ float3 Mesh::SubdFace::normal(const Mesh *mesh) const
NODE_DEFINE(Mesh)
{
NodeType *type = NodeType::add("mesh", create, NodeType::NONE, Geometry::get_node_base_type());
NodeType *type = NodeType::add("mesh", create, NodeType::NONE, Geometry::node_base_type);
SOCKET_INT_ARRAY(triangles, "Triangles", array<int>());
SOCKET_POINT_ARRAY(verts, "Vertices", array<float3>());
@@ -202,7 +202,7 @@ Mesh::Mesh(const NodeType *node_type, Type geom_type_)
patch_table = NULL;
}
Mesh::Mesh() : Mesh(get_node_type(), Geometry::MESH)
Mesh::Mesh() : Mesh(node_type, Geometry::MESH)
{
}
View File
@@ -258,7 +258,7 @@ NODE_DEFINE(ImageTextureNode)
return type;
}
ImageTextureNode::ImageTextureNode() : ImageSlotTextureNode(get_node_type())
ImageTextureNode::ImageTextureNode() : ImageSlotTextureNode(node_type)
{
colorspace = u_colorspace_raw;
animated = false;
@@ -309,11 +309,11 @@ void ImageTextureNode::cull_tiles(Scene *scene, ShaderGraph *graph)
ustring attribute;
if (vector_in->link) {
ShaderNode *node = vector_in->link->parent;
if (node->type == UVMapNode::get_node_type()) {
if (node->type == UVMapNode::node_type) {
UVMapNode *uvmap = (UVMapNode *)node;
attribute = uvmap->get_attribute();
}
else if (node->type == TextureCoordinateNode::get_node_type()) {
else if (node->type == TextureCoordinateNode::node_type) {
if (vector_in->link != node->output("UV")) {
return;
}
@@ -525,7 +525,7 @@ NODE_DEFINE(EnvironmentTextureNode)
return type;
}
EnvironmentTextureNode::EnvironmentTextureNode() : ImageSlotTextureNode(get_node_type())
EnvironmentTextureNode::EnvironmentTextureNode() : ImageSlotTextureNode(node_type)
{
colorspace = u_colorspace_raw;
animated = false;
@@ -818,7 +818,7 @@ NODE_DEFINE(SkyTextureNode)
return type;
}
SkyTextureNode::SkyTextureNode() : TextureNode(get_node_type())
SkyTextureNode::SkyTextureNode() : TextureNode(node_type)
{
}
@@ -1000,7 +1000,7 @@ NODE_DEFINE(GradientTextureNode)
return type;
}
GradientTextureNode::GradientTextureNode() : TextureNode(get_node_type())
GradientTextureNode::GradientTextureNode() : TextureNode(node_type)
{
}
@@ -1057,7 +1057,7 @@ NODE_DEFINE(NoiseTextureNode)
return type;
}
NoiseTextureNode::NoiseTextureNode() : TextureNode(get_node_type())
NoiseTextureNode::NoiseTextureNode() : TextureNode(node_type)
{
}
@@ -1150,7 +1150,7 @@ NODE_DEFINE(VoronoiTextureNode)
return type;
}
VoronoiTextureNode::VoronoiTextureNode() : TextureNode(get_node_type())
VoronoiTextureNode::VoronoiTextureNode() : TextureNode(node_type)
{
}
@@ -1229,7 +1229,7 @@ NODE_DEFINE(IESLightNode)
return type;
}
IESLightNode::IESLightNode() : TextureNode(get_node_type())
IESLightNode::IESLightNode() : TextureNode(node_type)
{
light_manager = NULL;
slot = -1;
@@ -1321,7 +1321,7 @@ NODE_DEFINE(WhiteNoiseTextureNode)
return type;
}
WhiteNoiseTextureNode::WhiteNoiseTextureNode() : ShaderNode(get_node_type())
WhiteNoiseTextureNode::WhiteNoiseTextureNode() : ShaderNode(node_type)
{
}
@@ -1386,7 +1386,7 @@ NODE_DEFINE(MusgraveTextureNode)
return type;
}
MusgraveTextureNode::MusgraveTextureNode() : TextureNode(get_node_type())
MusgraveTextureNode::MusgraveTextureNode() : TextureNode(node_type)
{
}
@@ -1484,7 +1484,7 @@ NODE_DEFINE(WaveTextureNode)
return type;
}
WaveTextureNode::WaveTextureNode() : TextureNode(get_node_type())
WaveTextureNode::WaveTextureNode() : TextureNode(node_type)
{
}
@@ -1558,7 +1558,7 @@ NODE_DEFINE(MagicTextureNode)
return type;
}
MagicTextureNode::MagicTextureNode() : TextureNode(get_node_type())
MagicTextureNode::MagicTextureNode() : TextureNode(node_type)
{
}
@@ -1611,7 +1611,7 @@ NODE_DEFINE(CheckerTextureNode)
return type;
}
CheckerTextureNode::CheckerTextureNode() : TextureNode(get_node_type())
CheckerTextureNode::CheckerTextureNode() : TextureNode(node_type)
{
}
@@ -1677,7 +1677,7 @@ NODE_DEFINE(BrickTextureNode)
return type;
}
BrickTextureNode::BrickTextureNode() : TextureNode(get_node_type())
BrickTextureNode::BrickTextureNode() : TextureNode(node_type)
{
}
@@ -1770,7 +1770,7 @@ NODE_DEFINE(PointDensityTextureNode)
return type;
}
PointDensityTextureNode::PointDensityTextureNode() : ShaderNode(get_node_type())
PointDensityTextureNode::PointDensityTextureNode() : ShaderNode(node_type)
{
}
@@ -1887,7 +1887,7 @@ NODE_DEFINE(NormalNode)
return type;
}
NormalNode::NormalNode() : ShaderNode(get_node_type())
NormalNode::NormalNode() : ShaderNode(node_type)
{
}
@@ -1934,7 +1934,7 @@ NODE_DEFINE(MappingNode)
return type;
}
MappingNode::MappingNode() : ShaderNode(get_node_type())
MappingNode::MappingNode() : ShaderNode(node_type)
{
}
@@ -1989,7 +1989,7 @@ NODE_DEFINE(RGBToBWNode)
return type;
}
RGBToBWNode::RGBToBWNode() : ShaderNode(get_node_type())
RGBToBWNode::RGBToBWNode() : ShaderNode(node_type)
{
}
@@ -2312,7 +2312,7 @@ NODE_DEFINE(AnisotropicBsdfNode)
return type;
}
AnisotropicBsdfNode::AnisotropicBsdfNode() : BsdfNode(get_node_type())
AnisotropicBsdfNode::AnisotropicBsdfNode() : BsdfNode(node_type)
{
closure = CLOSURE_BSDF_MICROFACET_GGX_ID;
}
@@ -2370,7 +2370,7 @@ NODE_DEFINE(GlossyBsdfNode)
return type;
}
GlossyBsdfNode::GlossyBsdfNode() : BsdfNode(get_node_type())
GlossyBsdfNode::GlossyBsdfNode() : BsdfNode(node_type)
{
closure = CLOSURE_BSDF_MICROFACET_GGX_ID;
distribution_orig = NBUILTIN_CLOSURES;
@@ -2463,7 +2463,7 @@ NODE_DEFINE(GlassBsdfNode)
return type;
}
GlassBsdfNode::GlassBsdfNode() : BsdfNode(get_node_type())
GlassBsdfNode::GlassBsdfNode() : BsdfNode(node_type)
{
closure = CLOSURE_BSDF_SHARP_GLASS_ID;
distribution_orig = NBUILTIN_CLOSURES;
@@ -2556,7 +2556,7 @@ NODE_DEFINE(RefractionBsdfNode)
return type;
}
RefractionBsdfNode::RefractionBsdfNode() : BsdfNode(get_node_type())
RefractionBsdfNode::RefractionBsdfNode() : BsdfNode(node_type)
{
closure = CLOSURE_BSDF_REFRACTION_ID;
distribution_orig = NBUILTIN_CLOSURES;
@@ -2644,7 +2644,7 @@ NODE_DEFINE(ToonBsdfNode)
return type;
}
ToonBsdfNode::ToonBsdfNode() : BsdfNode(get_node_type())
ToonBsdfNode::ToonBsdfNode() : BsdfNode(node_type)
{
closure = CLOSURE_BSDF_DIFFUSE_TOON_ID;
}
@@ -2678,7 +2678,7 @@ NODE_DEFINE(VelvetBsdfNode)
return type;
}
VelvetBsdfNode::VelvetBsdfNode() : BsdfNode(get_node_type())
VelvetBsdfNode::VelvetBsdfNode() : BsdfNode(node_type)
{
closure = CLOSURE_BSDF_ASHIKHMIN_VELVET_ID;
}
@@ -2709,7 +2709,7 @@ NODE_DEFINE(DiffuseBsdfNode)
return type;
}
DiffuseBsdfNode::DiffuseBsdfNode() : BsdfNode(get_node_type())
DiffuseBsdfNode::DiffuseBsdfNode() : BsdfNode(node_type)
{
closure = CLOSURE_BSDF_DIFFUSE_ID;
}
@@ -2773,7 +2773,7 @@ NODE_DEFINE(PrincipledBsdfNode)
return type;
}
PrincipledBsdfNode::PrincipledBsdfNode() : BsdfBaseNode(get_node_type())
PrincipledBsdfNode::PrincipledBsdfNode() : BsdfBaseNode(node_type)
{
closure = CLOSURE_BSDF_PRINCIPLED_ID;
distribution = CLOSURE_BSDF_MICROFACET_MULTI_GGX_GLASS_ID;
@@ -2994,7 +2994,7 @@ NODE_DEFINE(TranslucentBsdfNode)
return type;
}
TranslucentBsdfNode::TranslucentBsdfNode() : BsdfNode(get_node_type())
TranslucentBsdfNode::TranslucentBsdfNode() : BsdfNode(node_type)
{
closure = CLOSURE_BSDF_TRANSLUCENT_ID;
}
@@ -3023,7 +3023,7 @@ NODE_DEFINE(TransparentBsdfNode)
return type;
}
TransparentBsdfNode::TransparentBsdfNode() : BsdfNode(get_node_type())
TransparentBsdfNode::TransparentBsdfNode() : BsdfNode(node_type)
{
closure = CLOSURE_BSDF_TRANSPARENT_ID;
}
@@ -3064,7 +3064,7 @@ NODE_DEFINE(SubsurfaceScatteringNode)
return type;
}
SubsurfaceScatteringNode::SubsurfaceScatteringNode() : BsdfNode(get_node_type())
SubsurfaceScatteringNode::SubsurfaceScatteringNode() : BsdfNode(node_type)
{
closure = falloff;
}
@@ -3106,7 +3106,7 @@ NODE_DEFINE(EmissionNode)
return type;
}
EmissionNode::EmissionNode() : ShaderNode(get_node_type())
EmissionNode::EmissionNode() : ShaderNode(node_type)
{
}
@@ -3155,7 +3155,7 @@ NODE_DEFINE(BackgroundNode)
return type;
}
BackgroundNode::BackgroundNode() : ShaderNode(get_node_type())
BackgroundNode::BackgroundNode() : ShaderNode(node_type)
{
}
@@ -3203,7 +3203,7 @@ NODE_DEFINE(HoldoutNode)
return type;
}
HoldoutNode::HoldoutNode() : ShaderNode(get_node_type())
HoldoutNode::HoldoutNode() : ShaderNode(node_type)
{
}
@@ -3241,7 +3241,7 @@ NODE_DEFINE(AmbientOcclusionNode)
return type;
}
AmbientOcclusionNode::AmbientOcclusionNode() : ShaderNode(get_node_type())
AmbientOcclusionNode::AmbientOcclusionNode() : ShaderNode(node_type)
{
}
@@ -3329,7 +3329,7 @@ NODE_DEFINE(AbsorptionVolumeNode)
return type;
}
AbsorptionVolumeNode::AbsorptionVolumeNode() : VolumeNode(get_node_type())
AbsorptionVolumeNode::AbsorptionVolumeNode() : VolumeNode(node_type)
{
closure = CLOSURE_VOLUME_ABSORPTION_ID;
}
@@ -3360,7 +3360,7 @@ NODE_DEFINE(ScatterVolumeNode)
return type;
}
ScatterVolumeNode::ScatterVolumeNode() : VolumeNode(get_node_type())
ScatterVolumeNode::ScatterVolumeNode() : VolumeNode(node_type)
{
closure = CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID;
}
@@ -3401,7 +3401,7 @@ NODE_DEFINE(PrincipledVolumeNode)
return type;
}
PrincipledVolumeNode::PrincipledVolumeNode() : VolumeNode(get_node_type())
PrincipledVolumeNode::PrincipledVolumeNode() : VolumeNode(node_type)
{
closure = CLOSURE_VOLUME_HENYEY_GREENSTEIN_ID;
density_attribute = ustring("density");
@@ -3526,7 +3526,7 @@ NODE_DEFINE(PrincipledHairBsdfNode)
return type;
}
PrincipledHairBsdfNode::PrincipledHairBsdfNode() : BsdfBaseNode(get_node_type())
PrincipledHairBsdfNode::PrincipledHairBsdfNode() : BsdfBaseNode(node_type)
{
closure = CLOSURE_BSDF_HAIR_PRINCIPLED_ID;
}
@@ -3637,7 +3637,7 @@ NODE_DEFINE(HairBsdfNode)
return type;
}
HairBsdfNode::HairBsdfNode() : BsdfNode(get_node_type())
HairBsdfNode::HairBsdfNode() : BsdfNode(node_type)
{
closure = CLOSURE_BSDF_HAIR_REFLECTION_ID;
}
@@ -3677,7 +3677,7 @@ NODE_DEFINE(GeometryNode)
return type;
}
GeometryNode::GeometryNode() : ShaderNode(get_node_type())
GeometryNode::GeometryNode() : ShaderNode(node_type)
{
special_type = SHADER_SPECIAL_TYPE_GEOMETRY;
}
@@ -3824,7 +3824,7 @@ NODE_DEFINE(TextureCoordinateNode)
return type;
}
TextureCoordinateNode::TextureCoordinateNode() : ShaderNode(get_node_type())
TextureCoordinateNode::TextureCoordinateNode() : ShaderNode(node_type)
{
}
@@ -3971,7 +3971,7 @@ NODE_DEFINE(UVMapNode)
return type;
}
UVMapNode::UVMapNode() : ShaderNode(get_node_type())
UVMapNode::UVMapNode() : ShaderNode(node_type)
{
}
@@ -4060,7 +4060,7 @@ NODE_DEFINE(LightPathNode)
return type;
}
LightPathNode::LightPathNode() : ShaderNode(get_node_type())
LightPathNode::LightPathNode() : ShaderNode(node_type)
{
}
@@ -4160,7 +4160,7 @@ NODE_DEFINE(LightFalloffNode)
return type;
}
LightFalloffNode::LightFalloffNode() : ShaderNode(get_node_type())
LightFalloffNode::LightFalloffNode() : ShaderNode(node_type)
{
}
@@ -4217,7 +4217,7 @@ NODE_DEFINE(ObjectInfoNode)
return type;
}
ObjectInfoNode::ObjectInfoNode() : ShaderNode(get_node_type())
ObjectInfoNode::ObjectInfoNode() : ShaderNode(node_type)
{
}
@@ -4275,7 +4275,7 @@ NODE_DEFINE(ParticleInfoNode)
return type;
}
ParticleInfoNode::ParticleInfoNode() : ShaderNode(get_node_type())
ParticleInfoNode::ParticleInfoNode() : ShaderNode(node_type)
{
}
@@ -4382,7 +4382,7 @@ NODE_DEFINE(HairInfoNode)
return type;
}
HairInfoNode::HairInfoNode() : ShaderNode(get_node_type())
HairInfoNode::HairInfoNode() : ShaderNode(node_type)
{
}
@@ -4457,7 +4457,7 @@ NODE_DEFINE(VolumeInfoNode)
return type;
}
VolumeInfoNode::VolumeInfoNode() : ShaderNode(get_node_type())
VolumeInfoNode::VolumeInfoNode() : ShaderNode(node_type)
{
}
@@ -4538,7 +4538,7 @@ NODE_DEFINE(VertexColorNode)
return type;
}
VertexColorNode::VertexColorNode() : ShaderNode(get_node_type())
VertexColorNode::VertexColorNode() : ShaderNode(node_type)
{
}
@@ -4619,7 +4619,7 @@ NODE_DEFINE(ValueNode)
return type;
}
ValueNode::ValueNode() : ShaderNode(get_node_type())
ValueNode::ValueNode() : ShaderNode(node_type)
{
}
@@ -4653,7 +4653,7 @@ NODE_DEFINE(ColorNode)
return type;
}
ColorNode::ColorNode() : ShaderNode(get_node_type())
ColorNode::ColorNode() : ShaderNode(node_type)
{
}
@@ -4692,7 +4692,7 @@ NODE_DEFINE(AddClosureNode)
return type;
}
AddClosureNode::AddClosureNode() : ShaderNode(get_node_type())
AddClosureNode::AddClosureNode() : ShaderNode(node_type)
{
special_type = SHADER_SPECIAL_TYPE_COMBINE_CLOSURE;
}
@@ -4736,7 +4736,7 @@ NODE_DEFINE(MixClosureNode)
return type;
}
MixClosureNode::MixClosureNode() : ShaderNode(get_node_type())
MixClosureNode::MixClosureNode() : ShaderNode(node_type)
{
special_type = SHADER_SPECIAL_TYPE_COMBINE_CLOSURE;
}
@@ -4790,7 +4790,7 @@ NODE_DEFINE(MixClosureWeightNode)
return type;
}
MixClosureWeightNode::MixClosureWeightNode() : ShaderNode(get_node_type())
MixClosureWeightNode::MixClosureWeightNode() : ShaderNode(node_type)
{
}
@@ -4827,7 +4827,7 @@ NODE_DEFINE(InvertNode)
return type;
}
InvertNode::InvertNode() : ShaderNode(get_node_type())
InvertNode::InvertNode() : ShaderNode(node_type)
{
}
@@ -4903,7 +4903,7 @@ NODE_DEFINE(MixNode)
return type;
}
MixNode::MixNode() : ShaderNode(get_node_type())
MixNode::MixNode() : ShaderNode(node_type)
{
}
@@ -4958,7 +4958,7 @@ NODE_DEFINE(CombineRGBNode)
return type;
}
CombineRGBNode::CombineRGBNode() : ShaderNode(get_node_type())
CombineRGBNode::CombineRGBNode() : ShaderNode(node_type)
{
}
@@ -5006,7 +5006,7 @@ NODE_DEFINE(CombineXYZNode)
return type;
}
CombineXYZNode::CombineXYZNode() : ShaderNode(get_node_type())
CombineXYZNode::CombineXYZNode() : ShaderNode(node_type)
{
}
@@ -5054,7 +5054,7 @@ NODE_DEFINE(CombineHSVNode)
return type;
}
CombineHSVNode::CombineHSVNode() : ShaderNode(get_node_type())
CombineHSVNode::CombineHSVNode() : ShaderNode(node_type)
{
}
@@ -5097,7 +5097,7 @@ NODE_DEFINE(GammaNode)
return type;
}
GammaNode::GammaNode() : ShaderNode(get_node_type())
GammaNode::GammaNode() : ShaderNode(node_type)
{
}
@@ -5153,7 +5153,7 @@ NODE_DEFINE(BrightContrastNode)
return type;
}
BrightContrastNode::BrightContrastNode() : ShaderNode(get_node_type())
BrightContrastNode::BrightContrastNode() : ShaderNode(node_type)
{
}
@@ -5198,7 +5198,7 @@ NODE_DEFINE(SeparateRGBNode)
return type;
}
SeparateRGBNode::SeparateRGBNode() : ShaderNode(get_node_type())
SeparateRGBNode::SeparateRGBNode() : ShaderNode(node_type)
{
}
@@ -5251,7 +5251,7 @@ NODE_DEFINE(SeparateXYZNode)
return type;
}
SeparateXYZNode::SeparateXYZNode() : ShaderNode(get_node_type())
SeparateXYZNode::SeparateXYZNode() : ShaderNode(node_type)
{
}
@@ -5304,7 +5304,7 @@ NODE_DEFINE(SeparateHSVNode)
return type;
}
SeparateHSVNode::SeparateHSVNode() : ShaderNode(get_node_type())
SeparateHSVNode::SeparateHSVNode() : ShaderNode(node_type)
{
}
@@ -5358,7 +5358,7 @@ NODE_DEFINE(HSVNode)
return type;
}
HSVNode::HSVNode() : ShaderNode(get_node_type())
HSVNode::HSVNode() : ShaderNode(node_type)
{
}
@@ -5401,7 +5401,7 @@ NODE_DEFINE(AttributeNode)
return type;
}
AttributeNode::AttributeNode() : ShaderNode(get_node_type())
AttributeNode::AttributeNode() : ShaderNode(node_type)
{
}
@@ -5489,7 +5489,7 @@ NODE_DEFINE(CameraNode)
return type;
}
CameraNode::CameraNode() : ShaderNode(get_node_type())
CameraNode::CameraNode() : ShaderNode(node_type)
{
}
@@ -5525,7 +5525,7 @@ NODE_DEFINE(FresnelNode)
return type;
}
FresnelNode::FresnelNode() : ShaderNode(get_node_type())
FresnelNode::FresnelNode() : ShaderNode(node_type)
{
}
@@ -5563,7 +5563,7 @@ NODE_DEFINE(LayerWeightNode)
return type;
}
LayerWeightNode::LayerWeightNode() : ShaderNode(get_node_type())
LayerWeightNode::LayerWeightNode() : ShaderNode(node_type)
{
}
@@ -5611,7 +5611,7 @@ NODE_DEFINE(WireframeNode)
return type;
}
WireframeNode::WireframeNode() : ShaderNode(get_node_type())
WireframeNode::WireframeNode() : ShaderNode(node_type)
{
}
@@ -5659,7 +5659,7 @@ NODE_DEFINE(WavelengthNode)
return type;
}
WavelengthNode::WavelengthNode() : ShaderNode(get_node_type())
WavelengthNode::WavelengthNode() : ShaderNode(node_type)
{
}
@@ -5689,7 +5689,7 @@ NODE_DEFINE(BlackbodyNode)
return type;
}
BlackbodyNode::BlackbodyNode() : ShaderNode(get_node_type())
BlackbodyNode::BlackbodyNode() : ShaderNode(node_type)
{
}
@@ -5728,7 +5728,7 @@ NODE_DEFINE(OutputNode)
return type;
}
OutputNode::OutputNode() : ShaderNode(get_node_type())
OutputNode::OutputNode() : ShaderNode(node_type)
{
special_type = SHADER_SPECIAL_TYPE_OUTPUT;
}
@@ -5780,7 +5780,7 @@ NODE_DEFINE(MapRangeNode)
return type;
}
MapRangeNode::MapRangeNode() : ShaderNode(get_node_type())
MapRangeNode::MapRangeNode() : ShaderNode(node_type)
{
}
@@ -5868,7 +5868,7 @@ NODE_DEFINE(ClampNode)
return type;
}
ClampNode::ClampNode() : ShaderNode(get_node_type())
ClampNode::ClampNode() : ShaderNode(node_type)
{
}
@@ -5923,7 +5923,7 @@ NODE_DEFINE(OutputAOVNode)
return type;
}
OutputAOVNode::OutputAOVNode() : ShaderNode(get_node_type())
OutputAOVNode::OutputAOVNode() : ShaderNode(node_type)
{
special_type = SHADER_SPECIAL_TYPE_OUTPUT_AOV;
slot = -1;
@@ -6021,7 +6021,7 @@ NODE_DEFINE(MathNode)
return type;
}
MathNode::MathNode() : ShaderNode(get_node_type())
MathNode::MathNode() : ShaderNode(node_type)
{
}
@@ -6124,7 +6124,7 @@ NODE_DEFINE(VectorMathNode)
return type;
}
VectorMathNode::VectorMathNode() : ShaderNode(get_node_type())
VectorMathNode::VectorMathNode() : ShaderNode(node_type)
{
}
@@ -6213,7 +6213,7 @@ NODE_DEFINE(VectorRotateNode)
return type;
}
VectorRotateNode::VectorRotateNode() : ShaderNode(get_node_type())
VectorRotateNode::VectorRotateNode() : ShaderNode(node_type)
{
}
@@ -6269,7 +6269,7 @@ NODE_DEFINE(VectorTransformNode)
return type;
}
VectorTransformNode::VectorTransformNode() : ShaderNode(get_node_type())
VectorTransformNode::VectorTransformNode() : ShaderNode(node_type)
{
}
@@ -6317,7 +6317,7 @@ NODE_DEFINE(BumpNode)
return type;
}
BumpNode::BumpNode() : ShaderNode(get_node_type())
BumpNode::BumpNode() : ShaderNode(node_type)
{
special_type = SHADER_SPECIAL_TYPE_BUMP;
}
@@ -6464,7 +6464,7 @@ NODE_DEFINE(RGBCurvesNode)
return type;
}
RGBCurvesNode::RGBCurvesNode() : CurvesNode(get_node_type())
RGBCurvesNode::RGBCurvesNode() : CurvesNode(node_type)
{
}
@@ -6501,7 +6501,7 @@ NODE_DEFINE(VectorCurvesNode)
return type;
}
VectorCurvesNode::VectorCurvesNode() : CurvesNode(get_node_type())
VectorCurvesNode::VectorCurvesNode() : CurvesNode(node_type)
{
}
@@ -6538,7 +6538,7 @@ NODE_DEFINE(RGBRampNode)
return type;
}
RGBRampNode::RGBRampNode() : ShaderNode(get_node_type())
RGBRampNode::RGBRampNode() : ShaderNode(node_type)
{
}
@@ -6611,7 +6611,7 @@ NODE_DEFINE(SetNormalNode)
return type;
}
SetNormalNode::SetNormalNode() : ShaderNode(get_node_type())
SetNormalNode::SetNormalNode() : ShaderNode(node_type)
{
}
@@ -6733,7 +6733,7 @@ NODE_DEFINE(NormalMapNode)
return type;
}
NormalMapNode::NormalMapNode() : ShaderNode(get_node_type())
NormalMapNode::NormalMapNode() : ShaderNode(node_type)
{
}
@@ -6827,7 +6827,7 @@ NODE_DEFINE(TangentNode)
return type;
}
TangentNode::TangentNode() : ShaderNode(get_node_type())
TangentNode::TangentNode() : ShaderNode(node_type)
{
}
@@ -6897,7 +6897,7 @@ NODE_DEFINE(BevelNode)
return type;
}
BevelNode::BevelNode() : ShaderNode(get_node_type())
BevelNode::BevelNode() : ShaderNode(node_type)
{
}
@@ -6942,7 +6942,7 @@ NODE_DEFINE(DisplacementNode)
return type;
}
DisplacementNode::DisplacementNode() : ShaderNode(get_node_type())
DisplacementNode::DisplacementNode() : ShaderNode(node_type)
{
}
@@ -7001,7 +7001,7 @@ NODE_DEFINE(VectorDisplacementNode)
return type;
}
VectorDisplacementNode::VectorDisplacementNode() : ShaderNode(get_node_type())
VectorDisplacementNode::VectorDisplacementNode() : ShaderNode(node_type)
{
}
View File
@@ -82,7 +82,7 @@ NODE_DEFINE(Object)
{
NodeType *type = NodeType::add("object", create);
SOCKET_NODE(geometry, "Geometry", Geometry::get_node_base_type());
SOCKET_NODE(geometry, "Geometry", &Geometry::node_base_type);
SOCKET_TRANSFORM(tfm, "Transform", transform_identity());
SOCKET_UINT(visibility, "Visibility", ~0);
SOCKET_COLOR(color, "Color", zero_float3());
@@ -98,13 +98,13 @@ NODE_DEFINE(Object)
SOCKET_BOOLEAN(is_shadow_catcher, "Shadow Catcher", false);
SOCKET_NODE(particle_system, "Particle System", ParticleSystem::get_node_type());
SOCKET_NODE(particle_system, "Particle System", &ParticleSystem::node_type);
SOCKET_INT(particle_index, "Particle Index", 0);
return type;
}
Object::Object() : Node(get_node_type())
Object::Object() : Node(node_type)
{
particle_system = NULL;
particle_index = 0;
View File
@@ -36,7 +36,7 @@ NODE_DEFINE(ParticleSystem)
return type;
}
ParticleSystem::ParticleSystem() : Node(get_node_type())
ParticleSystem::ParticleSystem() : Node(node_type)
{
}
View File
@@ -91,7 +91,6 @@ DeviceScene::DeviceScene(Device *device)
Scene::Scene(const SceneParams &params_, Device *device)
: name("Scene"),
bvh(NULL),
default_surface(NULL),
default_volume(NULL),
default_light(NULL),
@@ -100,36 +99,37 @@ Scene::Scene(const SceneParams &params_, Device *device)
device(device),
dscene(device),
params(params_),
update_stats(NULL),
kernels_loaded(false),
/* TODO(sergey): Check if it's indeed an optimal value for the split kernel. */
max_closure_global(1)
update_stats(NULL)
{
memset((void *)&dscene.data, 0, sizeof(dscene.data));
bvh = NULL;
camera = create_node<Camera>();
dicing_camera = create_node<Camera>();
lookup_tables = new LookupTables();
film = create_node<Film>();
background = create_node<Background>();
light_manager = new LightManager();
geometry_manager = new GeometryManager();
object_manager = new ObjectManager();
integrator = create_node<Integrator>();
image_manager = new ImageManager(device->info);
particle_system_manager = new ParticleSystemManager();
bake_manager = new BakeManager();
procedural_manager = new ProceduralManager();
kernels_loaded = false;
/* TODO(sergey): Check if it's indeed an optimal value for the split kernel. */
max_closure_global = 1;
film->add_default(this);
/* OSL only works on the CPU */
if (device->info.has_osl)
shader_manager = ShaderManager::create(params.shadingsystem);
else
shader_manager = ShaderManager::create(SHADINGSYSTEM_SVM);
light_manager = new LightManager();
geometry_manager = new GeometryManager();
object_manager = new ObjectManager();
image_manager = new ImageManager(device->info);
particle_system_manager = new ParticleSystemManager();
bake_manager = new BakeManager();
procedural_manager = new ProceduralManager();
/* Create nodes after managers, since create_node() can tag the managers. */
camera = create_node<Camera>();
dicing_camera = create_node<Camera>();
lookup_tables = new LookupTables();
film = create_node<Film>();
background = create_node<Background>();
integrator = create_node<Integrator>();
film->add_default(this);
shader_manager->add_default(this);
}
View File
@@ -194,7 +194,7 @@ NODE_DEFINE(Shader)
return type;
}
Shader::Shader() : Node(get_node_type())
Shader::Shader() : Node(node_type)
{
pass_id = 0;
@@ -245,7 +245,7 @@ bool Shader::is_constant_emission(float3 *emission)
return false;
}
if (surf->link->parent->type == EmissionNode::get_node_type()) {
if (surf->link->parent->type == EmissionNode::node_type) {
EmissionNode *node = (EmissionNode *)surf->link->parent;
assert(node->input("Color"));
@@ -257,7 +257,7 @@ bool Shader::is_constant_emission(float3 *emission)
*emission = node->get_color() * node->get_strength();
}
else if (surf->link->parent->type == BackgroundNode::get_node_type()) {
else if (surf->link->parent->type == BackgroundNode::node_type) {
BackgroundNode *node = (BackgroundNode *)surf->link->parent;
assert(node->input("Color"));
@@ -831,8 +831,7 @@ static bool to_scene_linear_transform(OCIO::ConstConfigRcPtr &config,
void ShaderManager::init_xyz_transforms()
{
/* Default to ITU-BT.709 in case no appropriate transform is found.
* Note XYZ here is defined as having a D65 white point. */
/* Default to ITU-BT.709 in case no appropriate transform is found. */
xyz_to_r = make_float3(3.2404542f, -1.5371385f, -0.4985314f);
xyz_to_g = make_float3(-0.9692660f, 1.8760108f, 0.0415560f);
xyz_to_b = make_float3(0.0556434f, -0.2040259f, 1.0572252f);
@@ -849,27 +848,24 @@ void ShaderManager::init_xyz_transforms()
if (config->hasRole("aces_interchange")) {
/* Standard OpenColorIO role, defined as ACES2065-1. */
const Transform xyz_E_to_aces = make_transform(1.0498110175f,
0.0f,
-0.0000974845f,
0.0f,
-0.4959030231f,
1.3733130458f,
0.0982400361f,
0.0f,
0.0f,
0.0f,
0.9912520182f,
0.0f);
const Transform xyz_D65_to_E = make_transform(
1.0521111f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.9184170f, 0.0f);
const Transform xyz_to_aces = make_transform(1.0498110175f,
0.0f,
-0.0000974845f,
0.0f,
-0.4959030231f,
1.3733130458f,
0.0982400361f,
0.0f,
0.0f,
0.0f,
0.9912520182f,
0.0f);
Transform aces_to_rgb;
if (!to_scene_linear_transform(config, "aces_interchange", aces_to_rgb)) {
return;
}
xyz_to_rgb = aces_to_rgb * xyz_E_to_aces * xyz_D65_to_E;
xyz_to_rgb = aces_to_rgb * xyz_to_aces;
}
else if (config->hasRole("XYZ")) {
/* Custom role used before the standard existed. */
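Stepping back to the aces_interchange branch above: the substantive difference between the two sides is the extra xyz_D65_to_E factor. Its diagonal (1.0521111, 1.0, 0.9184170) is the reciprocal of the D65 white point (0.95047, 1.0, 1.08883), so, reading the product right-to-left against a column XYZ vector, that variant first rescales D65-referenced XYZ to an equal-energy white, then applies the XYZ-to-ACES2065-1 matrix, then the config's ACES-to-scene-linear transform. As a one-line summary (an interpretation of the patch, not text from it):

/* rgb = aces_to_rgb * xyz_E_to_aces * (xyz_D65_to_E * xyz),  with xyz referenced to D65 */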
View File
@@ -36,7 +36,7 @@ CCL_NAMESPACE_BEGIN
NODE_DEFINE(Volume)
{
NodeType *type = NodeType::add("volume", create, NodeType::NONE, Mesh::get_node_type());
NodeType *type = NodeType::add("volume", create, NodeType::NONE, Mesh::node_type);
SOCKET_FLOAT(clipping, "Clipping", 0.001f);
SOCKET_FLOAT(step_size, "Step Size", 0.0f);
@@ -45,7 +45,7 @@ NODE_DEFINE(Volume)
return type;
}
Volume::Volume() : Mesh(get_node_type(), Geometry::VOLUME)
Volume::Volume() : Mesh(node_type, Geometry::VOLUME)
{
clipping = 0.001f;
step_size = 0.0f;
View File
@@ -278,7 +278,7 @@ GHOST_TSuccess GHOST_ContextCGL::initializeDrawingContext()
#ifdef GHOST_WAIT_FOR_VSYNC
{
GLint swapInt = 1;
/* Wait for vertical-sync, to avoid tearing artifacts. */
/* wait for vsync, to avoid tearing artifacts */
[m_openGLContext setValues:&swapInt forParameter:NSOpenGLCPSwapInterval];
}
#endif
View File
@@ -386,11 +386,13 @@ extern "C" int GHOST_HACK_getFirstFile(char buf[FIRSTFILEBUFLG])
- (id)init
{
self = [super init];
NSNotificationCenter *center = [NSNotificationCenter defaultCenter];
[center addObserver:self
selector:@selector(windowWillClose:)
name:NSWindowWillCloseNotification
object:nil];
if (self) {
NSNotificationCenter *center = [NSNotificationCenter defaultCenter];
[center addObserver:self
selector:@selector(windowWillClose:)
name:NSWindowWillCloseNotification
object:nil];
}
return self;
}
@@ -561,96 +563,97 @@ GHOST_TSuccess GHOST_SystemCocoa::init()
SetFrontProcess(&psn);
}*/
@autoreleasepool {
[NSApplication sharedApplication]; // initializes NSApp
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
[NSApplication sharedApplication]; // initializes NSApp
if ([NSApp mainMenu] == nil) {
NSMenu *mainMenubar = [[NSMenu alloc] init];
NSMenuItem *menuItem;
NSMenu *windowMenu;
NSMenu *appMenu;
if ([NSApp mainMenu] == nil) {
NSMenu *mainMenubar = [[NSMenu alloc] init];
NSMenuItem *menuItem;
NSMenu *windowMenu;
NSMenu *appMenu;
// Create the application menu
appMenu = [[NSMenu alloc] initWithTitle:@"Blender"];
// Create the application menu
appMenu = [[NSMenu alloc] initWithTitle:@"Blender"];
[appMenu addItemWithTitle:@"About Blender"
action:@selector(orderFrontStandardAboutPanel:)
keyEquivalent:@""];
[appMenu addItem:[NSMenuItem separatorItem]];
[appMenu addItemWithTitle:@"About Blender"
action:@selector(orderFrontStandardAboutPanel:)
keyEquivalent:@""];
[appMenu addItem:[NSMenuItem separatorItem]];
menuItem = [appMenu addItemWithTitle:@"Hide Blender"
action:@selector(hide:)
keyEquivalent:@"h"];
[menuItem setKeyEquivalentModifierMask:NSEventModifierFlagCommand];
menuItem = [appMenu addItemWithTitle:@"Hide Blender"
action:@selector(hide:)
keyEquivalent:@"h"];
[menuItem setKeyEquivalentModifierMask:NSEventModifierFlagCommand];
menuItem = [appMenu addItemWithTitle:@"Hide Others"
action:@selector(hideOtherApplications:)
keyEquivalent:@"h"];
[menuItem
setKeyEquivalentModifierMask:(NSEventModifierFlagOption | NSEventModifierFlagCommand)];
menuItem = [appMenu addItemWithTitle:@"Hide Others"
action:@selector(hideOtherApplications:)
keyEquivalent:@"h"];
[menuItem
setKeyEquivalentModifierMask:(NSEventModifierFlagOption | NSEventModifierFlagCommand)];
[appMenu addItemWithTitle:@"Show All"
action:@selector(unhideAllApplications:)
keyEquivalent:@""];
[appMenu addItemWithTitle:@"Show All"
action:@selector(unhideAllApplications:)
keyEquivalent:@""];
menuItem = [appMenu addItemWithTitle:@"Quit Blender"
action:@selector(terminate:)
keyEquivalent:@"q"];
[menuItem setKeyEquivalentModifierMask:NSEventModifierFlagCommand];
menuItem = [appMenu addItemWithTitle:@"Quit Blender"
action:@selector(terminate:)
keyEquivalent:@"q"];
[menuItem setKeyEquivalentModifierMask:NSEventModifierFlagCommand];
menuItem = [[NSMenuItem alloc] init];
[menuItem setSubmenu:appMenu];
menuItem = [[NSMenuItem alloc] init];
[menuItem setSubmenu:appMenu];
[mainMenubar addItem:menuItem];
[menuItem release];
[NSApp performSelector:@selector(setAppleMenu:) withObject:appMenu]; // Needed for 10.5
[appMenu release];
[mainMenubar addItem:menuItem];
[menuItem release];
[NSApp performSelector:@selector(setAppleMenu:) withObject:appMenu]; // Needed for 10.5
[appMenu release];
// Create the window menu
windowMenu = [[NSMenu alloc] initWithTitle:@"Window"];
// Create the window menu
windowMenu = [[NSMenu alloc] initWithTitle:@"Window"];
menuItem = [windowMenu addItemWithTitle:@"Minimize"
action:@selector(performMiniaturize:)
keyEquivalent:@"m"];
[menuItem setKeyEquivalentModifierMask:NSEventModifierFlagCommand];
menuItem = [windowMenu addItemWithTitle:@"Minimize"
action:@selector(performMiniaturize:)
keyEquivalent:@"m"];
[menuItem setKeyEquivalentModifierMask:NSEventModifierFlagCommand];
[windowMenu addItemWithTitle:@"Zoom" action:@selector(performZoom:) keyEquivalent:@""];
[windowMenu addItemWithTitle:@"Zoom" action:@selector(performZoom:) keyEquivalent:@""];
menuItem = [windowMenu addItemWithTitle:@"Enter Full Screen"
action:@selector(toggleFullScreen:)
keyEquivalent:@"f"];
[menuItem
setKeyEquivalentModifierMask:NSEventModifierFlagControl | NSEventModifierFlagCommand];
menuItem = [windowMenu addItemWithTitle:@"Enter Full Screen"
action:@selector(toggleFullScreen:)
keyEquivalent:@"f"];
[menuItem
setKeyEquivalentModifierMask:NSEventModifierFlagControl | NSEventModifierFlagCommand];
menuItem = [windowMenu addItemWithTitle:@"Close"
action:@selector(performClose:)
keyEquivalent:@"w"];
[menuItem setKeyEquivalentModifierMask:NSEventModifierFlagCommand];
menuItem = [windowMenu addItemWithTitle:@"Close"
action:@selector(performClose:)
keyEquivalent:@"w"];
[menuItem setKeyEquivalentModifierMask:NSEventModifierFlagCommand];
menuItem = [[NSMenuItem alloc] init];
[menuItem setSubmenu:windowMenu];
menuItem = [[NSMenuItem alloc] init];
[menuItem setSubmenu:windowMenu];
[mainMenubar addItem:menuItem];
[menuItem release];
[mainMenubar addItem:menuItem];
[menuItem release];
[NSApp setMainMenu:mainMenubar];
[NSApp setWindowsMenu:windowMenu];
[windowMenu release];
}
if ([NSApp delegate] == nil) {
CocoaAppDelegate *appDelegate = [[CocoaAppDelegate alloc] init];
[appDelegate setSystemCocoa:this];
[NSApp setDelegate:appDelegate];
}
// AppKit provides automatic window tabbing. Blender is a single-tabbed application
// without a macOS tab bar, and should explicitly opt-out of this. This is also
// controlled by the macOS user default #NSWindowTabbingEnabled.
NSWindow.allowsAutomaticWindowTabbing = NO;
[NSApp finishLaunching];
[NSApp setMainMenu:mainMenubar];
[NSApp setWindowsMenu:windowMenu];
[windowMenu release];
}
if ([NSApp delegate] == nil) {
CocoaAppDelegate *appDelegate = [[CocoaAppDelegate alloc] init];
[appDelegate setSystemCocoa:this];
[NSApp setDelegate:appDelegate];
}
// AppKit provides automatic window tabbing. Blender is a single-tabbed application without a
// macOS tab bar, and should explicitly opt-out of this. This is also controlled by the macOS
// user default #NSWindowTabbingEnabled.
NSWindow.allowsAutomaticWindowTabbing = NO;
[NSApp finishLaunching];
[pool drain];
}
return success;
}
@@ -673,26 +676,30 @@ GHOST_TUns8 GHOST_SystemCocoa::getNumDisplays() const
{
// Note that OS X supports monitor hot plug
// We do not support multiple monitors at the moment
@autoreleasepool {
return NSScreen.screens.count;
}
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
GHOST_TUns8 count = [[NSScreen screens] count];
[pool drain];
return count;
}
void GHOST_SystemCocoa::getMainDisplayDimensions(GHOST_TUns32 &width, GHOST_TUns32 &height) const
{
@autoreleasepool {
// Get visible frame, that is frame excluding dock and top menu bar
NSRect frame = [[NSScreen mainScreen] visibleFrame];
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
// Get visible frame, that is frame excluding dock and top menu bar
NSRect frame = [[NSScreen mainScreen] visibleFrame];
// Returns max window contents (excluding title bar...)
NSRect contentRect = [NSWindow
contentRectForFrameRect:frame
styleMask:(NSWindowStyleMaskTitled | NSWindowStyleMaskClosable |
NSWindowStyleMaskMiniaturizable)];
// Returns max window contents (excluding title bar...)
NSRect contentRect = [NSWindow
contentRectForFrameRect:frame
styleMask:(NSWindowStyleMaskTitled | NSWindowStyleMaskClosable |
NSWindowStyleMaskMiniaturizable)];
width = contentRect.size.width;
height = contentRect.size.height;
}
width = contentRect.size.width;
height = contentRect.size.height;
[pool drain];
}
void GHOST_SystemCocoa::getAllDisplayDimensions(GHOST_TUns32 &width, GHOST_TUns32 &height) const
@@ -713,52 +720,53 @@ GHOST_IWindow *GHOST_SystemCocoa::createWindow(const char *title,
const bool is_dialog,
const GHOST_IWindow *parentWindow)
{
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
GHOST_IWindow *window = NULL;
@autoreleasepool {
// Get the available rect for including window contents
NSRect frame = [[NSScreen mainScreen] visibleFrame];
NSRect contentRect = [NSWindow
contentRectForFrameRect:frame
styleMask:(NSWindowStyleMaskTitled | NSWindowStyleMaskClosable |
NSWindowStyleMaskMiniaturizable)];
// Get the available rect for including window contents
NSRect frame = [[NSScreen mainScreen] visibleFrame];
NSRect contentRect = [NSWindow
contentRectForFrameRect:frame
styleMask:(NSWindowStyleMaskTitled | NSWindowStyleMaskClosable |
NSWindowStyleMaskMiniaturizable)];
GHOST_TInt32 bottom = (contentRect.size.height - 1) - height - top;
GHOST_TInt32 bottom = (contentRect.size.height - 1) - height - top;
// Ensures window top left is inside this available rect
left = left > contentRect.origin.x ? left : contentRect.origin.x;
// Add contentRect.origin.y to respect docksize
bottom = bottom > contentRect.origin.y ? bottom + contentRect.origin.y : contentRect.origin.y;
// Ensures window top left is inside this available rect
left = left > contentRect.origin.x ? left : contentRect.origin.x;
// Add contentRect.origin.y to respect docksize
bottom = bottom > contentRect.origin.y ? bottom + contentRect.origin.y : contentRect.origin.y;
window = new GHOST_WindowCocoa(this,
title,
left,
bottom,
width,
height,
state,
type,
glSettings.flags & GHOST_glStereoVisual,
glSettings.flags & GHOST_glDebugContext,
is_dialog,
(GHOST_WindowCocoa *)parentWindow);
window = new GHOST_WindowCocoa(this,
title,
left,
bottom,
width,
height,
state,
type,
glSettings.flags & GHOST_glStereoVisual,
glSettings.flags & GHOST_glDebugContext,
is_dialog,
(GHOST_WindowCocoa *)parentWindow);
if (window->getValid()) {
// Store the pointer to the window
GHOST_ASSERT(m_windowManager, "m_windowManager not initialized");
m_windowManager->addWindow(window);
m_windowManager->setActiveWindow(window);
/* Need to tell window manager the new window is the active one
* (Cocoa does not send the event activate upon window creation). */
pushEvent(new GHOST_Event(getMilliSeconds(), GHOST_kEventWindowActivate, window));
pushEvent(new GHOST_Event(getMilliSeconds(), GHOST_kEventWindowSize, window));
}
else {
GHOST_PRINT("GHOST_SystemCocoa::createWindow(): window invalid\n");
delete window;
window = NULL;
}
if (window->getValid()) {
// Store the pointer to the window
GHOST_ASSERT(m_windowManager, "m_windowManager not initialized");
m_windowManager->addWindow(window);
m_windowManager->setActiveWindow(window);
/* Need to tell window manager the new window is the active one
* (Cocoa does not send the event activate upon window creation). */
pushEvent(new GHOST_Event(getMilliSeconds(), GHOST_kEventWindowActivate, window));
pushEvent(new GHOST_Event(getMilliSeconds(), GHOST_kEventWindowSize, window));
}
else {
GHOST_PRINT("GHOST_SystemCocoa::createWindow(): window invalid\n");
delete window;
window = NULL;
}
[pool drain];
return window;
}
@@ -833,28 +841,29 @@ GHOST_TSuccess GHOST_SystemCocoa::setMouseCursorPosition(GHOST_TInt32 x, GHOST_T
if (!window)
return GHOST_kFailure;
@autoreleasepool {
NSScreen *windowScreen = window->getScreen();
NSRect screenRect = [windowScreen frame];
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
NSScreen *windowScreen = window->getScreen();
NSRect screenRect = [windowScreen frame];
// Set position relative to current screen
xf -= screenRect.origin.x;
yf -= screenRect.origin.y;
// Set position relative to current screen
xf -= screenRect.origin.x;
yf -= screenRect.origin.y;
// Quartz Display Services uses the old coordinates (top left origin)
yf = screenRect.size.height - yf;
// Quartz Display Services uses the old coordinates (top left origin)
yf = screenRect.size.height - yf;
CGDisplayMoveCursorToPoint((CGDirectDisplayID)[[[windowScreen deviceDescription]
objectForKey:@"NSScreenNumber"] unsignedIntValue],
CGPointMake(xf, yf));
CGDisplayMoveCursorToPoint((CGDirectDisplayID)[[[windowScreen deviceDescription]
objectForKey:@"NSScreenNumber"] unsignedIntValue],
CGPointMake(xf, yf));
// See https://stackoverflow.com/a/17559012. By default, hardware events
// will be suppressed for 500ms after a synthetic mouse event. For unknown
// reasons CGEventSourceSetLocalEventsSuppressionInterval does not work,
// however calling CGAssociateMouseAndMouseCursorPosition also removes the
// delay, even if this is undocumented.
CGAssociateMouseAndMouseCursorPosition(true);
}
// See https://stackoverflow.com/a/17559012. By default, hardware events
// will be suppressed for 500ms after a synthetic mouse event. For unknown
// reasons CGEventSourceSetLocalEventsSuppressionInterval does not work,
// however calling CGAssociateMouseAndMouseCursorPosition also removes the
// delay, even if this is undocumented.
CGAssociateMouseAndMouseCursorPosition(true);
[pool drain];
return GHOST_kSuccess;
}
@@ -919,40 +928,42 @@ bool GHOST_SystemCocoa::processEvents(bool waitForEvent)
}
#endif
do {
@autoreleasepool {
event = [NSApp nextEventMatchingMask:NSEventMaskAny
untilDate:[NSDate distantPast]
inMode:NSDefaultRunLoopMode
dequeue:YES];
if (event == nil) {
break;
}
anyProcessed = true;
// Send event to NSApp to ensure Mac wide events are handled,
// this will send events to CocoaWindow which will call back
// to handleKeyEvent, handleMouseEvent and handleTabletEvent
// There is one special exception for ctrl+(shift)+tab. We do not
// get keyDown events delivered to the view because they are
// special hotkeys to switch between views, so override directly
if ([event type] == NSEventTypeKeyDown && [event keyCode] == kVK_Tab &&
([event modifierFlags] & NSEventModifierFlagControl)) {
handleKeyEvent(event);
}
else {
// For some reason NSApp is swallowing the key up events when modifier
// key is pressed, even if there seems to be no apparent reason to do
// so, as a workaround we always handle these up events.
if ([event type] == NSEventTypeKeyUp &&
([event modifierFlags] & (NSEventModifierFlagCommand | NSEventModifierFlagOption)))
handleKeyEvent(event);
[NSApp sendEvent:event];
}
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
event = [NSApp nextEventMatchingMask:NSEventMaskAny
untilDate:[NSDate distantPast]
inMode:NSDefaultRunLoopMode
dequeue:YES];
if (event == nil) {
[pool drain];
break;
}
anyProcessed = true;
// Send event to NSApp to ensure Mac wide events are handled,
// this will send events to CocoaWindow which will call back
// to handleKeyEvent, handleMouseEvent and handleTabletEvent
// There is one special exception for ctrl+(shift)+tab. We do not
// get keyDown events delivered to the view because they are
// special hotkeys to switch between views, so override directly
if ([event type] == NSEventTypeKeyDown && [event keyCode] == kVK_Tab &&
([event modifierFlags] & NSEventModifierFlagControl)) {
handleKeyEvent(event);
}
else {
// For some reason NSApp is swallowing the key up events when modifier
// key is pressed, even if there seems to be no apparent reason to do
// so, as a workaround we always handle these up events.
if ([event type] == NSEventTypeKeyUp &&
([event modifierFlags] & (NSEventModifierFlagCommand | NSEventModifierFlagOption)))
handleKeyEvent(event);
[NSApp sendEvent:event];
}
[pool drain];
} while (event != nil);
#if 0
} while (waitForEvent && !anyProcessed); // Needed only for timer implementation
@@ -1666,8 +1677,10 @@ GHOST_TSuccess GHOST_SystemCocoa::handleMouseEvent(void *eventPtr)
NSEventPhase momentumPhase = NSEventPhaseNone;
NSEventPhase phase = NSEventPhaseNone;
momentumPhase = [event momentumPhase];
phase = [event phase];
if ([event respondsToSelector:@selector(momentumPhase)])
momentumPhase = [event momentumPhase];
if ([event respondsToSelector:@selector(phase)])
phase = [event phase];
/* when pressing a key while momentum scrolling continues after
* lifting fingers off the trackpad, the action can unexpectedly
@@ -1940,48 +1953,78 @@ GHOST_TUns8 *GHOST_SystemCocoa::getClipboard(bool selection) const
GHOST_TUns8 *temp_buff;
size_t pastedTextSize;
@autoreleasepool {
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
NSPasteboard *pasteBoard = [NSPasteboard generalPasteboard];
NSPasteboard *pasteBoard = [NSPasteboard generalPasteboard];
NSString *textPasted = [pasteBoard stringForType:NSStringPboardType];
if (pasteBoard == nil) {
[pool drain];
return NULL;
}
if (textPasted == nil) {
return NULL;
}
NSArray *supportedTypes = [NSArray arrayWithObjects:NSStringPboardType, nil];
pastedTextSize = [textPasted lengthOfBytesUsingEncoding:NSUTF8StringEncoding];
NSString *bestType = [[NSPasteboard generalPasteboard] availableTypeFromArray:supportedTypes];
temp_buff = (GHOST_TUns8 *)malloc(pastedTextSize + 1);
if (bestType == nil) {
[pool drain];
return NULL;
}
if (temp_buff == NULL) {
return NULL;
}
NSString *textPasted = [pasteBoard stringForType:NSStringPboardType];
strncpy(
(char *)temp_buff, [textPasted cStringUsingEncoding:NSUTF8StringEncoding], pastedTextSize);
if (textPasted == nil) {
[pool drain];
return NULL;
}
temp_buff[pastedTextSize] = '\0';
pastedTextSize = [textPasted lengthOfBytesUsingEncoding:NSUTF8StringEncoding];
if (temp_buff) {
return temp_buff;
}
else {
return NULL;
}
temp_buff = (GHOST_TUns8 *)malloc(pastedTextSize + 1);
if (temp_buff == NULL) {
[pool drain];
return NULL;
}
strncpy(
(char *)temp_buff, [textPasted cStringUsingEncoding:NSUTF8StringEncoding], pastedTextSize);
temp_buff[pastedTextSize] = '\0';
[pool drain];
if (temp_buff) {
return temp_buff;
}
else {
return NULL;
}
}
void GHOST_SystemCocoa::putClipboard(GHOST_TInt8 *buffer, bool selection) const
{
NSString *textToCopy;
if (selection)
return; // for copying the selection, used on X11
@autoreleasepool {
NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
NSPasteboard *pasteBoard = NSPasteboard.generalPasteboard;
[pasteBoard declareTypes:@[ NSStringPboardType ] owner:nil];
NSString *textToCopy = [NSString stringWithCString:buffer encoding:NSUTF8StringEncoding];
[pasteBoard setString:textToCopy forType:NSStringPboardType];
NSPasteboard *pasteBoard = [NSPasteboard generalPasteboard];
if (pasteBoard == nil) {
[pool drain];
return;
}
NSArray *supportedTypes = [NSArray arrayWithObject:NSStringPboardType];
[pasteBoard declareTypes:supportedTypes owner:nil];
textToCopy = [NSString stringWithCString:buffer encoding:NSUTF8StringEncoding];
[pasteBoard setString:textToCopy forType:NSStringPboardType];
[pool drain];
}

View File

@@ -34,9 +34,9 @@
#ifdef WITH_X11_XINPUT
# include <X11/extensions/XInput.h>
/* Disable XINPUT warp, currently not implemented by Xorg for multi-head display.
* (see comment in XSERVER `Xi/xiwarppointer.c` -> `FIXME: panoramix stuff is missing` ~ v1.13.4)
* If this is supported we can add back XINPUT for warping (fixing T48901).
/* Disable xinput warp, currently not implemented by Xorg for multi-head display.
* (see comment in xserver "Xi/xiwarppointer.c" -> "FIXME: panoramix stuff is missing" ~ v1.13.4)
* If this is supported we can add back xinput for warping (fixing T48901).
* For now disable (see T50383). */
// # define USE_X11_XINPUT_WARP
#endif

View File

@@ -92,11 +92,16 @@ GHOST_WindowWin32::GHOST_WindowWin32(GHOST_SystemWin32 *system,
{
wchar_t *title_16 = alloc_utf16_from_8((char *)title, 0);
RECT win_rect = {left, top, (long)(left + width), (long)(top + height)};
RECT parent_rect = {0, 0, 0, 0};
// Initialize tablet variables
memset(&m_wintab, 0, sizeof(m_wintab));
m_tabletData = GHOST_TABLET_DATA_NONE;
if (parentwindow) {
GetWindowRect(m_parentWindowHwnd, &parent_rect);
}
DWORD style = parentwindow ?
WS_POPUPWINDOW | WS_CAPTION | WS_MAXIMIZEBOX | WS_MINIMIZEBOX | WS_SIZEBOX :
WS_OVERLAPPEDWINDOW;
@@ -119,7 +124,9 @@ GHOST_WindowWin32::GHOST_WindowWin32(GHOST_SystemWin32 *system,
MONITORINFOEX monitor;
monitor.cbSize = sizeof(MONITORINFOEX);
monitor.dwFlags = 0;
GetMonitorInfo(MonitorFromRect(&win_rect, MONITOR_DEFAULTTONEAREST), &monitor);
GetMonitorInfo(
MonitorFromRect(parentwindow ? &parent_rect : &win_rect, MONITOR_DEFAULTTONEAREST),
&monitor);
/* Adjust our requested size to allow for caption and borders and constrain to monitor. */
AdjustWindowRectEx(&win_rect, WS_CAPTION, FALSE, 0);
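The hunk above changes which rectangle picks the target monitor: when a parent window exists, the parent's rectangle is used so the new window opens on the same monitor. A hedged standalone sketch of that selection in plain Win32 (helper name is illustrative, not the GHOST code):

#include <windows.h>

/* Pick the monitor that should host a new window: prefer the parent window's
 * rectangle when a parent exists, otherwise use the caller-requested rectangle. */
static MONITORINFOEX monitor_for_new_window(HWND parent, const RECT &requested_rect)
{
  RECT parent_rect = {0, 0, 0, 0};
  if (parent) {
    GetWindowRect(parent, &parent_rect);
  }
  MONITORINFOEX monitor;
  monitor.cbSize = sizeof(MONITORINFOEX);
  monitor.dwFlags = 0;
  GetMonitorInfo(MonitorFromRect(parent ? &parent_rect : &requested_rect, MONITOR_DEFAULTTONEAREST),
                 &monitor);
  return monitor;
}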
@@ -1193,7 +1200,7 @@ GHOST_TSuccess GHOST_WindowWin32::setWindowCustomCursorShape(GHOST_TUns8 *bitmap
GHOST_TUns32 fullBitRow, fullMaskRow;
int x, y, cols;
cols = sizeX / 8; /* Number of whole bytes per row (width of bitmap/mask). */
cols = sizeX / 8; /* Number of whole bytes per row (width of bm/mask). */
if (sizeX % 8)
cols++;
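The two lines above round the pixel width up to whole bytes for a 1-bit-per-pixel mask row; for example a 12-pixel-wide cursor needs 12 / 8 = 1 byte plus one more for the remaining 4 bits, i.e. 2 bytes. An equivalent one-step form (illustrative helper, not part of the patch):

/* Whole bytes per row of a 1-bpp bitmap: round the pixel width up to a
 * multiple of 8 bits. E.g. width 12 -> (12 + 7) / 8 = 2 bytes per row. */
static int bytes_per_row_1bpp(int width_px)
{
  return (width_px + 7) / 8;
}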

View File

@@ -77,7 +77,6 @@ if(WITH_GTESTS)
set(TEST_SRC
tests/guardedalloc_alignment_test.cc
tests/guardedalloc_overflow_test.cc
tests/guardedalloc_test_base.h
)
set(TEST_INC
../../source/blender/blenlib

View File

@@ -1,34 +1,2 @@
BasedOnStyle: Google
ColumnLimit: 80
Standard: Cpp11
# Indent nested preprocessor.
# #ifdef Foo
# # include <nested>
# #endif
IndentPPDirectives: AfterHash
# For the cases when namespace is closing with a wrong comment
FixNamespaceComments: true
AllowShortFunctionsOnASingleLine: InlineOnly
AllowShortBlocksOnASingleLine: false
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: true
# No bin packing, every argument is on its own line.
BinPackArguments: false
BinPackParameters: false
# Ensure pointer alignment.
# ObjectType* object;
PointerAlignment: Left
DerivePointerAlignment: false
AlignEscapedNewlines: Right
IncludeBlocks: Preserve
SortIncludes: true
DisableFormat: true
SortIncludes: false

View File

@@ -22,15 +22,15 @@
#include "intern/utildefines.h"
#include "libmv/autotrack/autotrack.h"
using libmv::TrackRegionOptions;
using libmv::TrackRegionResult;
using mv::AutoTrack;
using mv::FrameAccessor;
using mv::Marker;
using libmv::TrackRegionOptions;
using libmv::TrackRegionResult;
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* frame_accessor) {
return (libmv_AutoTrack*)LIBMV_OBJECT_NEW(AutoTrack,
(FrameAccessor*)frame_accessor);
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor *frame_accessor) {
return (libmv_AutoTrack*) LIBMV_OBJECT_NEW(AutoTrack,
(FrameAccessor*) frame_accessor);
}
void libmv_autoTrackDestroy(libmv_AutoTrack* libmv_autotrack) {
@@ -39,7 +39,7 @@ void libmv_autoTrackDestroy(libmv_AutoTrack* libmv_autotrack) {
void libmv_autoTrackSetOptions(libmv_AutoTrack* libmv_autotrack,
const libmv_AutoTrackOptions* options) {
AutoTrack* autotrack = ((AutoTrack*)libmv_autotrack);
AutoTrack *autotrack = ((AutoTrack*) libmv_autotrack);
libmv_configureTrackRegionOptions(options->track_region,
&autotrack->options.track_region);
@@ -51,15 +51,18 @@ void libmv_autoTrackSetOptions(libmv_AutoTrack* libmv_autotrack,
int libmv_autoTrackMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_TrackRegionOptions* libmv_options,
libmv_Marker* libmv_tracked_marker,
libmv_Marker *libmv_tracked_marker,
libmv_TrackRegionResult* libmv_result) {
Marker tracked_marker;
TrackRegionOptions options;
TrackRegionResult result;
libmv_apiMarkerToMarker(*libmv_tracked_marker, &tracked_marker);
libmv_configureTrackRegionOptions(*libmv_options, &options);
bool ok = (((AutoTrack*)libmv_autotrack)
->TrackMarker(&tracked_marker, &result, &options));
libmv_configureTrackRegionOptions(*libmv_options,
&options);
bool ok = (((AutoTrack*) libmv_autotrack)->TrackMarker(&tracked_marker,
&result,
&options));
libmv_markerToApiMarker(tracked_marker, libmv_tracked_marker);
libmv_regionTrackergetResult(result, libmv_result);
return ok && result.is_usable();
@@ -69,7 +72,7 @@ void libmv_autoTrackAddMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_Marker* libmv_marker) {
Marker marker;
libmv_apiMarkerToMarker(*libmv_marker, &marker);
((AutoTrack*)libmv_autotrack)->AddMarker(marker);
((AutoTrack*) libmv_autotrack)->AddMarker(marker);
}
void libmv_autoTrackSetMarkers(libmv_AutoTrack* libmv_autotrack,
@@ -84,17 +87,19 @@ void libmv_autoTrackSetMarkers(libmv_AutoTrack* libmv_autotrack,
for (size_t i = 0; i < num_markers; ++i) {
libmv_apiMarkerToMarker(libmv_marker[i], &markers[i]);
}
((AutoTrack*)libmv_autotrack)->SetMarkers(&markers);
((AutoTrack*) libmv_autotrack)->SetMarkers(&markers);
}
int libmv_autoTrackGetMarker(libmv_AutoTrack* libmv_autotrack,
int clip,
int frame,
int track,
libmv_Marker* libmv_marker) {
libmv_Marker *libmv_marker) {
Marker marker;
int ok =
((AutoTrack*)libmv_autotrack)->GetMarker(clip, frame, track, &marker);
int ok = ((AutoTrack*) libmv_autotrack)->GetMarker(clip,
frame,
track,
&marker);
if (ok) {
libmv_markerToApiMarker(marker, libmv_marker);
}

View File

@@ -21,9 +21,9 @@
#define LIBMV_C_API_AUTOTRACK_H_
#include "intern/frame_accessor.h"
#include "intern/region.h"
#include "intern/track_region.h"
#include "intern/tracksN.h"
#include "intern/track_region.h"
#include "intern/region.h"
#ifdef __cplusplus
extern "C" {
@@ -36,7 +36,7 @@ typedef struct libmv_AutoTrackOptions {
libmv_Region search_region;
} libmv_AutoTrackOptions;
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* frame_accessor);
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor *frame_accessor);
void libmv_autoTrackDestroy(libmv_AutoTrack* libmv_autotrack);
@@ -45,7 +45,7 @@ void libmv_autoTrackSetOptions(libmv_AutoTrack* libmv_autotrack,
int libmv_autoTrackMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_TrackRegionOptions* libmv_options,
libmv_Marker* libmv_tracker_marker,
libmv_Marker *libmv_tracker_marker,
libmv_TrackRegionResult* libmv_result);
void libmv_autoTrackAddMarker(libmv_AutoTrack* libmv_autotrack,
@@ -59,7 +59,7 @@ int libmv_autoTrackGetMarker(libmv_AutoTrack* libmv_autotrack,
int clip,
int frame,
int track,
libmv_Marker* libmv_marker);
libmv_Marker *libmv_marker);
#ifdef __cplusplus
}
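As a hedged usage sketch of the auto-track C-API declared above (the include path is assumed, and the frame accessor, track-region options and marker are prepared elsewhere, since their struct layouts are not part of this hunk):

#include "intern/autotrack.h" /* assumed header path for the declarations above */

/* Create an auto-track around an existing frame accessor, register a reference
 * marker, track it, and free the tracker. Returns non-zero on a usable result. */
int track_one_marker(libmv_FrameAccessor *accessor,
                     const libmv_TrackRegionOptions *options,
                     libmv_Marker *marker)
{
  libmv_AutoTrack *autotrack = libmv_autoTrackNew(accessor);
  libmv_TrackRegionResult result;

  libmv_autoTrackAddMarker(autotrack, marker);
  int ok = libmv_autoTrackMarker(autotrack, options, marker, &result);

  libmv_autoTrackDestroy(autotrack);
  return ok;
}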

View File

@@ -21,56 +21,62 @@
#include "intern/utildefines.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
using libmv::BrownCameraIntrinsics;
using libmv::CameraIntrinsics;
using libmv::DivisionCameraIntrinsics;
using libmv::NukeCameraIntrinsics;
using libmv::PolynomialCameraIntrinsics;
using libmv::NukeCameraIntrinsics;
using libmv::BrownCameraIntrinsics;
libmv_CameraIntrinsics* libmv_cameraIntrinsicsNew(
libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options) {
CameraIntrinsics* camera_intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
return (libmv_CameraIntrinsics*)camera_intrinsics;
CameraIntrinsics *camera_intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
return (libmv_CameraIntrinsics *) camera_intrinsics;
}
libmv_CameraIntrinsics* libmv_cameraIntrinsicsCopy(
libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
const libmv_CameraIntrinsics* libmv_intrinsics) {
const CameraIntrinsics* orig_intrinsics =
(const CameraIntrinsics*)libmv_intrinsics;
const CameraIntrinsics *orig_intrinsics =
(const CameraIntrinsics *) libmv_intrinsics;
CameraIntrinsics* new_intrinsics = NULL;
CameraIntrinsics *new_intrinsics = NULL;
switch (orig_intrinsics->GetDistortionModelType()) {
case libmv::DISTORTION_MODEL_POLYNOMIAL: {
const PolynomialCameraIntrinsics* polynomial_intrinsics =
case libmv::DISTORTION_MODEL_POLYNOMIAL:
{
const PolynomialCameraIntrinsics *polynomial_intrinsics =
static_cast<const PolynomialCameraIntrinsics*>(orig_intrinsics);
new_intrinsics =
LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics, *polynomial_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_DIVISION: {
const DivisionCameraIntrinsics* division_intrinsics =
new_intrinsics = LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics,
*polynomial_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_DIVISION:
{
const DivisionCameraIntrinsics *division_intrinsics =
static_cast<const DivisionCameraIntrinsics*>(orig_intrinsics);
new_intrinsics =
LIBMV_OBJECT_NEW(DivisionCameraIntrinsics, *division_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_NUKE: {
const NukeCameraIntrinsics* nuke_intrinsics =
new_intrinsics = LIBMV_OBJECT_NEW(DivisionCameraIntrinsics,
*division_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_NUKE:
{
const NukeCameraIntrinsics *nuke_intrinsics =
static_cast<const NukeCameraIntrinsics*>(orig_intrinsics);
new_intrinsics = LIBMV_OBJECT_NEW(NukeCameraIntrinsics, *nuke_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_BROWN: {
const BrownCameraIntrinsics* brown_intrinsics =
new_intrinsics = LIBMV_OBJECT_NEW(NukeCameraIntrinsics,
*nuke_intrinsics);
break;
}
case libmv::DISTORTION_MODEL_BROWN:
{
const BrownCameraIntrinsics *brown_intrinsics =
static_cast<const BrownCameraIntrinsics*>(orig_intrinsics);
new_intrinsics =
LIBMV_OBJECT_NEW(BrownCameraIntrinsics, *brown_intrinsics);
break;
}
default: assert(!"Unknown distortion model");
new_intrinsics = LIBMV_OBJECT_NEW(BrownCameraIntrinsics,
*brown_intrinsics);
break;
}
default:
assert(!"Unknown distortion model");
}
return (libmv_CameraIntrinsics*)new_intrinsics;
return (libmv_CameraIntrinsics *) new_intrinsics;
}
void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics* libmv_intrinsics) {
@@ -80,7 +86,7 @@ void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics* libmv_intrinsics) {
void libmv_cameraIntrinsicsUpdate(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
libmv_CameraIntrinsics* libmv_intrinsics) {
CameraIntrinsics* camera_intrinsics = (CameraIntrinsics*)libmv_intrinsics;
CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
double focal_length = libmv_camera_intrinsics_options->focal_length;
double principal_x = libmv_camera_intrinsics_options->principal_point_x;
@@ -109,173 +115,191 @@ void libmv_cameraIntrinsicsUpdate(
}
switch (libmv_camera_intrinsics_options->distortion_model) {
case LIBMV_DISTORTION_MODEL_POLYNOMIAL: {
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_POLYNOMIAL);
case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
{
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_POLYNOMIAL);
PolynomialCameraIntrinsics* polynomial_intrinsics =
(PolynomialCameraIntrinsics*)camera_intrinsics;
PolynomialCameraIntrinsics *polynomial_intrinsics =
(PolynomialCameraIntrinsics *) camera_intrinsics;
double k1 = libmv_camera_intrinsics_options->polynomial_k1;
double k2 = libmv_camera_intrinsics_options->polynomial_k2;
double k3 = libmv_camera_intrinsics_options->polynomial_k3;
double k1 = libmv_camera_intrinsics_options->polynomial_k1;
double k2 = libmv_camera_intrinsics_options->polynomial_k2;
double k3 = libmv_camera_intrinsics_options->polynomial_k3;
if (polynomial_intrinsics->k1() != k1 ||
polynomial_intrinsics->k2() != k2 ||
polynomial_intrinsics->k3() != k3) {
polynomial_intrinsics->SetRadialDistortion(k1, k2, k3);
}
break;
}
case LIBMV_DISTORTION_MODEL_DIVISION: {
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_DIVISION);
DivisionCameraIntrinsics* division_intrinsics =
(DivisionCameraIntrinsics*)camera_intrinsics;
double k1 = libmv_camera_intrinsics_options->division_k1;
double k2 = libmv_camera_intrinsics_options->division_k2;
if (division_intrinsics->k1() != k1 || division_intrinsics->k2() != k2) {
division_intrinsics->SetDistortion(k1, k2);
if (polynomial_intrinsics->k1() != k1 ||
polynomial_intrinsics->k2() != k2 ||
polynomial_intrinsics->k3() != k3) {
polynomial_intrinsics->SetRadialDistortion(k1, k2, k3);
}
break;
}
break;
}
case LIBMV_DISTORTION_MODEL_DIVISION:
{
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_DIVISION);
case LIBMV_DISTORTION_MODEL_NUKE: {
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_NUKE);
DivisionCameraIntrinsics *division_intrinsics =
(DivisionCameraIntrinsics *) camera_intrinsics;
NukeCameraIntrinsics* nuke_intrinsics =
(NukeCameraIntrinsics*)camera_intrinsics;
double k1 = libmv_camera_intrinsics_options->division_k1;
double k2 = libmv_camera_intrinsics_options->division_k2;
double k1 = libmv_camera_intrinsics_options->nuke_k1;
double k2 = libmv_camera_intrinsics_options->nuke_k2;
if (division_intrinsics->k1() != k1 ||
division_intrinsics->k2() != k2) {
division_intrinsics->SetDistortion(k1, k2);
}
if (nuke_intrinsics->k1() != k1 || nuke_intrinsics->k2() != k2) {
nuke_intrinsics->SetDistortion(k1, k2);
break;
}
break;
}
case LIBMV_DISTORTION_MODEL_NUKE:
{
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_NUKE);
case LIBMV_DISTORTION_MODEL_BROWN: {
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_BROWN);
NukeCameraIntrinsics *nuke_intrinsics =
(NukeCameraIntrinsics *) camera_intrinsics;
BrownCameraIntrinsics* brown_intrinsics =
(BrownCameraIntrinsics*)camera_intrinsics;
double k1 = libmv_camera_intrinsics_options->nuke_k1;
double k2 = libmv_camera_intrinsics_options->nuke_k2;
double k1 = libmv_camera_intrinsics_options->brown_k1;
double k2 = libmv_camera_intrinsics_options->brown_k2;
double k3 = libmv_camera_intrinsics_options->brown_k3;
double k4 = libmv_camera_intrinsics_options->brown_k4;
if (nuke_intrinsics->k1() != k1 ||
nuke_intrinsics->k2() != k2) {
nuke_intrinsics->SetDistortion(k1, k2);
}
if (brown_intrinsics->k1() != k1 || brown_intrinsics->k2() != k2 ||
brown_intrinsics->k3() != k3 || brown_intrinsics->k4() != k4) {
brown_intrinsics->SetRadialDistortion(k1, k2, k3, k4);
break;
}
double p1 = libmv_camera_intrinsics_options->brown_p1;
double p2 = libmv_camera_intrinsics_options->brown_p2;
case LIBMV_DISTORTION_MODEL_BROWN:
{
assert(camera_intrinsics->GetDistortionModelType() ==
libmv::DISTORTION_MODEL_BROWN);
if (brown_intrinsics->p1() != p1 || brown_intrinsics->p2() != p2) {
brown_intrinsics->SetTangentialDistortion(p1, p2);
BrownCameraIntrinsics *brown_intrinsics =
(BrownCameraIntrinsics *) camera_intrinsics;
double k1 = libmv_camera_intrinsics_options->brown_k1;
double k2 = libmv_camera_intrinsics_options->brown_k2;
double k3 = libmv_camera_intrinsics_options->brown_k3;
double k4 = libmv_camera_intrinsics_options->brown_k4;
if (brown_intrinsics->k1() != k1 ||
brown_intrinsics->k2() != k2 ||
brown_intrinsics->k3() != k3 ||
brown_intrinsics->k4() != k4) {
brown_intrinsics->SetRadialDistortion(k1, k2, k3, k4);
}
double p1 = libmv_camera_intrinsics_options->brown_p1;
double p2 = libmv_camera_intrinsics_options->brown_p2;
if (brown_intrinsics->p1() != p1 || brown_intrinsics->p2() != p2) {
brown_intrinsics->SetTangentialDistortion(p1, p2);
}
break;
}
break;
}
default: assert(!"Unknown distortion model");
default:
assert(!"Unknown distortion model");
}
}
void libmv_cameraIntrinsicsSetThreads(libmv_CameraIntrinsics* libmv_intrinsics,
int threads) {
CameraIntrinsics* camera_intrinsics = (CameraIntrinsics*)libmv_intrinsics;
CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
camera_intrinsics->SetThreads(threads);
}
void libmv_cameraIntrinsicsExtractOptions(
const libmv_CameraIntrinsics* libmv_intrinsics,
libmv_CameraIntrinsicsOptions* camera_intrinsics_options) {
const CameraIntrinsics* camera_intrinsics =
(const CameraIntrinsics*)libmv_intrinsics;
const CameraIntrinsics *camera_intrinsics =
(const CameraIntrinsics *) libmv_intrinsics;
// Fill in options which are common for all distortion models.
camera_intrinsics_options->focal_length = camera_intrinsics->focal_length();
camera_intrinsics_options->principal_point_x =
camera_intrinsics->principal_point_x();
camera_intrinsics->principal_point_x();
camera_intrinsics_options->principal_point_y =
camera_intrinsics->principal_point_y();
camera_intrinsics->principal_point_y();
camera_intrinsics_options->image_width = camera_intrinsics->image_width();
camera_intrinsics_options->image_height = camera_intrinsics->image_height();
switch (camera_intrinsics->GetDistortionModelType()) {
case libmv::DISTORTION_MODEL_POLYNOMIAL: {
const PolynomialCameraIntrinsics* polynomial_intrinsics =
static_cast<const PolynomialCameraIntrinsics*>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
case libmv::DISTORTION_MODEL_POLYNOMIAL:
{
const PolynomialCameraIntrinsics *polynomial_intrinsics =
static_cast<const PolynomialCameraIntrinsics *>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_POLYNOMIAL;
camera_intrinsics_options->polynomial_k1 = polynomial_intrinsics->k1();
camera_intrinsics_options->polynomial_k2 = polynomial_intrinsics->k2();
camera_intrinsics_options->polynomial_k3 = polynomial_intrinsics->k3();
camera_intrinsics_options->polynomial_p1 = polynomial_intrinsics->p1();
camera_intrinsics_options->polynomial_p2 = polynomial_intrinsics->p2();
break;
}
camera_intrinsics_options->polynomial_k1 = polynomial_intrinsics->k1();
camera_intrinsics_options->polynomial_k2 = polynomial_intrinsics->k2();
camera_intrinsics_options->polynomial_k3 = polynomial_intrinsics->k3();
camera_intrinsics_options->polynomial_p1 = polynomial_intrinsics->p1();
camera_intrinsics_options->polynomial_p2 = polynomial_intrinsics->p2();
break;
}
case libmv::DISTORTION_MODEL_DIVISION: {
const DivisionCameraIntrinsics* division_intrinsics =
static_cast<const DivisionCameraIntrinsics*>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
case libmv::DISTORTION_MODEL_DIVISION:
{
const DivisionCameraIntrinsics *division_intrinsics =
static_cast<const DivisionCameraIntrinsics *>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_DIVISION;
camera_intrinsics_options->division_k1 = division_intrinsics->k1();
camera_intrinsics_options->division_k2 = division_intrinsics->k2();
break;
}
camera_intrinsics_options->division_k1 = division_intrinsics->k1();
camera_intrinsics_options->division_k2 = division_intrinsics->k2();
break;
}
case libmv::DISTORTION_MODEL_NUKE: {
const NukeCameraIntrinsics* nuke_intrinsics =
static_cast<const NukeCameraIntrinsics*>(camera_intrinsics);
camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_NUKE;
camera_intrinsics_options->nuke_k1 = nuke_intrinsics->k1();
camera_intrinsics_options->nuke_k2 = nuke_intrinsics->k2();
break;
}
case libmv::DISTORTION_MODEL_NUKE:
{
const NukeCameraIntrinsics *nuke_intrinsics =
static_cast<const NukeCameraIntrinsics *>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_NUKE;
camera_intrinsics_options->nuke_k1 = nuke_intrinsics->k1();
camera_intrinsics_options->nuke_k2 = nuke_intrinsics->k2();
break;
}
case libmv::DISTORTION_MODEL_BROWN: {
const BrownCameraIntrinsics* brown_intrinsics =
static_cast<const BrownCameraIntrinsics*>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
case libmv::DISTORTION_MODEL_BROWN:
{
const BrownCameraIntrinsics *brown_intrinsics =
static_cast<const BrownCameraIntrinsics *>(camera_intrinsics);
camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_BROWN;
camera_intrinsics_options->brown_k1 = brown_intrinsics->k1();
camera_intrinsics_options->brown_k2 = brown_intrinsics->k2();
camera_intrinsics_options->brown_k3 = brown_intrinsics->k3();
camera_intrinsics_options->brown_k4 = brown_intrinsics->k4();
camera_intrinsics_options->brown_p1 = brown_intrinsics->p1();
camera_intrinsics_options->brown_p2 = brown_intrinsics->p2();
break;
}
camera_intrinsics_options->brown_k1 = brown_intrinsics->k1();
camera_intrinsics_options->brown_k2 = brown_intrinsics->k2();
camera_intrinsics_options->brown_k3 = brown_intrinsics->k3();
camera_intrinsics_options->brown_k4 = brown_intrinsics->k4();
camera_intrinsics_options->brown_p1 = brown_intrinsics->p1();
camera_intrinsics_options->brown_p2 = brown_intrinsics->p2();
break;
}
default: assert(!"Unknown distortion model");
default:
assert(!"Unknown distortion model");
}
}
void libmv_cameraIntrinsicsUndistortByte(
const libmv_CameraIntrinsics* libmv_intrinsics,
const unsigned char* source_image,
const unsigned char *source_image,
int width,
int height,
float overscan,
int channels,
unsigned char* destination_image) {
CameraIntrinsics* camera_intrinsics = (CameraIntrinsics*)libmv_intrinsics;
camera_intrinsics->UndistortBuffer(
source_image, width, height, overscan, channels, destination_image);
CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
camera_intrinsics->UndistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
}
void libmv_cameraIntrinsicsUndistortFloat(
@@ -286,22 +310,28 @@ void libmv_cameraIntrinsicsUndistortFloat(
float overscan,
int channels,
float* destination_image) {
CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
intrinsics->UndistortBuffer(
source_image, width, height, overscan, channels, destination_image);
CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
intrinsics->UndistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
}
void libmv_cameraIntrinsicsDistortByte(
const struct libmv_CameraIntrinsics* libmv_intrinsics,
const unsigned char* source_image,
const unsigned char *source_image,
int width,
int height,
float overscan,
int channels,
unsigned char* destination_image) {
CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
intrinsics->DistortBuffer(
source_image, width, height, overscan, channels, destination_image);
unsigned char *destination_image) {
CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
intrinsics->DistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
}
void libmv_cameraIntrinsicsDistortFloat(
@@ -312,9 +342,12 @@ void libmv_cameraIntrinsicsDistortFloat(
float overscan,
int channels,
float* destination_image) {
CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
intrinsics->DistortBuffer(
source_image, width, height, overscan, channels, destination_image);
CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
intrinsics->DistortBuffer(source_image,
width, height,
overscan,
channels,
destination_image);
}
void libmv_cameraIntrinsicsApply(
@@ -323,7 +356,7 @@ void libmv_cameraIntrinsicsApply(
double y,
double* x1,
double* y1) {
CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
intrinsics->ApplyIntrinsics(x, y, x1, y1);
}
@@ -333,7 +366,7 @@ void libmv_cameraIntrinsicsInvert(
double y,
double* x1,
double* y1) {
CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
intrinsics->InvertIntrinsics(x, y, x1, y1);
}
@@ -348,63 +381,69 @@ static void libmv_cameraIntrinsicsFillFromOptions(
camera_intrinsics_options->principal_point_y);
camera_intrinsics->SetImageSize(camera_intrinsics_options->image_width,
camera_intrinsics_options->image_height);
camera_intrinsics_options->image_height);
switch (camera_intrinsics_options->distortion_model) {
case LIBMV_DISTORTION_MODEL_POLYNOMIAL: {
PolynomialCameraIntrinsics* polynomial_intrinsics =
case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
{
PolynomialCameraIntrinsics *polynomial_intrinsics =
static_cast<PolynomialCameraIntrinsics*>(camera_intrinsics);
polynomial_intrinsics->SetRadialDistortion(
camera_intrinsics_options->polynomial_k1,
camera_intrinsics_options->polynomial_k2,
camera_intrinsics_options->polynomial_k3);
polynomial_intrinsics->SetRadialDistortion(
camera_intrinsics_options->polynomial_k1,
camera_intrinsics_options->polynomial_k2,
camera_intrinsics_options->polynomial_k3);
break;
}
break;
}
case LIBMV_DISTORTION_MODEL_DIVISION: {
DivisionCameraIntrinsics* division_intrinsics =
case LIBMV_DISTORTION_MODEL_DIVISION:
{
DivisionCameraIntrinsics *division_intrinsics =
static_cast<DivisionCameraIntrinsics*>(camera_intrinsics);
division_intrinsics->SetDistortion(
camera_intrinsics_options->division_k1,
camera_intrinsics_options->division_k2);
break;
}
division_intrinsics->SetDistortion(
camera_intrinsics_options->division_k1,
camera_intrinsics_options->division_k2);
break;
}
case LIBMV_DISTORTION_MODEL_NUKE: {
NukeCameraIntrinsics* nuke_intrinsics =
case LIBMV_DISTORTION_MODEL_NUKE:
{
NukeCameraIntrinsics *nuke_intrinsics =
static_cast<NukeCameraIntrinsics*>(camera_intrinsics);
nuke_intrinsics->SetDistortion(camera_intrinsics_options->nuke_k1,
camera_intrinsics_options->nuke_k2);
break;
}
nuke_intrinsics->SetDistortion(
camera_intrinsics_options->nuke_k1,
camera_intrinsics_options->nuke_k2);
break;
}
case LIBMV_DISTORTION_MODEL_BROWN: {
BrownCameraIntrinsics* brown_intrinsics =
case LIBMV_DISTORTION_MODEL_BROWN:
{
BrownCameraIntrinsics *brown_intrinsics =
static_cast<BrownCameraIntrinsics*>(camera_intrinsics);
brown_intrinsics->SetRadialDistortion(
camera_intrinsics_options->brown_k1,
camera_intrinsics_options->brown_k2,
camera_intrinsics_options->brown_k3,
camera_intrinsics_options->brown_k4);
brown_intrinsics->SetTangentialDistortion(
brown_intrinsics->SetRadialDistortion(
camera_intrinsics_options->brown_k1,
camera_intrinsics_options->brown_k2,
camera_intrinsics_options->brown_k3,
camera_intrinsics_options->brown_k4);
brown_intrinsics->SetTangentialDistortion(
camera_intrinsics_options->brown_p1,
camera_intrinsics_options->brown_p2);
break;
}
break;
}
default: assert(!"Unknown distortion model");
default:
assert(!"Unknown distortion model");
}
}
CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
const libmv_CameraIntrinsicsOptions* camera_intrinsics_options) {
CameraIntrinsics* camera_intrinsics = NULL;
CameraIntrinsics *camera_intrinsics = NULL;
switch (camera_intrinsics_options->distortion_model) {
case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
camera_intrinsics = LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics);
@@ -418,7 +457,8 @@ CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
case LIBMV_DISTORTION_MODEL_BROWN:
camera_intrinsics = LIBMV_OBJECT_NEW(BrownCameraIntrinsics);
break;
default: assert(!"Unknown distortion model");
default:
assert(!"Unknown distortion model");
}
libmv_cameraIntrinsicsFillFromOptions(camera_intrinsics_options,
camera_intrinsics);

View File

@@ -56,10 +56,10 @@ typedef struct libmv_CameraIntrinsicsOptions {
double brown_p1, brown_p2;
} libmv_CameraIntrinsicsOptions;
libmv_CameraIntrinsics* libmv_cameraIntrinsicsNew(
libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options);
libmv_CameraIntrinsics* libmv_cameraIntrinsicsCopy(
libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
const libmv_CameraIntrinsics* libmv_intrinsics);
void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics* libmv_intrinsics);
@@ -76,7 +76,7 @@ void libmv_cameraIntrinsicsExtractOptions(
void libmv_cameraIntrinsicsUndistortByte(
const libmv_CameraIntrinsics* libmv_intrinsics,
const unsigned char* source_image,
const unsigned char *source_image,
int width,
int height,
float overscan,
@@ -94,12 +94,12 @@ void libmv_cameraIntrinsicsUndistortFloat(
void libmv_cameraIntrinsicsDistortByte(
const struct libmv_CameraIntrinsics* libmv_intrinsics,
const unsigned char* source_image,
const unsigned char *source_image,
int width,
int height,
float overscan,
int channels,
unsigned char* destination_image);
unsigned char *destination_image);
void libmv_cameraIntrinsicsDistortFloat(
const libmv_CameraIntrinsics* libmv_intrinsics,
@@ -131,7 +131,7 @@ void libmv_cameraIntrinsicsInvert(
#ifdef __cplusplus
namespace libmv {
class CameraIntrinsics;
class CameraIntrinsics;
}
libmv::CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
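As a hedged usage sketch of this camera-intrinsics C-API (the include path and the RGBA channel count are assumptions; the function and field names all appear in the hunks above): fill the options for a polynomial model, create the intrinsics, undistort an 8-bit buffer, then free the object.

#include <cstring>

#include "intern/camera_intrinsics.h" /* assumed header path for the C-API above */

void undistort_rgba(const unsigned char *src, unsigned char *dst, int width, int height)
{
  libmv_CameraIntrinsicsOptions options;
  std::memset(&options, 0, sizeof(options));
  options.distortion_model = LIBMV_DISTORTION_MODEL_POLYNOMIAL;
  options.focal_length = 1000.0; /* illustrative values only */
  options.principal_point_x = width * 0.5;
  options.principal_point_y = height * 0.5;
  options.image_width = width;
  options.image_height = height;
  options.polynomial_k1 = -0.1;
  options.polynomial_k2 = 0.05;
  options.polynomial_k3 = 0.0;

  libmv_CameraIntrinsics *intrinsics = libmv_cameraIntrinsicsNew(&options);
  /* overscan = 0.0f, channels = 4 (RGBA assumed). */
  libmv_cameraIntrinsicsUndistortByte(intrinsics, src, width, height, 0.0f, 4, dst);
  libmv_cameraIntrinsicsDestroy(intrinsics);
}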

View File

@@ -34,7 +34,7 @@ struct libmv_Features {
namespace {
libmv_Features* libmv_featuresFromVector(
libmv_Features *libmv_featuresFromVector(
const libmv::vector<Feature>& features) {
libmv_Features* libmv_features = LIBMV_STRUCT_NEW(libmv_Features, 1);
int count = features.size();
@@ -50,12 +50,12 @@ libmv_Features* libmv_featuresFromVector(
return libmv_features;
}
void libmv_convertDetectorOptions(libmv_DetectOptions* options,
DetectOptions* detector_options) {
void libmv_convertDetectorOptions(libmv_DetectOptions *options,
DetectOptions *detector_options) {
switch (options->detector) {
#define LIBMV_CONVERT(the_detector) \
case LIBMV_DETECTOR_##the_detector: \
detector_options->type = DetectOptions::the_detector; \
#define LIBMV_CONVERT(the_detector) \
case LIBMV_DETECTOR_ ## the_detector: \
detector_options->type = DetectOptions::the_detector; \
break;
LIBMV_CONVERT(FAST)
LIBMV_CONVERT(MORAVEC)
@@ -72,7 +72,7 @@ void libmv_convertDetectorOptions(libmv_DetectOptions* options,
} // namespace
libmv_Features* libmv_detectFeaturesByte(const unsigned char* image_buffer,
libmv_Features *libmv_detectFeaturesByte(const unsigned char* image_buffer,
int width,
int height,
int channels,
@@ -133,7 +133,7 @@ void libmv_getFeature(const libmv_Features* libmv_features,
double* y,
double* score,
double* size) {
Feature& feature = libmv_features->features[number];
Feature &feature = libmv_features->features[number];
*x = feature.x;
*y = feature.y;
*score = feature.score;

View File

@@ -38,7 +38,7 @@ typedef struct libmv_DetectOptions {
int min_distance;
int fast_min_trackness;
int moravec_max_count;
unsigned char* moravec_pattern;
unsigned char *moravec_pattern;
double harris_threshold;
} libmv_DetectOptions;

View File

@@ -36,18 +36,20 @@ struct LibmvFrameAccessor : public FrameAccessor {
libmv_ReleaseImageCallback release_image_callback,
libmv_GetMaskForTrackCallback get_mask_for_track_callback,
libmv_ReleaseMaskCallback release_mask_callback)
: user_data_(user_data),
get_image_callback_(get_image_callback),
release_image_callback_(release_image_callback),
get_mask_for_track_callback_(get_mask_for_track_callback),
release_mask_callback_(release_mask_callback) {}
: user_data_(user_data),
get_image_callback_(get_image_callback),
release_image_callback_(release_image_callback),
get_mask_for_track_callback_(get_mask_for_track_callback),
release_mask_callback_(release_mask_callback) { }
virtual ~LibmvFrameAccessor() {}
virtual ~LibmvFrameAccessor() {
}
libmv_InputMode get_libmv_input_mode(InputMode input_mode) {
switch (input_mode) {
#define CHECK_INPUT_MODE(mode) \
case mode: return LIBMV_IMAGE_MODE_##mode;
#define CHECK_INPUT_MODE(mode) \
case mode: \
return LIBMV_IMAGE_MODE_ ## mode;
CHECK_INPUT_MODE(MONO)
CHECK_INPUT_MODE(RGBA)
#undef CHECK_INPUT_MODE
@@ -57,7 +59,8 @@ struct LibmvFrameAccessor : public FrameAccessor {
return LIBMV_IMAGE_MODE_MONO;
}
void get_libmv_region(const Region& region, libmv_Region* libmv_region) {
void get_libmv_region(const Region& region,
libmv_Region* libmv_region) {
libmv_region->min[0] = region.min(0);
libmv_region->min[1] = region.min(1);
libmv_region->max[0] = region.max(0);
@@ -71,7 +74,7 @@ struct LibmvFrameAccessor : public FrameAccessor {
const Region* region,
const Transform* transform,
FloatImage* destination) {
float* float_buffer;
float *float_buffer;
int width, height, channels;
libmv_Region libmv_region;
if (region) {
@@ -83,41 +86,46 @@ struct LibmvFrameAccessor : public FrameAccessor {
get_libmv_input_mode(input_mode),
downscale,
region != NULL ? &libmv_region : NULL,
(libmv_FrameTransform*)transform,
(libmv_FrameTransform*) transform,
&float_buffer,
&width,
&height,
&channels);
// TODO(sergey): Dumb code until we can set data directly.
FloatImage temp_image(float_buffer, height, width, channels);
FloatImage temp_image(float_buffer,
height,
width,
channels);
destination->CopyFrom(temp_image);
return cache_key;
}
void ReleaseImage(Key cache_key) { release_image_callback_(cache_key); }
void ReleaseImage(Key cache_key) {
release_image_callback_(cache_key);
}
Key GetMaskForTrack(int clip,
int frame,
int track,
const Region* region,
FloatImage* destination) {
float* float_buffer;
float *float_buffer;
int width, height;
libmv_Region libmv_region;
if (region) {
get_libmv_region(*region, &libmv_region);
}
Key cache_key =
get_mask_for_track_callback_(user_data_,
clip,
frame,
track,
region != NULL ? &libmv_region : NULL,
&float_buffer,
&width,
&height);
Key cache_key = get_mask_for_track_callback_(
user_data_,
clip,
frame,
track,
region != NULL ? &libmv_region : NULL,
&float_buffer,
&width,
&height);
if (cache_key == NULL) {
// No mask for the given track.
@@ -125,21 +133,30 @@ struct LibmvFrameAccessor : public FrameAccessor {
}
// TODO(sergey): Dumb code until we can set data directly.
FloatImage temp_image(float_buffer, height, width, 1);
FloatImage temp_image(float_buffer,
height,
width,
1);
destination->CopyFrom(temp_image);
return cache_key;
}
void ReleaseMask(Key key) { release_mask_callback_(key); }
void ReleaseMask(Key key) {
release_mask_callback_(key);
}
bool GetClipDimensions(int /*clip*/, int* /*width*/, int* /*height*/) {
bool GetClipDimensions(int /*clip*/, int * /*width*/, int * /*height*/) {
return false;
}
int NumClips() { return 1; }
int NumClips() {
return 1;
}
int NumFrames(int /*clip*/) { return 0; }
int NumFrames(int /*clip*/) {
return 0;
}
libmv_FrameAccessorUserData* user_data_;
libmv_GetImageCallback get_image_callback_;
@@ -156,35 +173,35 @@ libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_ReleaseImageCallback release_image_callback,
libmv_GetMaskForTrackCallback get_mask_for_track_callback,
libmv_ReleaseMaskCallback release_mask_callback) {
return (libmv_FrameAccessor*)LIBMV_OBJECT_NEW(LibmvFrameAccessor,
user_data,
get_image_callback,
release_image_callback,
get_mask_for_track_callback,
release_mask_callback);
return (libmv_FrameAccessor*) LIBMV_OBJECT_NEW(LibmvFrameAccessor,
user_data,
get_image_callback,
release_image_callback,
get_mask_for_track_callback,
release_mask_callback);
}
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* frame_accessor) {
LIBMV_OBJECT_DELETE(frame_accessor, LibmvFrameAccessor);
}
int64_t libmv_frameAccessorgetTransformKey(
const libmv_FrameTransform* transform) {
return ((FrameAccessor::Transform*)transform)->key();
int64_t libmv_frameAccessorgetTransformKey(const libmv_FrameTransform *transform) {
return ((FrameAccessor::Transform*) transform)->key();
}
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform* transform,
const libmv_FloatImage* input_image,
libmv_FloatImage* output_image) {
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform *transform,
const libmv_FloatImage *input_image,
libmv_FloatImage *output_image) {
const FloatImage input(input_image->buffer,
input_image->height,
input_image->width,
input_image->channels);
FloatImage output;
((FrameAccessor::Transform*)transform)->run(input, &output);
((FrameAccessor::Transform*) transform)->run(input,
&output);
int num_pixels = output.Width() * output.Height() * output.Depth();
int num_pixels = output.Width() *output.Height() * output.Depth();
output_image->buffer = new float[num_pixels];
memcpy(output_image->buffer, output.Data(), num_pixels * sizeof(float));
output_image->width = output.Width();

View File

@@ -32,14 +32,14 @@ extern "C" {
typedef struct libmv_FrameAccessor libmv_FrameAccessor;
typedef struct libmv_FrameTransform libmv_FrameTransform;
typedef struct libmv_FrameAccessorUserData libmv_FrameAccessorUserData;
typedef void* libmv_CacheKey;
typedef void *libmv_CacheKey;
typedef enum {
LIBMV_IMAGE_MODE_MONO,
LIBMV_IMAGE_MODE_RGBA,
} libmv_InputMode;
typedef libmv_CacheKey (*libmv_GetImageCallback)(
typedef libmv_CacheKey (*libmv_GetImageCallback) (
libmv_FrameAccessorUserData* user_data,
int clip,
int frame,
@@ -52,9 +52,9 @@ typedef libmv_CacheKey (*libmv_GetImageCallback)(
int* height,
int* channels);
typedef void (*libmv_ReleaseImageCallback)(libmv_CacheKey cache_key);
typedef void (*libmv_ReleaseImageCallback) (libmv_CacheKey cache_key);
typedef libmv_CacheKey (*libmv_GetMaskForTrackCallback)(
typedef libmv_CacheKey (*libmv_GetMaskForTrackCallback) (
libmv_FrameAccessorUserData* user_data,
int clip,
int frame,
@@ -63,7 +63,7 @@ typedef libmv_CacheKey (*libmv_GetMaskForTrackCallback)(
float** destination,
int* width,
int* height);
typedef void (*libmv_ReleaseMaskCallback)(libmv_CacheKey cache_key);
typedef void (*libmv_ReleaseMaskCallback) (libmv_CacheKey cache_key);
libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_FrameAccessorUserData* user_data,
@@ -73,12 +73,11 @@ libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_ReleaseMaskCallback release_mask_callback);
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* frame_accessor);
int64_t libmv_frameAccessorgetTransformKey(
const libmv_FrameTransform* transform);
int64_t libmv_frameAccessorgetTransformKey(const libmv_FrameTransform *transform);
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform* transform,
const libmv_FloatImage* input_image,
libmv_FloatImage* output_image);
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform *transform,
const libmv_FloatImage *input_image,
libmv_FloatImage *output_image);
#ifdef __cplusplus
}
#endif

View File

@@ -41,8 +41,10 @@ void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*x1)[2],
LG << "x2: " << x2_mat;
libmv::EstimateHomographyOptions options;
libmv::EstimateHomography2DFromCorrespondences(
x1_mat, x2_mat, options, &H_mat);
libmv::EstimateHomography2DFromCorrespondences(x1_mat,
x2_mat,
options,
&H_mat);
LG << "H: " << H_mat;

View File

@@ -21,14 +21,14 @@
#include "intern/utildefines.h"
#include "libmv/tracking/track_region.h"
#include <png.h>
#include <cassert>
#include <png.h>
using libmv::FloatImage;
using libmv::SamplePlanarPatch;
void libmv_floatImageDestroy(libmv_FloatImage* image) {
delete[] image->buffer;
void libmv_floatImageDestroy(libmv_FloatImage *image) {
delete [] image->buffer;
}
/* Image <-> buffers conversion */
@@ -63,7 +63,8 @@ void libmv_floatBufferToFloatImage(const float* buffer,
}
}
void libmv_floatImageToFloatBuffer(const FloatImage& image, float* buffer) {
void libmv_floatImageToFloatBuffer(const FloatImage &image,
float* buffer) {
for (int y = 0, a = 0; y < image.Height(); y++) {
for (int x = 0; x < image.Width(); x++) {
for (int k = 0; k < image.Depth(); k++) {
@@ -73,9 +74,9 @@ void libmv_floatImageToFloatBuffer(const FloatImage& image, float* buffer) {
}
}
void libmv_floatImageToByteBuffer(const libmv::FloatImage& image,
void libmv_floatImageToByteBuffer(const libmv::FloatImage &image,
unsigned char* buffer) {
for (int y = 0, a = 0; y < image.Height(); y++) {
for (int y = 0, a= 0; y < image.Height(); y++) {
for (int x = 0; x < image.Width(); x++) {
for (int k = 0; k < image.Depth(); k++) {
buffer[a++] = image(y, x, k) * 255.0f;
@@ -92,7 +93,7 @@ static bool savePNGImage(png_bytep* row_pointers,
const char* file_name) {
png_infop info_ptr;
png_structp png_ptr;
FILE* fp = fopen(file_name, "wb");
FILE *fp = fopen(file_name, "wb");
if (fp == NULL) {
return false;
@@ -152,7 +153,7 @@ bool libmv_saveImage(const FloatImage& image,
int x0,
int y0) {
int x, y;
png_bytep* row_pointers;
png_bytep *row_pointers;
assert(image.Depth() == 1);
@@ -179,8 +180,9 @@ bool libmv_saveImage(const FloatImage& image,
static int image_counter = 0;
char file_name[128];
snprintf(
file_name, sizeof(file_name), "%s_%02d.png", prefix, ++image_counter);
snprintf(file_name, sizeof(file_name),
"%s_%02d.png",
prefix, ++image_counter);
bool result = savePNGImage(row_pointers,
image.Width(),
image.Height(),
@@ -189,9 +191,9 @@ bool libmv_saveImage(const FloatImage& image,
file_name);
for (y = 0; y < image.Height(); y++) {
delete[] row_pointers[y];
delete [] row_pointers[y];
}
delete[] row_pointers;
delete [] row_pointers;
return result;
}
@@ -209,7 +211,7 @@ void libmv_samplePlanarPatchFloat(const float* image,
double* warped_position_x,
double* warped_position_y) {
FloatImage libmv_image, libmv_patch, libmv_mask;
FloatImage* libmv_mask_for_sample = NULL;
FloatImage *libmv_mask_for_sample = NULL;
libmv_floatBufferToFloatImage(image, width, height, channels, &libmv_image);
@@ -219,10 +221,8 @@ void libmv_samplePlanarPatchFloat(const float* image,
}
SamplePlanarPatch(libmv_image,
xs,
ys,
num_samples_x,
num_samples_y,
xs, ys,
num_samples_x, num_samples_y,
libmv_mask_for_sample,
&libmv_patch,
warped_position_x,
@@ -232,19 +232,19 @@ void libmv_samplePlanarPatchFloat(const float* image,
}
void libmv_samplePlanarPatchByte(const unsigned char* image,
int width,
int height,
int channels,
const double* xs,
const double* ys,
int num_samples_x,
int num_samples_y,
const float* mask,
unsigned char* patch,
double* warped_position_x,
double* warped_position_y) {
int width,
int height,
int channels,
const double* xs,
const double* ys,
int num_samples_x,
int num_samples_y,
const float* mask,
unsigned char* patch,
double* warped_position_x,
double* warped_position_y) {
libmv::FloatImage libmv_image, libmv_patch, libmv_mask;
libmv::FloatImage* libmv_mask_for_sample = NULL;
libmv::FloatImage *libmv_mask_for_sample = NULL;
libmv_byteBufferToFloatImage(image, width, height, channels, &libmv_image);
@@ -254,10 +254,8 @@ void libmv_samplePlanarPatchByte(const unsigned char* image,
}
libmv::SamplePlanarPatch(libmv_image,
xs,
ys,
num_samples_x,
num_samples_y,
xs, ys,
num_samples_x, num_samples_y,
libmv_mask_for_sample,
&libmv_patch,
warped_position_x,

View File

@@ -35,7 +35,7 @@ void libmv_floatBufferToFloatImage(const float* buffer,
libmv::FloatImage* image);
void libmv_floatImageToFloatBuffer(const libmv::FloatImage& image,
float* buffer);
float *buffer);
void libmv_floatImageToByteBuffer(const libmv::FloatImage& image,
unsigned char* buffer);
@@ -51,13 +51,13 @@ extern "C" {
#endif
typedef struct libmv_FloatImage {
float* buffer;
float *buffer;
int width;
int height;
int channels;
} libmv_FloatImage;
void libmv_floatImageDestroy(libmv_FloatImage* image);
void libmv_floatImageDestroy(libmv_FloatImage *image);
void libmv_samplePlanarPatchFloat(const float* image,
int width,
@@ -72,18 +72,18 @@ void libmv_samplePlanarPatchFloat(const float* image,
double* warped_position_x,
double* warped_position_y);
void libmv_samplePlanarPatchByte(const unsigned char* image,
int width,
int height,
int channels,
const double* xs,
const double* ys,
int num_samples_x,
int num_samples_y,
const float* mask,
unsigned char* patch,
double* warped_position_x,
double* warped_position_y);
void libmv_samplePlanarPatchByte(const unsigned char* image,
int width,
int height,
int channels,
const double* xs,
const double* ys,
int num_samples_x,
int num_samples_y,
const float* mask,
unsigned char* patch,
double* warped_position_x,
double* warped_position_y);
#ifdef __cplusplus
}

View File

@@ -24,8 +24,8 @@
#include "libmv/logging/logging.h"
#include "libmv/simple_pipeline/bundle.h"
#include "libmv/simple_pipeline/initialize_reconstruction.h"
#include "libmv/simple_pipeline/keyframe_selection.h"
#include "libmv/simple_pipeline/initialize_reconstruction.h"
#include "libmv/simple_pipeline/modal_solver.h"
#include "libmv/simple_pipeline/pipeline.h"
#include "libmv/simple_pipeline/reconstruction_scale.h"
@@ -39,19 +39,19 @@ using libmv::EuclideanScaleToUnity;
using libmv::Marker;
using libmv::ProgressUpdateCallback;
using libmv::PolynomialCameraIntrinsics;
using libmv::Tracks;
using libmv::EuclideanBundle;
using libmv::EuclideanCompleteReconstruction;
using libmv::EuclideanReconstructTwoFrames;
using libmv::EuclideanReprojectionError;
using libmv::PolynomialCameraIntrinsics;
using libmv::Tracks;
struct libmv_Reconstruction {
EuclideanReconstruction reconstruction;
/* Used for per-track average error calculation after reconstruction */
Tracks tracks;
CameraIntrinsics* intrinsics;
CameraIntrinsics *intrinsics;
double error;
bool is_valid;
@@ -63,7 +63,7 @@ class ReconstructUpdateCallback : public ProgressUpdateCallback {
public:
ReconstructUpdateCallback(
reconstruct_progress_update_cb progress_update_callback,
void* callback_customdata) {
void *callback_customdata) {
progress_update_callback_ = progress_update_callback;
callback_customdata_ = callback_customdata;
}
@@ -73,14 +73,13 @@ class ReconstructUpdateCallback : public ProgressUpdateCallback {
progress_update_callback_(callback_customdata_, progress, message);
}
}
protected:
reconstruct_progress_update_cb progress_update_callback_;
void* callback_customdata_;
};
void libmv_solveRefineIntrinsics(
const Tracks& tracks,
const Tracks &tracks,
const int refine_intrinsics,
const int bundle_constraints,
reconstruct_progress_update_cb progress_update_callback,
@@ -97,11 +96,11 @@ void libmv_solveRefineIntrinsics(
bundle_intrinsics |= libmv::BUNDLE_PRINCIPAL_POINT;
}
#define SET_DISTORTION_FLAG_CHECKED(type, coefficient) \
do { \
if (refine_intrinsics & LIBMV_REFINE_##type##_DISTORTION_##coefficient) { \
bundle_intrinsics |= libmv::BUNDLE_##type##_##coefficient; \
} \
#define SET_DISTORTION_FLAG_CHECKED(type, coefficient) \
do { \
if (refine_intrinsics & LIBMV_REFINE_ ## type ##_DISTORTION_ ## coefficient) { \
bundle_intrinsics |= libmv::BUNDLE_ ## type ## _ ## coefficient; \
} \
} while (0)
SET_DISTORTION_FLAG_CHECKED(RADIAL, K1);
@@ -124,19 +123,20 @@ void libmv_solveRefineIntrinsics(
}
void finishReconstruction(
const Tracks& tracks,
const CameraIntrinsics& camera_intrinsics,
libmv_Reconstruction* libmv_reconstruction,
const Tracks &tracks,
const CameraIntrinsics &camera_intrinsics,
libmv_Reconstruction *libmv_reconstruction,
reconstruct_progress_update_cb progress_update_callback,
void* callback_customdata) {
EuclideanReconstruction& reconstruction =
libmv_reconstruction->reconstruction;
void *callback_customdata) {
EuclideanReconstruction &reconstruction =
libmv_reconstruction->reconstruction;
/* Reprojection error calculation. */
progress_update_callback(callback_customdata, 1.0, "Finishing solution");
libmv_reconstruction->tracks = tracks;
libmv_reconstruction->error =
EuclideanReprojectionError(tracks, reconstruction, camera_intrinsics);
libmv_reconstruction->error = EuclideanReprojectionError(tracks,
reconstruction,
camera_intrinsics);
}
bool selectTwoKeyframesBasedOnGRICAndVariance(
@@ -148,8 +148,9 @@ bool selectTwoKeyframesBasedOnGRICAndVariance(
libmv::vector<int> keyframes;
/* Get list of all keyframe candidates first. */
SelectKeyframesBasedOnGRICAndVariance(
normalized_tracks, camera_intrinsics, keyframes);
SelectKeyframesBasedOnGRICAndVariance(normalized_tracks,
camera_intrinsics,
keyframes);
if (keyframes.size() < 2) {
LG << "Not enough keyframes detected by GRIC";
@@ -174,20 +175,24 @@ bool selectTwoKeyframesBasedOnGRICAndVariance(
EuclideanReconstruction reconstruction;
int current_keyframe = keyframes[i];
libmv::vector<Marker> keyframe_markers =
normalized_tracks.MarkersForTracksInBothImages(previous_keyframe,
current_keyframe);
normalized_tracks.MarkersForTracksInBothImages(previous_keyframe,
current_keyframe);
Tracks keyframe_tracks(keyframe_markers);
/* get a solution from two keyframes only */
EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
EuclideanBundle(keyframe_tracks, &reconstruction);
EuclideanCompleteReconstruction(keyframe_tracks, &reconstruction, NULL);
EuclideanCompleteReconstruction(keyframe_tracks,
&reconstruction,
NULL);
double current_error =
EuclideanReprojectionError(tracks, reconstruction, camera_intrinsics);
double current_error = EuclideanReprojectionError(tracks,
reconstruction,
camera_intrinsics);
LG << "Error between " << previous_keyframe << " and " << current_keyframe
LG << "Error between " << previous_keyframe
<< " and " << current_keyframe
<< ": " << current_error;
if (current_error < best_error) {
@@ -209,49 +214,53 @@ Marker libmv_projectMarker(const EuclideanPoint& point,
projected /= projected(2);
libmv::Marker reprojected_marker;
intrinsics.ApplyIntrinsics(
projected(0), projected(1), &reprojected_marker.x, &reprojected_marker.y);
intrinsics.ApplyIntrinsics(projected(0), projected(1),
&reprojected_marker.x,
&reprojected_marker.y);
reprojected_marker.image = camera.image;
reprojected_marker.track = point.track;
return reprojected_marker;
}
void libmv_getNormalizedTracks(const Tracks& tracks,
const CameraIntrinsics& camera_intrinsics,
Tracks* normalized_tracks) {
void libmv_getNormalizedTracks(const Tracks &tracks,
const CameraIntrinsics &camera_intrinsics,
Tracks *normalized_tracks) {
libmv::vector<Marker> markers = tracks.AllMarkers();
for (int i = 0; i < markers.size(); ++i) {
Marker& marker = markers[i];
camera_intrinsics.InvertIntrinsics(
marker.x, marker.y, &marker.x, &marker.y);
normalized_tracks->Insert(
marker.image, marker.track, marker.x, marker.y, marker.weight);
Marker &marker = markers[i];
camera_intrinsics.InvertIntrinsics(marker.x, marker.y,
&marker.x, &marker.y);
normalized_tracks->Insert(marker.image,
marker.track,
marker.x, marker.y,
marker.weight);
}
}
} // namespace
libmv_Reconstruction* libmv_solveReconstruction(
libmv_Reconstruction *libmv_solveReconstruction(
const libmv_Tracks* libmv_tracks,
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
libmv_ReconstructionOptions* libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void* callback_customdata) {
libmv_Reconstruction* libmv_reconstruction =
LIBMV_OBJECT_NEW(libmv_Reconstruction);
libmv_Reconstruction *libmv_reconstruction =
LIBMV_OBJECT_NEW(libmv_Reconstruction);
Tracks& tracks = *((Tracks*)libmv_tracks);
EuclideanReconstruction& reconstruction =
libmv_reconstruction->reconstruction;
Tracks &tracks = *((Tracks *) libmv_tracks);
EuclideanReconstruction &reconstruction =
libmv_reconstruction->reconstruction;
ReconstructUpdateCallback update_callback =
ReconstructUpdateCallback(progress_update_callback, callback_customdata);
ReconstructUpdateCallback(progress_update_callback,
callback_customdata);
/* Retrieve reconstruction options from C-API to libmv API. */
CameraIntrinsics* camera_intrinsics;
CameraIntrinsics *camera_intrinsics;
camera_intrinsics = libmv_reconstruction->intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
/* Invert the camera intrinsics. */
Tracks normalized_tracks;
@@ -267,10 +276,10 @@ libmv_Reconstruction* libmv_solveReconstruction(
update_callback.invoke(0, "Selecting keyframes");
if (selectTwoKeyframesBasedOnGRICAndVariance(tracks,
normalized_tracks,
*camera_intrinsics,
keyframe1,
keyframe2)) {
normalized_tracks,
*camera_intrinsics,
keyframe1,
keyframe2)) {
/* so keyframes in the interface would be updated */
libmv_reconstruction_options->keyframe1 = keyframe1;
libmv_reconstruction_options->keyframe2 = keyframe2;
@@ -281,7 +290,7 @@ libmv_Reconstruction* libmv_solveReconstruction(
LG << "frames to init from: " << keyframe1 << " " << keyframe2;
libmv::vector<Marker> keyframe_markers =
normalized_tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
normalized_tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
LG << "number of markers for init: " << keyframe_markers.size();
@@ -300,12 +309,14 @@ libmv_Reconstruction* libmv_solveReconstruction(
}
EuclideanBundle(normalized_tracks, &reconstruction);
EuclideanCompleteReconstruction(
normalized_tracks, &reconstruction, &update_callback);
EuclideanCompleteReconstruction(normalized_tracks,
&reconstruction,
&update_callback);
/* Refinement. */
if (libmv_reconstruction_options->refine_intrinsics) {
libmv_solveRefineIntrinsics(tracks,
libmv_solveRefineIntrinsics(
tracks,
libmv_reconstruction_options->refine_intrinsics,
libmv::BUNDLE_NO_CONSTRAINTS,
progress_update_callback,
@@ -325,29 +336,31 @@ libmv_Reconstruction* libmv_solveReconstruction(
callback_customdata);
libmv_reconstruction->is_valid = true;
return (libmv_Reconstruction*)libmv_reconstruction;
return (libmv_Reconstruction *) libmv_reconstruction;
}
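As a hedged usage sketch of the solver entry point above (the include path is assumed and the progress-callback signature is inferred from the call sites in this file, since its typedef is not part of the hunk; tracks and intrinsics options are assumed to be filled elsewhere):

#include <cstdio>
#include <cstring>

#include "intern/reconstruction.h" /* assumed header path for the C-API above */

/* Signature inferred from the call sites above. */
static void print_progress(void * /*customdata*/, double progress, const char *message)
{
  std::printf("%3d%% %s\n", int(progress * 100.0), message);
}

/* Solve a reconstruction, check validity, then free it. */
bool solve_and_report(const libmv_Tracks *tracks,
                      const libmv_CameraIntrinsicsOptions *intrinsics_options)
{
  libmv_ReconstructionOptions options;
  std::memset(&options, 0, sizeof(options));
  options.keyframe1 = 1;         /* illustrative keyframes */
  options.keyframe2 = 30;
  options.refine_intrinsics = 0; /* no intrinsics refinement in this sketch */

  libmv_Reconstruction *reconstruction = libmv_solveReconstruction(
      tracks, intrinsics_options, &options, print_progress, /*callback_customdata=*/NULL);

  bool ok = libmv_reconstructionIsValid(reconstruction) != 0;
  libmv_reconstructionDestroy(reconstruction);
  return ok;
}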
libmv_Reconstruction* libmv_solveModal(
const libmv_Tracks* libmv_tracks,
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
const libmv_ReconstructionOptions* libmv_reconstruction_options,
libmv_Reconstruction *libmv_solveModal(
const libmv_Tracks *libmv_tracks,
const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
const libmv_ReconstructionOptions *libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void* callback_customdata) {
libmv_Reconstruction* libmv_reconstruction =
LIBMV_OBJECT_NEW(libmv_Reconstruction);
void *callback_customdata) {
libmv_Reconstruction *libmv_reconstruction =
LIBMV_OBJECT_NEW(libmv_Reconstruction);
Tracks& tracks = *((Tracks*)libmv_tracks);
EuclideanReconstruction& reconstruction =
libmv_reconstruction->reconstruction;
Tracks &tracks = *((Tracks *) libmv_tracks);
EuclideanReconstruction &reconstruction =
libmv_reconstruction->reconstruction;
ReconstructUpdateCallback update_callback =
ReconstructUpdateCallback(progress_update_callback, callback_customdata);
ReconstructUpdateCallback(progress_update_callback,
callback_customdata);
/* Retrieve reconstruction options from C-API to libmv API. */
CameraIntrinsics* camera_intrinsics;
CameraIntrinsics *camera_intrinsics;
camera_intrinsics = libmv_reconstruction->intrinsics =
libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
libmv_cameraIntrinsicsCreateFromOptions(
libmv_camera_intrinsics_options);
/* Invert the camera intrinsics. */
Tracks normalized_tracks;
@@ -365,11 +378,11 @@ libmv_Reconstruction* libmv_solveModal(
/* Refinement. */
if (libmv_reconstruction_options->refine_intrinsics) {
libmv_solveRefineIntrinsics(tracks,
libmv_solveRefineIntrinsics(
tracks,
libmv_reconstruction_options->refine_intrinsics,
libmv::BUNDLE_NO_TRANSLATION,
progress_update_callback,
callback_customdata,
progress_update_callback, callback_customdata,
&reconstruction,
camera_intrinsics);
}
@@ -382,25 +395,26 @@ libmv_Reconstruction* libmv_solveModal(
callback_customdata);
libmv_reconstruction->is_valid = true;
return (libmv_Reconstruction*)libmv_reconstruction;
return (libmv_Reconstruction *) libmv_reconstruction;
}
int libmv_reconstructionIsValid(libmv_Reconstruction* libmv_reconstruction) {
int libmv_reconstructionIsValid(libmv_Reconstruction *libmv_reconstruction) {
return libmv_reconstruction->is_valid;
}
void libmv_reconstructionDestroy(libmv_Reconstruction* libmv_reconstruction) {
void libmv_reconstructionDestroy(libmv_Reconstruction *libmv_reconstruction) {
LIBMV_OBJECT_DELETE(libmv_reconstruction->intrinsics, CameraIntrinsics);
LIBMV_OBJECT_DELETE(libmv_reconstruction, libmv_Reconstruction);
}
int libmv_reprojectionPointForTrack(
const libmv_Reconstruction* libmv_reconstruction,
const libmv_Reconstruction *libmv_reconstruction,
int track,
double pos[3]) {
const EuclideanReconstruction* reconstruction =
&libmv_reconstruction->reconstruction;
const EuclideanPoint* point = reconstruction->PointForTrack(track);
const EuclideanReconstruction *reconstruction =
&libmv_reconstruction->reconstruction;
const EuclideanPoint *point =
reconstruction->PointForTrack(track);
if (point) {
pos[0] = point->X[0];
pos[1] = point->X[2];
@@ -411,22 +425,23 @@ int libmv_reprojectionPointForTrack(
}
double libmv_reprojectionErrorForTrack(
const libmv_Reconstruction* libmv_reconstruction, int track) {
const EuclideanReconstruction* reconstruction =
&libmv_reconstruction->reconstruction;
const CameraIntrinsics* intrinsics = libmv_reconstruction->intrinsics;
const libmv_Reconstruction *libmv_reconstruction,
int track) {
const EuclideanReconstruction *reconstruction =
&libmv_reconstruction->reconstruction;
const CameraIntrinsics *intrinsics = libmv_reconstruction->intrinsics;
libmv::vector<Marker> markers =
libmv_reconstruction->tracks.MarkersForTrack(track);
libmv_reconstruction->tracks.MarkersForTrack(track);
int num_reprojected = 0;
double total_error = 0.0;
for (int i = 0; i < markers.size(); ++i) {
double weight = markers[i].weight;
const EuclideanCamera* camera =
reconstruction->CameraForImage(markers[i].image);
const EuclideanPoint* point =
reconstruction->PointForTrack(markers[i].track);
const EuclideanCamera *camera =
reconstruction->CameraForImage(markers[i].image);
const EuclideanPoint *point =
reconstruction->PointForTrack(markers[i].track);
if (!camera || !point || weight == 0.0) {
continue;
@@ -435,7 +450,7 @@ double libmv_reprojectionErrorForTrack(
num_reprojected++;
Marker reprojected_marker =
libmv_projectMarker(*point, *camera, *intrinsics);
libmv_projectMarker(*point, *camera, *intrinsics);
double ex = (reprojected_marker.x - markers[i].x) * weight;
double ey = (reprojected_marker.y - markers[i].y) * weight;
@@ -446,13 +461,14 @@ double libmv_reprojectionErrorForTrack(
}
double libmv_reprojectionErrorForImage(
const libmv_Reconstruction* libmv_reconstruction, int image) {
const EuclideanReconstruction* reconstruction =
&libmv_reconstruction->reconstruction;
const CameraIntrinsics* intrinsics = libmv_reconstruction->intrinsics;
const libmv_Reconstruction *libmv_reconstruction,
int image) {
const EuclideanReconstruction *reconstruction =
&libmv_reconstruction->reconstruction;
const CameraIntrinsics *intrinsics = libmv_reconstruction->intrinsics;
libmv::vector<Marker> markers =
libmv_reconstruction->tracks.MarkersInImage(image);
const EuclideanCamera* camera = reconstruction->CameraForImage(image);
libmv_reconstruction->tracks.MarkersInImage(image);
const EuclideanCamera *camera = reconstruction->CameraForImage(image);
int num_reprojected = 0;
double total_error = 0.0;
@@ -461,8 +477,8 @@ double libmv_reprojectionErrorForImage(
}
for (int i = 0; i < markers.size(); ++i) {
const EuclideanPoint* point =
reconstruction->PointForTrack(markers[i].track);
const EuclideanPoint *point =
reconstruction->PointForTrack(markers[i].track);
if (!point) {
continue;
@@ -471,7 +487,7 @@ double libmv_reprojectionErrorForImage(
num_reprojected++;
Marker reprojected_marker =
libmv_projectMarker(*point, *camera, *intrinsics);
libmv_projectMarker(*point, *camera, *intrinsics);
double ex = (reprojected_marker.x - markers[i].x) * markers[i].weight;
double ey = (reprojected_marker.y - markers[i].y) * markers[i].weight;
@@ -482,12 +498,13 @@ double libmv_reprojectionErrorForImage(
}
int libmv_reprojectionCameraForImage(
const libmv_Reconstruction* libmv_reconstruction,
const libmv_Reconstruction *libmv_reconstruction,
int image,
double mat[4][4]) {
const EuclideanReconstruction* reconstruction =
&libmv_reconstruction->reconstruction;
const EuclideanCamera* camera = reconstruction->CameraForImage(image);
const EuclideanReconstruction *reconstruction =
&libmv_reconstruction->reconstruction;
const EuclideanCamera *camera =
reconstruction->CameraForImage(image);
if (camera) {
for (int j = 0; j < 3; ++j) {
@@ -524,11 +541,11 @@ int libmv_reprojectionCameraForImage(
}
double libmv_reprojectionError(
const libmv_Reconstruction* libmv_reconstruction) {
const libmv_Reconstruction *libmv_reconstruction) {
return libmv_reconstruction->error;
}
libmv_CameraIntrinsics* libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction* libmv_reconstruction) {
return (libmv_CameraIntrinsics*)libmv_reconstruction->intrinsics;
libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction *libmv_reconstruction) {
return (libmv_CameraIntrinsics *) libmv_reconstruction->intrinsics;
}


@@ -31,16 +31,17 @@ struct libmv_CameraIntrinsicsOptions;
typedef struct libmv_Reconstruction libmv_Reconstruction;
enum {
LIBMV_REFINE_FOCAL_LENGTH = (1 << 0),
LIBMV_REFINE_PRINCIPAL_POINT = (1 << 1),
LIBMV_REFINE_FOCAL_LENGTH = (1 << 0),
LIBMV_REFINE_PRINCIPAL_POINT = (1 << 1),
LIBMV_REFINE_RADIAL_DISTORTION_K1 = (1 << 2),
LIBMV_REFINE_RADIAL_DISTORTION_K2 = (1 << 3),
LIBMV_REFINE_RADIAL_DISTORTION_K3 = (1 << 4),
LIBMV_REFINE_RADIAL_DISTORTION_K4 = (1 << 5),
LIBMV_REFINE_RADIAL_DISTORTION =
(LIBMV_REFINE_RADIAL_DISTORTION_K1 | LIBMV_REFINE_RADIAL_DISTORTION_K2 |
LIBMV_REFINE_RADIAL_DISTORTION_K3 | LIBMV_REFINE_RADIAL_DISTORTION_K4),
LIBMV_REFINE_RADIAL_DISTORTION_K1 = (1 << 2),
LIBMV_REFINE_RADIAL_DISTORTION_K2 = (1 << 3),
LIBMV_REFINE_RADIAL_DISTORTION_K3 = (1 << 4),
LIBMV_REFINE_RADIAL_DISTORTION_K4 = (1 << 5),
LIBMV_REFINE_RADIAL_DISTORTION = (LIBMV_REFINE_RADIAL_DISTORTION_K1 |
LIBMV_REFINE_RADIAL_DISTORTION_K2 |
LIBMV_REFINE_RADIAL_DISTORTION_K3 |
LIBMV_REFINE_RADIAL_DISTORTION_K4),
LIBMV_REFINE_TANGENTIAL_DISTORTION_P1 = (1 << 6),
LIBMV_REFINE_TANGENTIAL_DISTORTION_P2 = (1 << 7),
@@ -54,9 +55,9 @@ typedef struct libmv_ReconstructionOptions {
int refine_intrinsics;
} libmv_ReconstructionOptions;
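Aside, not part of the diff: the LIBMV_REFINE_* values are plain bit flags, so a caller fills refine_intrinsics by OR-ing together whichever parameters it wants bundle adjustment to refine. A minimal sketch in C; the keyframe numbers and the particular flag combination are made up for illustration:

    libmv_ReconstructionOptions options;
    options.keyframe1 = 1;
    options.keyframe2 = 30;
    options.refine_intrinsics = LIBMV_REFINE_FOCAL_LENGTH |
                                LIBMV_REFINE_RADIAL_DISTORTION_K1 |
                                LIBMV_REFINE_RADIAL_DISTORTION_K2;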
typedef void (*reconstruct_progress_update_cb)(void* customdata,
double progress,
const char* message);
typedef void (*reconstruct_progress_update_cb) (void* customdata,
double progress,
const char* message);
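Aside, not part of the diff: a progress callback matching the typedef above can be as small as the sketch below; the function name and the printf formatting are illustrative only (requires <stdio.h>):

    static void progress_update(void *customdata, double progress, const char *message) {
      (void)customdata;  /* host-specific state would be carried here */
      printf("solve %3d%%: %s\n", (int)(progress * 100.0), message);
    }

It is then passed as the progress_update_callback argument of libmv_solveReconstruction() or libmv_solveModal(), and callback_customdata is handed back on every invocation.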
libmv_Reconstruction* libmv_solveReconstruction(
const struct libmv_Tracks* libmv_tracks,
@@ -72,32 +73,35 @@ libmv_Reconstruction* libmv_solveModal(
reconstruct_progress_update_cb progress_update_callback,
void* callback_customdata);
int libmv_reconstructionIsValid(libmv_Reconstruction* libmv_reconstruction);
int libmv_reconstructionIsValid(libmv_Reconstruction *libmv_reconstruction);
void libmv_reconstructionDestroy(libmv_Reconstruction* libmv_reconstruction);
int libmv_reprojectionPointForTrack(
const libmv_Reconstruction* libmv_reconstruction, int track, double pos[3]);
const libmv_Reconstruction* libmv_reconstruction,
int track,
double pos[3]);
double libmv_reprojectionErrorForTrack(
const libmv_Reconstruction* libmv_reconstruction, int track);
const libmv_Reconstruction* libmv_reconstruction,
int track);
double libmv_reprojectionErrorForImage(
const libmv_Reconstruction* libmv_reconstruction, int image);
const libmv_Reconstruction* libmv_reconstruction,
int image);
int libmv_reprojectionCameraForImage(
const libmv_Reconstruction* libmv_reconstruction,
int image,
double mat[4][4]);
double libmv_reprojectionError(
const libmv_Reconstruction* libmv_reconstruction);
double libmv_reprojectionError(const libmv_Reconstruction* libmv_reconstruction);
struct libmv_CameraIntrinsics* libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction* libmv_Reconstruction);
libmv_Reconstruction *libmv_Reconstruction);
#ifdef __cplusplus
}
#endif
#endif // LIBMV_C_API_RECONSTRUCTION_H_
#endif // LIBMV_C_API_RECONSTRUCTION_H_


@@ -24,7 +24,7 @@
/* ************ Logging ************ */
void libmv_initLogging(const char* /*argv0*/) {
void libmv_initLogging(const char * /*argv0*/) {
}
void libmv_startDebugLogging(void) {
@@ -36,18 +36,18 @@ void libmv_setLoggingVerbosity(int /*verbosity*/) {
/* ************ Planar tracker ************ */
/* TrackRegion (new planar tracker) */
int libmv_trackRegion(const libmv_TrackRegionOptions* /*options*/,
const float* /*image1*/,
int libmv_trackRegion(const libmv_TrackRegionOptions * /*options*/,
const float * /*image1*/,
int /*image1_width*/,
int /*image1_height*/,
const float* /*image2*/,
const float * /*image2*/,
int /*image2_width*/,
int /*image2_height*/,
const double* x1,
const double* y1,
libmv_TrackRegionResult* result,
double* x2,
double* y2) {
const double *x1,
const double *y1,
libmv_TrackRegionResult *result,
double *x2,
double *y2) {
/* Convert to doubles for the libmv api. The four corners and the center. */
for (int i = 0; i < 5; ++i) {
x2[i] = x1[i];
@@ -61,46 +61,46 @@ int libmv_trackRegion(const libmv_TrackRegionOptions* /*options*/,
return false;
}
void libmv_samplePlanarPatchFloat(const float* /*image*/,
void libmv_samplePlanarPatchFloat(const float * /*image*/,
int /*width*/,
int /*height*/,
int /*channels*/,
const double* /*xs*/,
const double* /*ys*/,
const double * /*xs*/,
const double * /*ys*/,
int /*num_samples_x*/,
int /*num_samples_y*/,
const float* /*mask*/,
float* /*patch*/,
double* /*warped_position_x*/,
double* /*warped_position_y*/) {
const float * /*mask*/,
float * /*patch*/,
double * /*warped_position_x*/,
double * /*warped_position_y*/) {
/* TODO(sergey): implement */
}
void libmv_samplePlanarPatchByte(const unsigned char* /*image*/,
void libmv_samplePlanarPatchByte(const unsigned char * /*image*/,
int /*width*/,
int /*height*/,
int /*channels*/,
const double* /*xs*/,
const double* /*ys*/,
int /*num_samples_x*/,
int /*num_samples_y*/,
const float* /*mask*/,
unsigned char* /*patch*/,
double* /*warped_position_x*/,
double* /*warped_position_y*/) {
const double * /*xs*/,
const double * /*ys*/,
int /*num_samples_x*/, int /*num_samples_y*/,
const float * /*mask*/,
unsigned char * /*patch*/,
double * /*warped_position_x*/,
double * /*warped_position_y*/) {
/* TODO(sergey): implement */
}
void libmv_floatImageDestroy(libmv_FloatImage* /*image*/) {
void libmv_floatImageDestroy(libmv_FloatImage* /*image*/)
{
}
/* ************ Tracks ************ */
libmv_Tracks* libmv_tracksNew(void) {
libmv_Tracks *libmv_tracksNew(void) {
return NULL;
}
void libmv_tracksInsert(libmv_Tracks* /*libmv_tracks*/,
void libmv_tracksInsert(libmv_Tracks * /*libmv_tracks*/,
int /*image*/,
int /*track*/,
double /*x*/,
@@ -108,152 +108,152 @@ void libmv_tracksInsert(libmv_Tracks* /*libmv_tracks*/,
double /*weight*/) {
}
void libmv_tracksDestroy(libmv_Tracks* /*libmv_tracks*/) {
void libmv_tracksDestroy(libmv_Tracks * /*libmv_tracks*/) {
}
/* ************ Reconstruction solver ************ */
libmv_Reconstruction* libmv_solveReconstruction(
const libmv_Tracks* /*libmv_tracks*/,
const libmv_CameraIntrinsicsOptions* /*libmv_camera_intrinsics_options*/,
libmv_ReconstructionOptions* /*libmv_reconstruction_options*/,
libmv_Reconstruction *libmv_solveReconstruction(
const libmv_Tracks * /*libmv_tracks*/,
const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
libmv_ReconstructionOptions * /*libmv_reconstruction_options*/,
reconstruct_progress_update_cb /*progress_update_callback*/,
void* /*callback_customdata*/) {
void * /*callback_customdata*/) {
return NULL;
}
libmv_Reconstruction* libmv_solveModal(
const libmv_Tracks* /*libmv_tracks*/,
const libmv_CameraIntrinsicsOptions* /*libmv_camera_intrinsics_options*/,
const libmv_ReconstructionOptions* /*libmv_reconstruction_options*/,
libmv_Reconstruction *libmv_solveModal(
const libmv_Tracks * /*libmv_tracks*/,
const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
const libmv_ReconstructionOptions * /*libmv_reconstruction_options*/,
reconstruct_progress_update_cb /*progress_update_callback*/,
void* /*callback_customdata*/) {
void * /*callback_customdata*/) {
return NULL;
}
int libmv_reconstructionIsValid(
libmv_Reconstruction* /*libmv_reconstruction*/) {
int libmv_reconstructionIsValid(libmv_Reconstruction * /*libmv_reconstruction*/) {
return 0;
}
int libmv_reprojectionPointForTrack(
const libmv_Reconstruction* /*libmv_reconstruction*/,
const libmv_Reconstruction * /*libmv_reconstruction*/,
int /*track*/,
double /*pos*/[3]) {
return 0;
}
double libmv_reprojectionErrorForTrack(
const libmv_Reconstruction* /*libmv_reconstruction*/, int /*track*/) {
const libmv_Reconstruction * /*libmv_reconstruction*/,
int /*track*/) {
return 0.0;
}
double libmv_reprojectionErrorForImage(
const libmv_Reconstruction* /*libmv_reconstruction*/, int /*image*/) {
const libmv_Reconstruction * /*libmv_reconstruction*/,
int /*image*/) {
return 0.0;
}
int libmv_reprojectionCameraForImage(
const libmv_Reconstruction* /*libmv_reconstruction*/,
const libmv_Reconstruction * /*libmv_reconstruction*/,
int /*image*/,
double /*mat*/[4][4]) {
return 0;
}
double libmv_reprojectionError(
const libmv_Reconstruction* /*libmv_reconstruction*/) {
const libmv_Reconstruction * /*libmv_reconstruction*/) {
return 0.0;
}
void libmv_reconstructionDestroy(
struct libmv_Reconstruction* /*libmv_reconstruction*/) {
struct libmv_Reconstruction * /*libmv_reconstruction*/) {
}
/* ************ Feature detector ************ */
libmv_Features* libmv_detectFeaturesByte(const unsigned char* /*image_buffer*/,
libmv_Features *libmv_detectFeaturesByte(const unsigned char * /*image_buffer*/,
int /*width*/,
int /*height*/,
int /*channels*/,
libmv_DetectOptions* /*options*/) {
libmv_DetectOptions * /*options*/) {
return NULL;
}
struct libmv_Features* libmv_detectFeaturesFloat(
const float* /*image_buffer*/,
struct libmv_Features *libmv_detectFeaturesFloat(
const float * /*image_buffer*/,
int /*width*/,
int /*height*/,
int /*channels*/,
libmv_DetectOptions* /*options*/) {
libmv_DetectOptions * /*options*/) {
return NULL;
}
int libmv_countFeatures(const libmv_Features* /*libmv_features*/) {
int libmv_countFeatures(const libmv_Features * /*libmv_features*/) {
return 0;
}
void libmv_getFeature(const libmv_Features* /*libmv_features*/,
void libmv_getFeature(const libmv_Features * /*libmv_features*/,
int /*number*/,
double* x,
double* y,
double* score,
double* size) {
double *x,
double *y,
double *score,
double *size) {
*x = 0.0;
*y = 0.0;
*score = 0.0;
*size = 0.0;
}
void libmv_featuresDestroy(struct libmv_Features* /*libmv_features*/) {
void libmv_featuresDestroy(struct libmv_Features * /*libmv_features*/) {
}
/* ************ Camera intrinsics ************ */
libmv_CameraIntrinsics* libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction* /*libmv_reconstruction*/) {
libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(
libmv_Reconstruction * /*libmv_reconstruction*/) {
return NULL;
}
libmv_CameraIntrinsics* libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions* /*libmv_camera_intrinsics_options*/) {
libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/) {
return NULL;
}
libmv_CameraIntrinsics* libmv_cameraIntrinsicsCopy(
const libmv_CameraIntrinsics* /*libmvIntrinsics*/) {
libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
const libmv_CameraIntrinsics * /*libmvIntrinsics*/) {
return NULL;
}
void libmv_cameraIntrinsicsDestroy(
libmv_CameraIntrinsics* /*libmvIntrinsics*/) {
libmv_CameraIntrinsics * /*libmvIntrinsics*/) {
}
void libmv_cameraIntrinsicsUpdate(
const libmv_CameraIntrinsicsOptions* /*libmv_camera_intrinsics_options*/,
libmv_CameraIntrinsics* /*libmv_intrinsics*/) {
const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
libmv_CameraIntrinsics * /*libmv_intrinsics*/) {
}
void libmv_cameraIntrinsicsSetThreads(
libmv_CameraIntrinsics* /*libmv_intrinsics*/, int /*threads*/) {
libmv_CameraIntrinsics * /*libmv_intrinsics*/,
int /*threads*/) {
}
void libmv_cameraIntrinsicsExtractOptions(
const libmv_CameraIntrinsics* /*libmv_intrinsics*/,
libmv_CameraIntrinsicsOptions* camera_intrinsics_options) {
const libmv_CameraIntrinsics * /*libmv_intrinsics*/,
libmv_CameraIntrinsicsOptions *camera_intrinsics_options) {
memset(camera_intrinsics_options, 0, sizeof(libmv_CameraIntrinsicsOptions));
camera_intrinsics_options->focal_length = 1.0;
}
void libmv_cameraIntrinsicsUndistortByte(
const libmv_CameraIntrinsics* /*libmv_intrinsics*/,
const unsigned char* source_image,
int width,
int height,
const libmv_CameraIntrinsics * /*libmv_intrinsics*/,
const unsigned char *source_image,
int width, int height,
float /*overscan*/,
int channels,
unsigned char* destination_image) {
memcpy(destination_image,
source_image,
unsigned char *destination_image) {
memcpy(destination_image, source_image,
channels * width * height * sizeof(unsigned char));
}
@@ -265,21 +265,19 @@ void libmv_cameraIntrinsicsUndistortFloat(
float /*overscan*/,
int channels,
float* destination_image) {
memcpy(destination_image,
source_image,
memcpy(destination_image, source_image,
channels * width * height * sizeof(float));
}
void libmv_cameraIntrinsicsDistortByte(
const struct libmv_CameraIntrinsics* /*libmv_intrinsics*/,
const unsigned char* source_image,
const unsigned char *source_image,
int width,
int height,
float /*overscan*/,
int channels,
unsigned char* destination_image) {
memcpy(destination_image,
source_image,
unsigned char *destination_image) {
memcpy(destination_image, source_image,
channels * width * height * sizeof(unsigned char));
}
@@ -291,8 +289,7 @@ void libmv_cameraIntrinsicsDistortFloat(
float /*overscan*/,
int channels,
float* destination_image) {
memcpy(destination_image,
source_image,
memcpy(destination_image, source_image,
channels * width * height * sizeof(float));
}
@@ -318,8 +315,8 @@ void libmv_cameraIntrinsicsInvert(
*y1 = 0.0;
}
void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*/*x1*/)[2],
/* const */ double (*/*x2*/)[2],
void libmv_homography2DFromCorrespondencesEuc(/* const */ double (* /*x1*/)[2],
/* const */ double (* /*x2*/)[2],
int /*num_points*/,
double H[3][3]) {
memset(H, 0, sizeof(double[3][3]));
@@ -330,38 +327,45 @@ void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*/*x1*/)[2],
/* ************ autotrack ************ */
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* /*frame_accessor*/) {
libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* /*frame_accessor*/)
{
return NULL;
}
void libmv_autoTrackDestroy(libmv_AutoTrack* /*libmv_autotrack*/) {
void libmv_autoTrackDestroy(libmv_AutoTrack* /*libmv_autotrack*/)
{
}
void libmv_autoTrackSetOptions(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_AutoTrackOptions* /*options*/) {
const libmv_AutoTrackOptions* /*options*/)
{
}
int libmv_autoTrackMarker(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_TrackRegionOptions* /*libmv_options*/,
libmv_Marker* /*libmv_tracker_marker*/,
libmv_TrackRegionResult* /*libmv_result*/) {
libmv_Marker * /*libmv_tracker_marker*/,
libmv_TrackRegionResult* /*libmv_result*/)
{
return 0;
}
void libmv_autoTrackAddMarker(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_Marker* /*libmv_marker*/) {
const libmv_Marker* /*libmv_marker*/)
{
}
void libmv_autoTrackSetMarkers(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_Marker* /*libmv_marker-*/,
size_t /*num_markers*/) {
size_t /*num_markers*/)
{
}
int libmv_autoTrackGetMarker(libmv_AutoTrack* /*libmv_autotrack*/,
int /*clip*/,
int /*frame*/,
int /*track*/,
libmv_Marker* /*libmv_marker*/) {
libmv_Marker* /*libmv_marker*/)
{
return 0;
}
@@ -372,20 +376,24 @@ libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_GetImageCallback /*get_image_callback*/,
libmv_ReleaseImageCallback /*release_image_callback*/,
libmv_GetMaskForTrackCallback /*get_mask_for_track_callback*/,
libmv_ReleaseMaskCallback /*release_mask_callback*/) {
libmv_ReleaseMaskCallback /*release_mask_callback*/)
{
return NULL;
}
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* /*frame_accessor*/) {
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* /*frame_accessor*/)
{
}
int64_t libmv_frameAccessorgetTransformKey(
const libmv_FrameTransform* /*transform*/) {
const libmv_FrameTransform * /*transform*/)
{
return 0;
}
void libmv_frameAccessorgetTransformRun(
const libmv_FrameTransform* /*transform*/,
const libmv_FloatImage* /*input_image*/,
libmv_FloatImage* /*output_image*/) {
void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform* /*transform*/,
const libmv_FloatImage* /*input_image*/,
libmv_FloatImage* /*output_image*/)
{
}


@@ -21,7 +21,6 @@
#include "intern/image.h"
#include "intern/utildefines.h"
#include "libmv/image/image.h"
#include "libmv/logging/logging.h"
#include "libmv/tracking/track_region.h"
/* define this to generate PNG images with content of search areas
@@ -33,53 +32,27 @@
#undef DUMP_ALWAYS
using libmv::FloatImage;
using libmv::TrackRegion;
using libmv::TrackRegionOptions;
using libmv::TrackRegionResult;
using libmv::TrackRegion;
namespace {
TrackRegionOptions::Direction convertDirection(
libmv_TrackRegionDirection direction) {
switch (direction) {
case LIBMV_TRACK_REGION_FORWARD: return TrackRegionOptions::FORWARD;
case LIBMV_TRACK_REGION_BACKWARD: return TrackRegionOptions::BACKWARD;
}
LOG(FATAL) << "Unhandled tracking direction " << direction
<< ", should never happen.";
return TrackRegionOptions::FORWARD;
}
TrackRegionOptions::Mode convertMotionModelToMode(int motion_model) {
switch (motion_model) {
#define LIBMV_CONVERT(the_model) \
case TrackRegionOptions::the_model: return TrackRegionOptions::the_model;
void libmv_configureTrackRegionOptions(
const libmv_TrackRegionOptions& options,
TrackRegionOptions* track_region_options) {
switch (options.motion_model) {
#define LIBMV_CONVERT(the_model) \
case TrackRegionOptions::the_model: \
track_region_options->mode = TrackRegionOptions::the_model; \
break;
LIBMV_CONVERT(TRANSLATION)
LIBMV_CONVERT(TRANSLATION_ROTATION)
LIBMV_CONVERT(TRANSLATION_SCALE)
LIBMV_CONVERT(TRANSLATION_ROTATION_SCALE)
LIBMV_CONVERT(AFFINE)
LIBMV_CONVERT(HOMOGRAPHY)
#undef LIBMV_CONVERT
}
LOG(FATAL) << "Unhandled motion model " << motion_model
<< ", should never happen.";
return TrackRegionOptions::TRANSLATION;
}
} // namespace
void libmv_configureTrackRegionOptions(
const libmv_TrackRegionOptions& options,
TrackRegionOptions* track_region_options) {
track_region_options->direction = convertDirection(options.direction);
track_region_options->mode = convertMotionModelToMode(options.motion_model);
track_region_options->minimum_correlation = options.minimum_correlation;
track_region_options->max_iterations = options.num_iterations;
track_region_options->sigma = options.sigma;
@@ -93,8 +66,7 @@ void libmv_configureTrackRegionOptions(
* so disabling for now for until proper prediction model is landed.
*
* The thing is, currently blender sends input coordinates as the guess to
* region tracker and in case of fast motion such an early out ruins the
* track.
* region tracker and in case of fast motion such an early out ruins the track.
*/
track_region_options->attempt_refine_before_brute = false;
track_region_options->use_normalized_intensities = options.use_normalization;
@@ -102,7 +74,7 @@ void libmv_configureTrackRegionOptions(
void libmv_regionTrackergetResult(const TrackRegionResult& track_region_result,
libmv_TrackRegionResult* result) {
result->termination = (int)track_region_result.termination;
result->termination = (int) track_region_result.termination;
result->termination_reason = "";
result->correlation = track_region_result.correlation;
}
@@ -136,27 +108,33 @@ int libmv_trackRegion(const libmv_TrackRegionOptions* options,
libmv_configureTrackRegionOptions(*options, &track_region_options);
if (options->image1_mask) {
libmv_floatBufferToFloatImage(
options->image1_mask, image1_width, image1_height, 1, &image1_mask);
libmv_floatBufferToFloatImage(options->image1_mask,
image1_width,
image1_height,
1,
&image1_mask);
track_region_options.image1_mask = &image1_mask;
}
// Convert from raw float buffers to libmv's FloatImage.
FloatImage old_patch, new_patch;
libmv_floatBufferToFloatImage(
image1, image1_width, image1_height, 1, &old_patch);
libmv_floatBufferToFloatImage(
image2, image2_width, image2_height, 1, &new_patch);
libmv_floatBufferToFloatImage(image1,
image1_width,
image1_height,
1,
&old_patch);
libmv_floatBufferToFloatImage(image2,
image2_width,
image2_height,
1,
&new_patch);
TrackRegionResult track_region_result;
TrackRegion(old_patch,
new_patch,
xx1,
yy1,
TrackRegion(old_patch, new_patch,
xx1, yy1,
track_region_options,
xx2,
yy2,
xx2, yy2,
&track_region_result);
// Convert to floats for the blender api.


@@ -24,20 +24,14 @@
extern "C" {
#endif
typedef enum libmv_TrackRegionDirection {
LIBMV_TRACK_REGION_FORWARD,
LIBMV_TRACK_REGION_BACKWARD,
} libmv_TrackRegionDirection;
typedef struct libmv_TrackRegionOptions {
libmv_TrackRegionDirection direction;
int motion_model;
int num_iterations;
int use_brute;
int use_normalization;
double minimum_correlation;
double sigma;
float* image1_mask;
float *image1_mask;
} libmv_TrackRegionOptions;
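Aside, not part of the diff: a caller fills these options before invoking libmv_trackRegion(); the numbers below are illustrative defaults, not values taken from Blender:

    libmv_TrackRegionOptions options;
    options.motion_model = 0;           /* TRANSLATION, assuming the Mode enumerators start at 0 */
    options.num_iterations = 50;
    options.use_brute = 1;
    options.use_normalization = 0;
    options.minimum_correlation = 0.75;
    options.sigma = 0.9;
    options.image1_mask = NULL;         /* no mask on the reference patch */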
typedef struct libmv_TrackRegionResult {
@@ -48,9 +42,9 @@ typedef struct libmv_TrackRegionResult {
#ifdef __cplusplus
namespace libmv {
struct TrackRegionOptions;
struct TrackRegionResult;
} // namespace libmv
struct TrackRegionOptions;
struct TrackRegionResult;
}
void libmv_configureTrackRegionOptions(
const libmv_TrackRegionOptions& options,
libmv::TrackRegionOptions* track_region_options);


@@ -28,18 +28,18 @@ using libmv::Tracks;
libmv_Tracks* libmv_tracksNew(void) {
Tracks* tracks = LIBMV_OBJECT_NEW(Tracks);
return (libmv_Tracks*)tracks;
return (libmv_Tracks*) tracks;
}
void libmv_tracksDestroy(libmv_Tracks* libmv_tracks) {
LIBMV_OBJECT_DELETE(libmv_tracks, Tracks);
}
void libmv_tracksInsert(libmv_Tracks* libmv_tracks,
void libmv_tracksInsert(libmv_Tracks *libmv_tracks,
int image,
int track,
double x,
double y,
double weight) {
((Tracks*)libmv_tracks)->Insert(image, track, x, y, weight);
((Tracks *) libmv_tracks)->Insert(image, track, x, y, weight);
}
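Aside, not part of the diff: typical use of this small C wrapper, with made-up marker coordinates; each Insert() call records one marker position for a (frame, track) pair:

    libmv_Tracks *tracks = libmv_tracksNew();
    libmv_tracksInsert(tracks, /*image=*/0, /*track=*/3, 120.5, 88.0, /*weight=*/1.0);
    libmv_tracksInsert(tracks, /*image=*/1, /*track=*/3, 121.0, 87.4, /*weight=*/1.0);
    /* ... hand the tracks to libmv_solveReconstruction() ... */
    libmv_tracksDestroy(tracks);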


@@ -25,7 +25,8 @@
using mv::Marker;
using mv::Tracks;
void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker, Marker* marker) {
void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
Marker *marker) {
marker->clip = libmv_marker.clip;
marker->frame = libmv_marker.frame;
marker->track = libmv_marker.track;
@@ -40,16 +41,17 @@ void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker, Marker* marker) {
marker->search_region.max(0) = libmv_marker.search_region_max[0];
marker->search_region.max(1) = libmv_marker.search_region_max[1];
marker->weight = libmv_marker.weight;
marker->source = (Marker::Source)libmv_marker.source;
marker->status = (Marker::Status)libmv_marker.status;
marker->source = (Marker::Source) libmv_marker.source;
marker->status = (Marker::Status) libmv_marker.status;
marker->reference_clip = libmv_marker.reference_clip;
marker->reference_frame = libmv_marker.reference_frame;
marker->model_type = (Marker::ModelType)libmv_marker.model_type;
marker->model_type = (Marker::ModelType) libmv_marker.model_type;
marker->model_id = libmv_marker.model_id;
marker->disabled_channels = libmv_marker.disabled_channels;
}
void libmv_markerToApiMarker(const Marker& marker, libmv_Marker* libmv_marker) {
void libmv_markerToApiMarker(const Marker& marker,
libmv_Marker *libmv_marker) {
libmv_marker->clip = marker.clip;
libmv_marker->frame = marker.frame;
libmv_marker->track = marker.track;
@@ -64,11 +66,11 @@ void libmv_markerToApiMarker(const Marker& marker, libmv_Marker* libmv_marker) {
libmv_marker->search_region_max[0] = marker.search_region.max(0);
libmv_marker->search_region_max[1] = marker.search_region.max(1);
libmv_marker->weight = marker.weight;
libmv_marker->source = (libmv_MarkerSource)marker.source;
libmv_marker->status = (libmv_MarkerStatus)marker.status;
libmv_marker->source = (libmv_MarkerSource) marker.source;
libmv_marker->status = (libmv_MarkerStatus) marker.status;
libmv_marker->reference_clip = marker.reference_clip;
libmv_marker->reference_frame = marker.reference_frame;
libmv_marker->model_type = (libmv_MarkerModelType)marker.model_type;
libmv_marker->model_type = (libmv_MarkerModelType) marker.model_type;
libmv_marker->model_id = marker.model_id;
libmv_marker->disabled_channels = marker.disabled_channels;
}
@@ -76,7 +78,7 @@ void libmv_markerToApiMarker(const Marker& marker, libmv_Marker* libmv_marker) {
libmv_TracksN* libmv_tracksNewN(void) {
Tracks* tracks = LIBMV_OBJECT_NEW(Tracks);
return (libmv_TracksN*)tracks;
return (libmv_TracksN*) tracks;
}
void libmv_tracksDestroyN(libmv_TracksN* libmv_tracks) {
@@ -87,7 +89,7 @@ void libmv_tracksAddMarkerN(libmv_TracksN* libmv_tracks,
const libmv_Marker* libmv_marker) {
Marker marker;
libmv_apiMarkerToMarker(*libmv_marker, &marker);
((Tracks*)libmv_tracks)->AddMarker(marker);
((Tracks*) libmv_tracks)->AddMarker(marker);
}
void libmv_tracksGetMarkerN(libmv_TracksN* libmv_tracks,
@@ -96,7 +98,7 @@ void libmv_tracksGetMarkerN(libmv_TracksN* libmv_tracks,
int track,
libmv_Marker* libmv_marker) {
Marker marker;
((Tracks*)libmv_tracks)->GetMarker(clip, frame, track, &marker);
((Tracks*) libmv_tracks)->GetMarker(clip, frame, track, &marker);
libmv_markerToApiMarker(marker, libmv_marker);
}
@@ -104,25 +106,26 @@ void libmv_tracksRemoveMarkerN(libmv_TracksN* libmv_tracks,
int clip,
int frame,
int track) {
((Tracks*)libmv_tracks)->RemoveMarker(clip, frame, track);
((Tracks *) libmv_tracks)->RemoveMarker(clip, frame, track);
}
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks, int track) {
((Tracks*)libmv_tracks)->RemoveMarkersForTrack(track);
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks,
int track) {
((Tracks *) libmv_tracks)->RemoveMarkersForTrack(track);
}
int libmv_tracksMaxClipN(libmv_TracksN* libmv_tracks) {
return ((Tracks*)libmv_tracks)->MaxClip();
return ((Tracks*) libmv_tracks)->MaxClip();
}
int libmv_tracksMaxFrameN(libmv_TracksN* libmv_tracks, int clip) {
return ((Tracks*)libmv_tracks)->MaxFrame(clip);
return ((Tracks*) libmv_tracks)->MaxFrame(clip);
}
int libmv_tracksMaxTrackN(libmv_TracksN* libmv_tracks) {
return ((Tracks*)libmv_tracks)->MaxTrack();
return ((Tracks*) libmv_tracks)->MaxTrack();
}
int libmv_tracksNumMarkersN(libmv_TracksN* libmv_tracks) {
return ((Tracks*)libmv_tracks)->NumMarkers();
return ((Tracks*) libmv_tracks)->NumMarkers();
}


@@ -79,19 +79,20 @@ typedef struct libmv_Marker {
#ifdef __cplusplus
namespace mv {
struct Marker;
struct Marker;
}
void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
mv::Marker* marker);
mv::Marker *marker);
void libmv_markerToApiMarker(const mv::Marker& marker,
libmv_Marker* libmv_marker);
libmv_Marker *libmv_marker);
#endif
libmv_TracksN* libmv_tracksNewN(void);
void libmv_tracksDestroyN(libmv_TracksN* libmv_tracks);
void libmv_tracksAddMarkerN(libmv_TracksN* libmv_tracks,
const libmv_Marker* libmv_marker);
@@ -106,7 +107,8 @@ void libmv_tracksRemoveMarkerN(libmv_TracksN* libmv_tracks,
int frame,
int track);
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks, int track);
void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks,
int track);
int libmv_tracksMaxClipN(libmv_TracksN* libmv_tracks);
int libmv_tracksMaxFrameN(libmv_TracksN* libmv_tracks, int clip);


@@ -30,33 +30,27 @@
# define LIBMV_OBJECT_NEW OBJECT_GUARDED_NEW
# define LIBMV_OBJECT_DELETE OBJECT_GUARDED_DELETE
# define LIBMV_OBJECT_DELETE OBJECT_GUARDED_DELETE
# define LIBMV_STRUCT_NEW(type, count) \
(type*)MEM_mallocN(sizeof(type) * count, __func__)
# define LIBMV_STRUCT_NEW(type, count) \
(type*)MEM_mallocN(sizeof(type) * count, __func__)
# define LIBMV_STRUCT_DELETE(what) MEM_freeN(what)
#else
// Need this to keep libmv-capi potentially standalone.
# if defined __GNUC__ || defined __sun
# define LIBMV_OBJECT_NEW(type, args...) \
new (malloc(sizeof(type))) type(args)
# define LIBMV_OBJECT_NEW(type, args ...) \
new(malloc(sizeof(type))) type(args)
# else
# define LIBMV_OBJECT_NEW(type, ...) \
new (malloc(sizeof(type))) type(__VA_ARGS__)
# endif
# define LIBMV_OBJECT_DELETE(what, type) \
{ \
if (what) { \
((type*)(what))->~type(); \
free(what); \
} \
} \
(void)0
# define LIBMV_OBJECT_NEW(type, ...) \
new(malloc(sizeof(type))) type(__VA_ARGS__)
#endif
# define LIBMV_OBJECT_DELETE(what, type) \
{ \
if (what) { \
((type*)(what))->~type(); \
free(what); \
} \
} (void)0
# define LIBMV_STRUCT_NEW(type, count) (type*)malloc(sizeof(type) * count)
# define LIBMV_STRUCT_DELETE(what) \
{ \
if (what) \
free(what); \
} \
(void)0
# define LIBMV_STRUCT_DELETE(what) { if (what) free(what); } (void)0
#endif
#endif // LIBMV_C_API_UTILDEFINES_H_
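Aside, not part of the diff: in the standalone (non-Blender) branch of these macros, LIBMV_OBJECT_NEW constructs the object into malloc'd storage with placement new, so LIBMV_OBJECT_DELETE has to run the destructor by hand before calling free(). A rough hand-expanded equivalent, using Tracks as a stand-in type (needs <new> and <cstdlib>):

    void *mem = malloc(sizeof(Tracks));
    Tracks *tracks = new (mem) Tracks();   /* LIBMV_OBJECT_NEW(Tracks) */
    tracks->~Tracks();                     /* LIBMV_OBJECT_DELETE(tracks, Tracks) */
    free(tracks);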


@@ -21,9 +21,9 @@
// Author: mierle@gmail.com (Keir Mierle)
#include "libmv/autotrack/autotrack.h"
#include "libmv/autotrack/quad.h"
#include "libmv/autotrack/frame_accessor.h"
#include "libmv/autotrack/predict_tracks.h"
#include "libmv/autotrack/quad.h"
#include "libmv/base/scoped_ptr.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/numeric.h"
@@ -35,30 +35,34 @@ namespace {
class DisableChannelsTransform : public FrameAccessor::Transform {
public:
DisableChannelsTransform(int disabled_channels)
: disabled_channels_(disabled_channels) {}
: disabled_channels_(disabled_channels) { }
int64_t key() const { return disabled_channels_; }
int64_t key() const {
return disabled_channels_;
}
void run(const FloatImage& input, FloatImage* output) const {
bool disable_red = (disabled_channels_ & Marker::CHANNEL_R) != 0,
bool disable_red = (disabled_channels_ & Marker::CHANNEL_R) != 0,
disable_green = (disabled_channels_ & Marker::CHANNEL_G) != 0,
disable_blue = (disabled_channels_ & Marker::CHANNEL_B) != 0;
disable_blue = (disabled_channels_ & Marker::CHANNEL_B) != 0;
LG << "Disabling channels: " << (disable_red ? "R " : "")
<< (disable_green ? "G " : "") << (disable_blue ? "B" : "");
LG << "Disabling channels: "
<< (disable_red ? "R " : "")
<< (disable_green ? "G " : "")
<< (disable_blue ? "B" : "");
// It's important to rescale the result appropriately so that e.g. if only
// blue is selected, it's not zeroed out.
float scale = (disable_red ? 0.0f : 0.2126f) +
float scale = (disable_red ? 0.0f : 0.2126f) +
(disable_green ? 0.0f : 0.7152f) +
(disable_blue ? 0.0f : 0.0722f);
(disable_blue ? 0.0f : 0.0722f);
output->Resize(input.Height(), input.Width(), 1);
for (int y = 0; y < input.Height(); y++) {
for (int x = 0; x < input.Width(); x++) {
float r = disable_red ? 0.0f : input(y, x, 0);
float r = disable_red ? 0.0f : input(y, x, 0);
float g = disable_green ? 0.0f : input(y, x, 1);
float b = disable_blue ? 0.0f : input(y, x, 2);
float b = disable_blue ? 0.0f : input(y, x, 2);
(*output)(y, x, 0) = (0.2126f * r + 0.7152f * g + 0.0722f * b) / scale;
}
}
@@ -69,7 +73,7 @@ class DisableChannelsTransform : public FrameAccessor::Transform {
int disabled_channels_;
};
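Aside, not part of the diff: 0.2126, 0.7152 and 0.0722 are the BT.709 luma weights, so after zeroing the disabled channels the result is renormalized as

    gray = (0.2126 * r + 0.7152 * g + 0.0722 * b) / scale

where scale is the sum of the weights of the channels that stay enabled; tracking on the blue channel alone therefore still spans the usual intensity range instead of producing a nearly black patch.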
template <typename QuadT, typename ArrayT>
template<typename QuadT, typename ArrayT>
void QuadToArrays(const QuadT& quad, ArrayT* x, ArrayT* y) {
for (int i = 0; i < 4; ++i) {
x[i] = quad.coordinates(i, 0);
@@ -111,20 +115,11 @@ FrameAccessor::Key GetMaskForMarker(const Marker& marker,
FrameAccessor* frame_accessor,
FloatImage* mask) {
Region region = marker.search_region.Rounded();
return frame_accessor->GetMaskForTrack(
marker.clip, marker.frame, marker.track, &region, mask);
}
PredictDirection getPredictDirection(const TrackRegionOptions* track_options) {
switch (track_options->direction) {
case TrackRegionOptions::FORWARD: return PredictDirection::FORWARD;
case TrackRegionOptions::BACKWARD: return PredictDirection::BACKWARD;
}
LOG(FATAL) << "Unhandled tracking direction " << track_options->direction
<< ", should never happen.";
return PredictDirection::AUTO;
return frame_accessor->GetMaskForTrack(marker.clip,
marker.frame,
marker.track,
&region,
mask);
}
} // namespace
@@ -133,9 +128,8 @@ bool AutoTrack::TrackMarker(Marker* tracked_marker,
TrackRegionResult* result,
const TrackRegionOptions* track_options) {
// Try to predict the location of the second marker.
const PredictDirection predict_direction = getPredictDirection(track_options);
bool predicted_position = false;
if (PredictMarkerPosition(tracks_, predict_direction, tracked_marker)) {
if (PredictMarkerPosition(tracks_, tracked_marker)) {
LG << "Successfully predicted!";
predicted_position = true;
} else {
@@ -158,20 +152,23 @@ bool AutoTrack::TrackMarker(Marker* tracked_marker,
// TODO(keir): Technically this could take a smaller slice from the source
// image instead of taking one the size of the search window.
FloatImage reference_image;
FrameAccessor::Key reference_key =
GetImageForMarker(reference_marker, frame_accessor_, &reference_image);
FrameAccessor::Key reference_key = GetImageForMarker(reference_marker,
frame_accessor_,
&reference_image);
if (!reference_key) {
LG << "Couldn't get frame for reference marker: " << reference_marker;
return false;
}
FloatImage reference_mask;
FrameAccessor::Key reference_mask_key =
GetMaskForMarker(reference_marker, frame_accessor_, &reference_mask);
FrameAccessor::Key reference_mask_key = GetMaskForMarker(reference_marker,
frame_accessor_,
&reference_mask);
FloatImage tracked_image;
FrameAccessor::Key tracked_key =
GetImageForMarker(*tracked_marker, frame_accessor_, &tracked_image);
FrameAccessor::Key tracked_key = GetImageForMarker(*tracked_marker,
frame_accessor_,
&tracked_image);
if (!tracked_key) {
frame_accessor_->ReleaseImage(reference_key);
LG << "Couldn't get frame for tracked marker: " << tracked_marker;
@@ -194,11 +191,9 @@ bool AutoTrack::TrackMarker(Marker* tracked_marker,
local_track_region_options.attempt_refine_before_brute = predicted_position;
TrackRegion(reference_image,
tracked_image,
x1,
y1,
x1, y1,
local_track_region_options,
x2,
y2,
x2, y2,
result);
// Copy results over the tracked marker.
@@ -213,7 +208,7 @@ bool AutoTrack::TrackMarker(Marker* tracked_marker,
tracked_marker->search_region.Offset(delta);
tracked_marker->source = Marker::TRACKED;
tracked_marker->status = Marker::UNKNOWN;
tracked_marker->reference_clip = reference_marker.clip;
tracked_marker->reference_clip = reference_marker.clip;
tracked_marker->reference_frame = reference_marker.frame;
// Release the images and masks from the accessor cache.
@@ -235,9 +230,7 @@ void AutoTrack::SetMarkers(vector<Marker>* markers) {
tracks_.SetMarkers(markers);
}
bool AutoTrack::GetMarker(int clip,
int frame,
int track,
bool AutoTrack::GetMarker(int clip, int frame, int track,
Marker* markers) const {
return tracks_.GetMarker(clip, frame, track, markers);
}
@@ -249,8 +242,7 @@ void AutoTrack::DetectAndTrack(const DetectAndTrackOptions& options) {
vector<Marker> previous_frame_markers;
// Q: How to decide track #s when detecting?
// Q: How to match markers from previous frame? set of prev frame tracks?
// Q: How to decide what markers should get tracked and which ones should
// not?
// Q: How to decide what markers should get tracked and which ones should not?
for (int frame = 0; frame < num_frames; ++frame) {
if (Cancelled()) {
LG << "Got cancel message while detecting and tracking...";
@@ -279,7 +271,8 @@ void AutoTrack::DetectAndTrack(const DetectAndTrackOptions& options) {
for (int i = 0; i < this_frame_markers.size(); ++i) {
tracks_in_this_frame.push_back(this_frame_markers[i].track);
}
std::sort(tracks_in_this_frame.begin(), tracks_in_this_frame.end());
std::sort(tracks_in_this_frame.begin(),
tracks_in_this_frame.end());
// Find tracks in the previous frame that are not in this one.
vector<Marker*> previous_frame_markers_to_track;


@@ -23,8 +23,8 @@
#ifndef LIBMV_AUTOTRACK_AUTOTRACK_H_
#define LIBMV_AUTOTRACK_AUTOTRACK_H_
#include "libmv/autotrack/region.h"
#include "libmv/autotrack/tracks.h"
#include "libmv/autotrack/region.h"
#include "libmv/tracking/track_region.h"
namespace libmv {
@@ -74,14 +74,15 @@ class AutoTrack {
Region search_region;
};
AutoTrack(FrameAccessor* frame_accessor) : frame_accessor_(frame_accessor) {}
AutoTrack(FrameAccessor* frame_accessor)
: frame_accessor_(frame_accessor) {}
// Marker manipulation.
// Clip manipulation.
// Set the number of clips. These clips will get accessed from the frame
// accessor, matches between frames found, and a reconstruction created.
// void SetNumFrames(int clip, int num_frames);
//void SetNumFrames(int clip, int num_frames);
// Tracking & Matching
@@ -89,7 +90,7 @@ class AutoTrack {
// Caller maintains ownership of *result and *tracked_marker.
bool TrackMarker(Marker* tracked_marker,
TrackRegionResult* result,
const TrackRegionOptions* track_options = NULL);
const TrackRegionOptions* track_options=NULL);
// Wrapper around Tracks API; however these may add additional processing.
void AddMarker(const Marker& tracked_marker);
@@ -98,36 +99,36 @@ class AutoTrack {
// TODO(keir): Implement frame matching! This could be very cool for loop
// closing and connecting across clips.
// void MatchFrames(int clip1, int frame1, int clip2, int frame2) {}
//void MatchFrames(int clip1, int frame1, int clip2, int frame2) {}
// Wrapper around the Reconstruction API.
// Returns the new ID.
int AddCameraIntrinsics(CameraIntrinsics* intrinsics) {
(void)intrinsics;
(void) intrinsics;
return 0;
} // XXX
int SetClipIntrinsics(int clip, int intrinsics) {
(void)clip;
(void)intrinsics;
(void) clip;
(void) intrinsics;
return 0;
} // XXX
} // XXX
enum Motion {
GENERAL_CAMERA_MOTION,
TRIPOD_CAMERA_MOTION,
};
int SetClipMotion(int clip, Motion motion) {
(void)clip;
(void)motion;
(void) clip;
(void) motion;
return 0;
} // XXX
} // XXX
// Decide what to refine for the given intrinsics. bundle_options is from
// bundle.h (e.g. BUNDLE_FOCAL_LENGTH | BUNDLE_RADIAL_K1).
void SetIntrinsicsRefine(int intrinsics, int bundle_options) {
(void)intrinsics;
(void)bundle_options;
} // XXX
(void) intrinsics;
(void) bundle_options;
} // XXX
// Keyframe read/write.
struct ClipFrame {
@@ -149,19 +150,20 @@ class AutoTrack {
};
void DetectAndTrack(const DetectAndTrackOptions& options);
struct DetectFeaturesInFrameOptions {};
void DetectFeaturesInFrame(
int clip, int frame, const DetectFeaturesInFrameOptions* options = NULL) {
(void)clip;
(void)frame;
(void)options;
} // XXX
struct DetectFeaturesInFrameOptions {
};
void DetectFeaturesInFrame(int clip, int frame,
const DetectFeaturesInFrameOptions* options=NULL) {
(void) clip;
(void) frame;
(void) options;
} // XXX
// Does not take ownership of the given listener, but keeps a reference to it.
void AddListener(OperationListener* listener) { (void)listener; } // XXX
void AddListener(OperationListener* listener) {(void) listener;} // XXX
// Create the initial reconstruction,
// void FindInitialReconstruction();
//void FindInitialReconstruction();
// State machine
//
@@ -200,17 +202,17 @@ class AutoTrack {
bool Cancelled() { return false; }
Tracks tracks_; // May be normalized camera coordinates or raw pixels.
// Reconstruction reconstruction_;
//Reconstruction reconstruction_;
// TODO(keir): Add the motion models here.
// vector<MotionModel> motion_models_;
//vector<MotionModel> motion_models_;
// TODO(keir): Should num_clips and num_frames get moved to FrameAccessor?
// TODO(keir): What about masking for clips and frames to prevent various
// things like reconstruction or tracking from happening on certain frames?
FrameAccessor* frame_accessor_;
// int num_clips_;
// vector<int> num_frames_; // Indexed by clip.
//int num_clips_;
//vector<int> num_frames_; // Indexed by clip.
// The intrinsics for each clip, assuming each clip has fixed intrinsics.
// TODO(keir): Decide what the semantics should be for varying focal length.


@@ -41,7 +41,7 @@ using libmv::FloatImage;
// implementations to cache filtered image pieces).
struct FrameAccessor {
struct Transform {
virtual ~Transform() {}
virtual ~Transform() { }
// The key should depend on the transform arguments. Must be non-zero.
virtual int64_t key() const = 0;
@@ -50,7 +50,10 @@ struct FrameAccessor {
virtual void run(const FloatImage& input, FloatImage* output) const = 0;
};
enum InputMode { MONO, RGBA };
enum InputMode {
MONO,
RGBA
};
typedef void* Key;
@@ -97,6 +100,6 @@ struct FrameAccessor {
virtual int NumFrames(int clip) = 0;
};
} // namespace mv
} // namespace libmv
#endif // LIBMV_AUTOTRACK_FRAME_ACCESSOR_H_


@@ -57,19 +57,23 @@ struct Marker {
float weight;
enum Source {
MANUAL, // The user placed this marker manually.
DETECTED, // A keypoint detector found this point.
TRACKED, // The tracking algorithm placed this marker.
MATCHED, // A matching algorithm (e.g. SIFT or SURF or ORB) found this.
PREDICTED, // A motion model predicted this marker. This is needed for
// handling occlusions in some cases where an imaginary marker
// is placed to keep camera motion smooth.
MANUAL, // The user placed this marker manually.
DETECTED, // A keypoint detector found this point.
TRACKED, // The tracking algorithm placed this marker.
MATCHED, // A matching algorithm (e.g. SIFT or SURF or ORB) found this.
PREDICTED, // A motion model predicted this marker. This is needed for
// handling occlusions in some cases where an imaginary marker
// is placed to keep camera motion smooth.
};
Source source;
// Markers may be inliers or outliers if the tracking fails; this allows
// visualizing the markers in the image.
enum Status { UNKNOWN, INLIER, OUTLIER };
enum Status {
UNKNOWN,
INLIER,
OUTLIER
};
Status status;
// When doing correlation tracking, where to search in the current frame for
@@ -86,7 +90,12 @@ struct Marker {
// another primitive (a rectangular prism). This captures the information
// needed to say that for example a collection of markers belongs to model #2
// (and model #2 is a plane).
enum ModelType { POINT, PLANE, LINE, CUBE };
enum ModelType {
POINT,
PLANE,
LINE,
CUBE
};
ModelType model_type;
// The model ID this track (e.g. the second model, which is a plane).
@@ -105,7 +114,7 @@ struct Marker {
int disabled_channels;
// Offset everything (center, patch, search) by the given delta.
template <typename T>
template<typename T>
void Offset(const T& offset) {
center += offset.template cast<float>();
patch.coordinates.rowwise() += offset.template cast<int>();
@@ -113,15 +122,19 @@ struct Marker {
}
// Shift the center to the given new position (and patch, search).
template <typename T>
template<typename T>
void SetPosition(const T& new_center) {
Offset(new_center - center);
}
};
inline std::ostream& operator<<(std::ostream& out, const Marker& marker) {
out << "{" << marker.clip << ", " << marker.frame << ", " << marker.track
<< ", (" << marker.center.x() << ", " << marker.center.y() << ")"
out << "{"
<< marker.clip << ", "
<< marker.frame << ", "
<< marker.track << ", ("
<< marker.center.x() << ", "
<< marker.center.y() << ")"
<< "}";
return out;
}


@@ -23,13 +23,18 @@
#ifndef LIBMV_AUTOTRACK_MODEL_H_
#define LIBMV_AUTOTRACK_MODEL_H_
#include "libmv/autotrack/quad.h"
#include "libmv/numeric/numeric.h"
#include "libmv/autotrack/quad.h"
namespace mv {
struct Model {
enum ModelType { POINT, PLANE, LINE, CUBE };
enum ModelType {
POINT,
PLANE,
LINE,
CUBE
};
// ???
};


@@ -20,8 +20,8 @@
//
// Author: mierle@gmail.com (Keir Mierle)
#include "libmv/autotrack/predict_tracks.h"
#include "libmv/autotrack/marker.h"
#include "libmv/autotrack/predict_tracks.h"
#include "libmv/autotrack/tracks.h"
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
@@ -31,8 +31,8 @@ namespace mv {
namespace {
using libmv::Vec2;
using libmv::vector;
using libmv::Vec2;
// Implied time delta between steps. Set empirically by tweaking and seeing
// what numbers did best at prediction.
@@ -57,8 +57,6 @@ const double dt = 3.8;
// For a typical system having constant velocity. This gives smooth-appearing
// predictions, but they are not always as accurate.
//
// clang-format off
const double velocity_state_transition_data[] = {
1, dt, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
@@ -67,13 +65,10 @@ const double velocity_state_transition_data[] = {
0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1
};
// clang-format on
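Aside, not part of the diff: the six-dimensional state is laid out as (x, v_x, a_x, y, v_y, a_y), with the positions at indices 0 and 3, so the matrix above realizes the constant-velocity prediction

    x_{k+1} = x_k + dt * v_{x,k},    v_{x,k+1} = v_{x,k}

(and likewise for y); the acceleration slots are unused by this model but keep the state size compatible with the alternative transition matrices in the #if 0 block below.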
#if 0
// This 3rd-order system also models acceleration. This makes for "jerky"
// predictions, but that tend to be more accurate.
//
// clang-format off
const double acceleration_state_transition_data[] = {
1, dt, dt*dt/2, 0, 0, 0,
0, 1, dt, 0, 0, 0,
@@ -82,12 +77,9 @@ const double acceleration_state_transition_data[] = {
0, 0, 0, 0, 1, dt,
0, 0, 0, 0, 0, 1
};
// clang-format on
// This system (attempts) to add an angular velocity component. However, it's
// total junk.
//
// clang-format off
const double angular_state_transition_data[] = {
1, dt, -dt, 0, 0, 0, // Position x
0, 1, 0, 0, 0, 0, // Velocity x
@@ -96,22 +88,17 @@ const double angular_state_transition_data[] = {
0, 0, 0, 0, 1, 0, // Velocity y
0, 0, 0, 0, 0, 1 // Ignored
};
// clang-format on
#endif
const double* state_transition_data = velocity_state_transition_data;
// Observation matrix.
// clang-format off
const double observation_data[] = {
1., 0., 0., 0., 0., 0.,
0., 0., 0., 1., 0., 0.
};
// clang-format on
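Aside, not part of the diff: the observation matrix picks the two position components (state indices 0 and 3) out of the six-dimensional state, which is what makes the filter a KalmanFilter<double, 6, 2>: six state variables, two measured values per marker.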
// Process covariance.
//
// clang-format off
const double process_covariance_data[] = {
35, 0, 0, 0, 0, 0,
0, 5, 0, 0, 0, 0,
@@ -120,19 +107,14 @@ const double process_covariance_data[] = {
0, 0, 0, 0, 5, 0,
0, 0, 0, 0, 0, 5
};
// clang-format on
// Process covariance.
const double measurement_covariance_data[] = {
0.01,
0.00,
0.00,
0.01,
0.01, 0.00,
0.00, 0.01,
};
// Initial covariance.
//
// clang-format off
const double initial_covariance_data[] = {
10, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
@@ -141,7 +123,6 @@ const double initial_covariance_data[] = {
0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1
};
// clang-format on
typedef mv::KalmanFilter<double, 6, 2> TrackerKalman;
@@ -157,7 +138,7 @@ bool OrderByFrameLessThan(const Marker* a, const Marker* b) {
}
return a->clip < b->clip;
}
return a->frame < b->frame;
return a->frame < b-> frame;
}
// Predicted must be after the previous markers (in the frame numbering sense).
@@ -165,9 +146,9 @@ void RunPrediction(const vector<Marker*> previous_markers,
Marker* predicted_marker) {
TrackerKalman::State state;
state.mean << previous_markers[0]->center.x(), 0, 0,
previous_markers[0]->center.y(), 0, 0;
state.covariance =
Eigen::Matrix<double, 6, 6, Eigen::RowMajor>(initial_covariance_data);
previous_markers[0]->center.y(), 0, 0;
state.covariance = Eigen::Matrix<double, 6, 6, Eigen::RowMajor>(
initial_covariance_data);
int current_frame = previous_markers[0]->frame;
int target_frame = predicted_marker->frame;
@@ -178,18 +159,19 @@ void RunPrediction(const vector<Marker*> previous_markers,
for (int i = 1; i < previous_markers.size(); ++i) {
// Step forward predicting the state until it is on the current marker.
int predictions = 0;
for (; current_frame != previous_markers[i]->frame;
for (;
current_frame != previous_markers[i]->frame;
current_frame += frame_delta) {
filter.Step(&state);
predictions++;
LG << "Predicted point (frame " << current_frame << "): " << state.mean(0)
<< ", " << state.mean(3);
LG << "Predicted point (frame " << current_frame << "): "
<< state.mean(0) << ", " << state.mean(3);
}
// Log the error -- not actually used, but interesting.
Vec2 error = previous_markers[i]->center.cast<double>() -
Vec2(state.mean(0), state.mean(3));
LG << "Prediction error for " << predictions << " steps: (" << error.x()
<< ", " << error.y() << "); norm: " << error.norm();
LG << "Prediction error for " << predictions << " steps: ("
<< error.x() << ", " << error.y() << "); norm: " << error.norm();
// Now that the state is predicted in the current frame, update the state
// based on the measurement from the current frame.
filter.Update(previous_markers[i]->center.cast<double>(),
@@ -202,8 +184,8 @@ void RunPrediction(const vector<Marker*> previous_markers,
// predict until the target frame.
for (; current_frame != target_frame; current_frame += frame_delta) {
filter.Step(&state);
LG << "Final predicted point (frame " << current_frame
<< "): " << state.mean(0) << ", " << state.mean(3);
LG << "Final predicted point (frame " << current_frame << "): "
<< state.mean(0) << ", " << state.mean(3);
}
// The x and y positions are at 0 and 3; ignore acceleration and velocity.
@@ -225,9 +207,7 @@ void RunPrediction(const vector<Marker*> previous_markers,
} // namespace
bool PredictMarkerPosition(const Tracks& tracks,
const PredictDirection direction,
Marker* marker) {
bool PredictMarkerPosition(const Tracks& tracks, Marker* marker) {
// Get all markers for this clip and track.
vector<Marker> markers;
tracks.GetMarkersForTrackInClip(marker->clip, marker->track, &markers);
@@ -273,13 +253,13 @@ bool PredictMarkerPosition(const Tracks& tracks,
} else if (insert_at != -1) {
// Found existing marker; scan before and after it.
forward_scan_begin = insert_at + 1;
forward_scan_end = markers.size() - 1;
forward_scan_end = markers.size() - 1;;
backward_scan_begin = insert_at - 1;
backward_scan_end = 0;
} else {
// Didn't find existing marker but found an insertion point.
forward_scan_begin = insert_before;
forward_scan_end = markers.size() - 1;
forward_scan_end = markers.size() - 1;;
backward_scan_begin = insert_before - 1;
backward_scan_end = 0;
}
@@ -292,17 +272,9 @@ bool PredictMarkerPosition(const Tracks& tracks,
}
bool predict_forward = false;
switch (direction) {
case PredictDirection::AUTO:
if (backward_scan_end <= backward_scan_begin) {
// TODO(keir): Add smarter handling and detecting of consecutive frames!
predict_forward = true;
}
break;
case PredictDirection::FORWARD: predict_forward = true; break;
case PredictDirection::BACKWARD: predict_forward = false; break;
if (backward_scan_end <= backward_scan_begin) {
// TODO(keir): Add smarter handling and detecting of consecutive frames!
predict_forward = true;
}
const int max_frames_to_predict_from = 20;
@@ -329,8 +301,9 @@ bool PredictMarkerPosition(const Tracks& tracks,
return false;
}
LG << "Predicting backward";
int predict_begin = std::min(
forward_scan_begin + max_frames_to_predict_from, forward_scan_end);
int predict_begin =
std::min(forward_scan_begin + max_frames_to_predict_from,
forward_scan_end);
int predict_end = forward_scan_begin;
vector<Marker*> previous_markers;
for (int i = predict_begin; i >= predict_end; --i) {
@@ -339,6 +312,7 @@ bool PredictMarkerPosition(const Tracks& tracks,
RunPrediction(previous_markers, marker);
return false;
}
}
} // namespace mv

View File

@@ -28,27 +28,9 @@ namespace mv {
class Tracks;
struct Marker;
enum class PredictDirection {
// Detect direction in which to predict marker position based on an existing
// Tracks context. Prediction will happen in the direction where information
// is missing.
// If markers exist on both sides of the given one, forward prediction is
// preferred.
AUTO,
// Predict position of the marker from the past to the future (used for
// forward tracking).
FORWARD,
// Predict position from the future to the past (used for backward tracking).
BACKWARD,
};
// Predict the position of the given marker, and update it accordingly. The
// existing position will be overwritten.
bool PredictMarkerPosition(const Tracks& tracks,
const PredictDirection direction,
Marker* marker);
bool PredictMarkerPosition(const Tracks& tracks, Marker* marker);
} // namespace mv
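
A minimal usage sketch of the three-argument API declared above, mirroring how the tests in this patch call it; `tracks` is assumed to be an existing mv::Tracks with markers already added, the wrapper function name is made up, and the marker's existing position is overwritten as documented.

// Sketch: predict track 0 of clip 0 at frame 8, tracking forward.
void PredictExample(const mv::Tracks& tracks) {
  mv::Marker predicted;
  predicted.clip = 0;
  predicted.track = 0;
  predicted.frame = 8;
  mv::PredictMarkerPosition(tracks, mv::PredictDirection::FORWARD, &predicted);
  // predicted.center (and predicted.patch) now hold the extrapolated position.
}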

View File

@@ -35,15 +35,17 @@ static void AddMarker(int frame, float x, float y, Tracks* tracks) {
marker.frame = frame;
marker.center.x() = x;
marker.center.y() = y;
marker.patch.coordinates << x - 1, y - 1, x + 1, y - 1, x + 1, y + 1, x - 1,
y + 1;
marker.patch.coordinates << x - 1, y - 1,
x + 1, y - 1,
x + 1, y + 1,
x - 1, y + 1;
tracks->AddMarker(marker);
}
TEST(PredictMarkerPosition, EasyLinearMotion) {
Tracks tracks;
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
@@ -56,7 +58,7 @@ TEST(PredictMarkerPosition, EasyLinearMotion) {
predicted.track = 0;
predicted.frame = 8;
PredictMarkerPosition(tracks, PredictDirection::AUTO, &predicted);
PredictMarkerPosition(tracks, &predicted);
double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
LG << "Got error: " << error;
EXPECT_LT(error, 0.1);
@@ -64,8 +66,10 @@ TEST(PredictMarkerPosition, EasyLinearMotion) {
// Check the patch coordinates as well.
double x = 9, y = 40.0;
Quad2Df expected_patch;
expected_patch.coordinates << x - 1, y - 1, x + 1, y - 1, x + 1, y + 1, x - 1,
y + 1;
expected_patch.coordinates << x - 1, y - 1,
x + 1, y - 1,
x + 1, y + 1,
x - 1, y + 1;
error = (expected_patch.coordinates - predicted.patch.coordinates).norm();
LG << "Patch error: " << error;
@@ -74,8 +78,8 @@ TEST(PredictMarkerPosition, EasyLinearMotion) {
TEST(PredictMarkerPosition, EasyBackwardLinearMotion) {
Tracks tracks;
AddMarker(8, 1.0, 0.0, &tracks);
AddMarker(7, 2.0, 5.0, &tracks);
AddMarker(8, 1.0, 0.0, &tracks);
AddMarker(7, 2.0, 5.0, &tracks);
AddMarker(6, 3.0, 10.0, &tracks);
AddMarker(5, 4.0, 15.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
@@ -88,7 +92,7 @@ TEST(PredictMarkerPosition, EasyBackwardLinearMotion) {
predicted.track = 0;
predicted.frame = 0;
PredictMarkerPosition(tracks, PredictDirection::AUTO, &predicted);
PredictMarkerPosition(tracks, &predicted);
LG << predicted;
double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
LG << "Got error: " << error;
@@ -97,8 +101,10 @@ TEST(PredictMarkerPosition, EasyBackwardLinearMotion) {
// Check the patch coordinates as well.
double x = 9.0, y = 40.0;
Quad2Df expected_patch;
expected_patch.coordinates << x - 1, y - 1, x + 1, y - 1, x + 1, y + 1, x - 1,
y + 1;
expected_patch.coordinates << x - 1, y - 1,
x + 1, y - 1,
x + 1, y + 1,
x - 1, y + 1;
error = (expected_patch.coordinates - predicted.patch.coordinates).norm();
LG << "Patch error: " << error;
@@ -107,8 +113,8 @@ TEST(PredictMarkerPosition, EasyBackwardLinearMotion) {
TEST(PredictMarkerPosition, TwoFrameGap) {
Tracks tracks;
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
@@ -121,7 +127,7 @@ TEST(PredictMarkerPosition, TwoFrameGap) {
predicted.track = 0;
predicted.frame = 8;
PredictMarkerPosition(tracks, PredictDirection::AUTO, &predicted);
PredictMarkerPosition(tracks, &predicted);
double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
LG << "Got error: " << error;
EXPECT_LT(error, 0.1);
@@ -129,8 +135,8 @@ TEST(PredictMarkerPosition, TwoFrameGap) {
TEST(PredictMarkerPosition, FourFrameGap) {
Tracks tracks;
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
// Missing frames 4, 5, 6, 7.
@@ -140,7 +146,7 @@ TEST(PredictMarkerPosition, FourFrameGap) {
predicted.track = 0;
predicted.frame = 8;
PredictMarkerPosition(tracks, PredictDirection::AUTO, &predicted);
PredictMarkerPosition(tracks, &predicted);
double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
LG << "Got error: " << error;
EXPECT_LT(error, 2.0); // Generous error due to larger prediction window.
@@ -148,13 +154,13 @@ TEST(PredictMarkerPosition, FourFrameGap) {
TEST(PredictMarkerPosition, MultipleGaps) {
Tracks tracks;
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
// AddMarker(3, 4.0, 15.0, &tracks); // Note the 3-frame gap.
// AddMarker(4, 5.0, 20.0, &tracks);
// AddMarker(5, 6.0, 25.0, &tracks);
AddMarker(6, 7.0, 30.0, &tracks); // Intermediate measurement.
AddMarker(6, 7.0, 30.0, &tracks); // Intermediate measurement.
// AddMarker(7, 8.0, 35.0, &tracks);
Marker predicted;
@@ -162,7 +168,7 @@ TEST(PredictMarkerPosition, MultipleGaps) {
predicted.track = 0;
predicted.frame = 8;
PredictMarkerPosition(tracks, PredictDirection::AUTO, &predicted);
PredictMarkerPosition(tracks, &predicted);
double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
LG << "Got error: " << error;
EXPECT_LT(error, 1.0); // Generous error due to larger prediction window.
@@ -172,21 +178,21 @@ TEST(PredictMarkerPosition, MarkersInRandomOrder) {
Tracks tracks;
// This is the same as the easy, except that the tracks are randomly ordered.
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(7, 8.0, 35.0, &tracks);
AddMarker(5, 6.0, 25.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
AddMarker(6, 7.0, 30.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(1, 2.0, 5.0, &tracks);
Marker predicted;
predicted.clip = 0;
predicted.track = 0;
predicted.frame = 8;
PredictMarkerPosition(tracks, PredictDirection::AUTO, &predicted);
PredictMarkerPosition(tracks, &predicted);
double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
LG << "Got error: " << error;
EXPECT_LT(error, 0.1);
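
All of these fixtures follow the same linear law, which makes the expected value easy to verify by hand: the marker at frame f sits at (f + 1, 5f), so frame 8 gives (9, 40), the Vec2f compared against above. Inside a test body, the EasyLinearMotion fixture could equally be generated in a loop (illustration only):

// Equivalent to the eight AddMarker calls in EasyLinearMotion.
for (int frame = 0; frame <= 7; ++frame) {
  AddMarker(frame, frame + 1.0f, 5.0f * frame, &tracks);
}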

View File

@@ -27,7 +27,7 @@
namespace mv {
template <typename T, int D>
template<typename T, int D>
struct Quad {
// A quad is 4 points; generally in 2D or 3D.
//
@@ -35,7 +35,7 @@ struct Quad {
// |\.
// | \.
// | z (z goes into screen)
// |
// |
// | r0----->r1
// | ^ |
// | | . |
@@ -44,7 +44,7 @@ struct Quad {
// | \.
// | \.
// v normal goes away (right handed).
// y
// y
//
// Each row is one of the corners coordinates; either (x, y) or (x, y, z).
Eigen::Matrix<T, 4, D> coordinates;
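
The Quad2Df used by the prediction tests in this patch is presumably this template instantiated for float and D = 2; a sketch (hypothetical helper name) of filling the four corners of a 2x2 patch centred on (x, y), one corner per row in the r0..r3 order of the diagram above:

// Illustration only: an axis-aligned 2x2 patch around (x, y).
mv::Quad<float, 2> MakePatchAround(float x, float y) {
  mv::Quad<float, 2> patch;
  patch.coordinates << x - 1, y - 1,   // r0: top-left
                       x + 1, y - 1,   // r1: top-right
                       x + 1, y + 1,   // r2: bottom-right
                       x - 1, y + 1;   // r3: bottom-left
  return patch;
}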

View File

@@ -57,17 +57,17 @@ class Reconstruction {
public:
// All methods copy their input reference or take ownership of the pointer.
void AddCameraPose(const CameraPose& pose);
int AddCameraIntrinsics(CameraIntrinsics* intrinsics);
int AddPoint(const Point& point);
int AddModel(Model* model);
int AddCameraIntrinsics(CameraIntrinsics* intrinsics);
int AddPoint(const Point& point);
int AddModel(Model* model);
// Returns the corresponding pose or point or NULL if missing.
CameraPose* CameraPoseForFrame(int clip, int frame);
CameraPose* CameraPoseForFrame(int clip, int frame);
const CameraPose* CameraPoseForFrame(int clip, int frame) const;
Point* PointForTrack(int track);
Point* PointForTrack(int track);
const Point* PointForTrack(int track) const;
const vector<vector<CameraPose>>& camera_poses() const {
const vector<vector<CameraPose> >& camera_poses() const {
return camera_poses_;
}

View File

@@ -46,7 +46,7 @@ struct Region {
Vec2f min;
Vec2f max;
template <typename T>
template<typename T>
void Offset(const T& offset) {
min += offset.template cast<float>();
max += offset.template cast<float>();

View File

@@ -23,8 +23,8 @@
#include "libmv/autotrack/tracks.h"
#include <algorithm>
#include <iterator>
#include <vector>
#include <iterator>
#include "libmv/numeric/numeric.h"
@@ -34,12 +34,12 @@ Tracks::Tracks(const Tracks& other) {
markers_ = other.markers_;
}
Tracks::Tracks(const vector<Marker>& markers) : markers_(markers) {
}
Tracks::Tracks(const vector<Marker>& markers) : markers_(markers) {}
bool Tracks::GetMarker(int clip, int frame, int track, Marker* marker) const {
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == clip && markers_[i].frame == frame &&
if (markers_[i].clip == clip &&
markers_[i].frame == frame &&
markers_[i].track == track) {
*marker = markers_[i];
return true;
@@ -60,7 +60,8 @@ void Tracks::GetMarkersForTrackInClip(int clip,
int track,
vector<Marker>* markers) const {
for (int i = 0; i < markers_.size(); ++i) {
if (clip == markers_[i].clip && track == markers_[i].track) {
if (clip == markers_[i].clip &&
track == markers_[i].track) {
markers->push_back(markers_[i]);
}
}
@@ -70,16 +71,15 @@ void Tracks::GetMarkersInFrame(int clip,
int frame,
vector<Marker>* markers) const {
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == clip && markers_[i].frame == frame) {
if (markers_[i].clip == clip &&
markers_[i].frame == frame) {
markers->push_back(markers_[i]);
}
}
}
void Tracks::GetMarkersForTracksInBothImages(int clip1,
int frame1,
int clip2,
int frame2,
void Tracks::GetMarkersForTracksInBothImages(int clip1, int frame1,
int clip2, int frame2,
vector<Marker>* markers) const {
std::vector<int> image1_tracks;
std::vector<int> image2_tracks;
@@ -99,19 +99,20 @@ void Tracks::GetMarkersForTracksInBothImages(int clip1,
std::sort(image1_tracks.begin(), image1_tracks.end());
std::sort(image2_tracks.begin(), image2_tracks.end());
std::vector<int> intersection;
std::set_intersection(image1_tracks.begin(),
image1_tracks.end(),
image2_tracks.begin(),
image2_tracks.end(),
std::set_intersection(image1_tracks.begin(), image1_tracks.end(),
image2_tracks.begin(), image2_tracks.end(),
std::back_inserter(intersection));
// Scan through and get the relevant tracks from the two images.
for (int i = 0; i < markers_.size(); ++i) {
// Save markers that are in either frame and are in our candidate set.
if (((markers_[i].clip == clip1 && markers_[i].frame == frame1) ||
(markers_[i].clip == clip2 && markers_[i].frame == frame2)) &&
std::binary_search(
intersection.begin(), intersection.end(), markers_[i].track)) {
if (((markers_[i].clip == clip1 &&
markers_[i].frame == frame1) ||
(markers_[i].clip == clip2 &&
markers_[i].frame == frame2)) &&
std::binary_search(intersection.begin(),
intersection.end(),
markers_[i].track)) {
markers->push_back(markers_[i]);
}
}
@@ -121,7 +122,8 @@ void Tracks::AddMarker(const Marker& marker) {
// TODO(keir): This is quadratic for repeated insertions. Fix this by adding
// a smarter data structure like a set<>.
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == marker.clip && markers_[i].frame == marker.frame &&
if (markers_[i].clip == marker.clip &&
markers_[i].frame == marker.frame &&
markers_[i].track == marker.track) {
markers_[i] = marker;
return;
@@ -137,7 +139,8 @@ void Tracks::SetMarkers(vector<Marker>* markers) {
bool Tracks::RemoveMarker(int clip, int frame, int track) {
int size = markers_.size();
for (int i = 0; i < markers_.size(); ++i) {
if (markers_[i].clip == clip && markers_[i].frame == frame &&
if (markers_[i].clip == clip &&
markers_[i].frame == frame &&
markers_[i].track == track) {
markers_[i] = markers_[size - 1];
markers_.resize(size - 1);
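
GetMarkersForTracksInBothImages above relies on a standard pattern: sort the two per-frame track-id lists, intersect them, then binary-search the sorted intersection while scanning the markers. A standalone sketch of that pattern over plain ints (the helper name is made up):

#include <algorithm>
#include <iterator>
#include <vector>

// Returns the sorted set of ids present in both inputs; the result can then
// be queried with std::binary_search, as the marker scan above does.
std::vector<int> CommonTracks(std::vector<int> a, std::vector<int> b) {
  std::sort(a.begin(), a.end());
  std::sort(b.begin(), b.end());
  std::vector<int> common;
  std::set_intersection(a.begin(), a.end(), b.begin(), b.end(),
                        std::back_inserter(common));
  return common;
}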

View File

@@ -23,8 +23,8 @@
#ifndef LIBMV_AUTOTRACK_TRACKS_H_
#define LIBMV_AUTOTRACK_TRACKS_H_
#include "libmv/autotrack/marker.h"
#include "libmv/base/vector.h"
#include "libmv/autotrack/marker.h"
namespace mv {
@@ -33,8 +33,8 @@ using libmv::vector;
// The Tracks container stores correspondences between frames.
class Tracks {
public:
Tracks() {}
Tracks(const Tracks& other);
Tracks() { }
Tracks(const Tracks &other);
// Create a tracks object with markers already initialized. Copies markers.
explicit Tracks(const vector<Marker>& markers);
@@ -51,10 +51,8 @@ class Tracks {
//
// This is not the same as the union of the markers in frame1 and
// frame2; each marker is for a track that appears in both images.
void GetMarkersForTracksInBothImages(int clip1,
int frame1,
int clip2,
int frame2,
void GetMarkersForTracksInBothImages(int clip1, int frame1,
int clip2, int frame2,
vector<Marker>* markers) const;
void AddMarker(const Marker& marker);

View File

@@ -22,8 +22,8 @@
#include "libmv/autotrack/tracks.h"
#include "libmv/logging/logging.h"
#include "testing/testing.h"
#include "libmv/logging/logging.h"
namespace mv {

View File

@@ -41,11 +41,11 @@
namespace libmv {
void* aligned_malloc(int size, int alignment) {
void *aligned_malloc(int size, int alignment) {
#ifdef _WIN32
return _aligned_malloc(size, alignment);
#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__APPLE__)
void* result;
void *result;
if (posix_memalign(&result, alignment, size)) {
// non-zero means allocation error
@@ -58,7 +58,7 @@ void* aligned_malloc(int size, int alignment) {
#endif
}
void aligned_free(void* ptr) {
void aligned_free(void *ptr) {
#ifdef _WIN32
_aligned_free(ptr);
#else

View File

@@ -24,10 +24,10 @@
namespace libmv {
// Allocate block of size bytes at least aligned to a given value.
void* aligned_malloc(int size, int alignment);
void *aligned_malloc(int size, int alignment);
// Free memory allocated by aligned_malloc.
void aligned_free(void* ptr);
void aligned_free(void *ptr);
} // namespace libmv
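
A usage sketch of the pair declared above; the alignment and buffer size are arbitrary example values and the wrapper function is hypothetical.

// Allocate a 16-byte aligned buffer of 64 floats and release it again.
void AlignedAllocExample() {
  float* buffer =
      static_cast<float*>(libmv::aligned_malloc(64 * sizeof(float), 16));
  // ... use buffer ...
  libmv::aligned_free(buffer);
}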

View File

@@ -28,7 +28,6 @@ class IdGenerator {
public:
IdGenerator() : next_(0) {}
ID Generate() { return next_++; }
private:
ID next_;
};

View File

@@ -26,8 +26,8 @@
namespace libmv {
using std::make_pair;
using std::map;
using std::make_pair;
} // namespace libmv

View File

@@ -30,44 +30,44 @@ namespace libmv {
* A handle for a heap-allocated resource that should be freed when it goes out
* of scope. This looks similar to the one found in TR1.
*/
template <typename T>
template<typename T>
class scoped_ptr {
public:
scoped_ptr(T* resource) : resource_(resource) {}
scoped_ptr(T *resource) : resource_(resource) {}
~scoped_ptr() { reset(0); }
T* get() const { return resource_; }
T* operator->() const { return resource_; }
T& operator*() const { return *resource_; }
T *get() const { return resource_; }
T *operator->() const { return resource_; }
T &operator*() const { return *resource_; }
void reset(T* new_resource) {
void reset(T *new_resource) {
if (sizeof(T)) {
delete resource_;
}
resource_ = new_resource;
}
T* release() {
T* released_resource = resource_;
T *release() {
T *released_resource = resource_;
resource_ = 0;
return released_resource;
}
private:
// No copying allowed.
T* resource_;
T *resource_;
};
// Same as scoped_ptr but caller must allocate the data
// with new[] and the destructor will free the memory
// using delete[].
template <typename T>
template<typename T>
class scoped_array {
public:
scoped_array(T* array) : array_(array) {}
scoped_array(T *array) : array_(array) {}
~scoped_array() { reset(NULL); }
T* get() const { return array_; }
T *get() const { return array_; }
T& operator[](std::ptrdiff_t i) const {
assert(i >= 0);
@@ -75,27 +75,25 @@ class scoped_array {
return array_[i];
}
void reset(T* new_array) {
void reset(T *new_array) {
if (sizeof(T)) {
delete array_;
}
array_ = new_array;
}
T* release() {
T* released_array = array_;
T *release() {
T *released_array = array_;
array_ = NULL;
return released_array;
}
private:
T* array_;
T *array_;
// Forbid comparison of different scoped_array types.
template <typename T2>
bool operator==(scoped_array<T2> const& p2) const;
template <typename T2>
bool operator!=(scoped_array<T2> const& p2) const;
template <typename T2> bool operator==(scoped_array<T2> const& p2) const;
template <typename T2> bool operator!=(scoped_array<T2> const& p2) const;
// Disallow evil constructors
scoped_array(const scoped_array&);
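
A usage sketch of the two handles above, assuming they live in namespace libmv as in the rest of this patch; the function name and values are made up for illustration.

void ScopedExample() {
  libmv::scoped_ptr<int> guard(new int(42));
  const int value = *guard;        // operator* dereferences the held pointer
  (void)value;
  int* raw = guard.release();      // guard gives up ownership...
  delete raw;                      // ...so the caller frees it

  libmv::scoped_array<int> table(new int[8]);
  table[0] = 1;                    // operator[] indexes into the array
}                                  // table's array is freed at scope exit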

View File

@@ -25,9 +25,9 @@ namespace libmv {
namespace {
struct FreeMe {
FreeMe(int* freed) : freed(freed) {}
FreeMe(int *freed) : freed(freed) {}
~FreeMe() { (*freed)++; }
int* freed;
int *freed;
};
TEST(ScopedPtr, NullDoesNothing) {
@@ -61,8 +61,8 @@ TEST(ScopedPtr, Reset) {
TEST(ScopedPtr, ReleaseAndGet) {
int frees = 0;
FreeMe* allocated = new FreeMe(&frees);
FreeMe* released = NULL;
FreeMe *allocated = new FreeMe(&frees);
FreeMe *released = NULL;
{
scoped_ptr<FreeMe> scoped(allocated);
EXPECT_EQ(0, frees);

View File

@@ -19,9 +19,9 @@
// IN THE SOFTWARE.
#include "libmv/base/vector.h"
#include <algorithm>
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
#include <algorithm>
namespace {
using namespace libmv;
@@ -62,7 +62,7 @@ int foo_destruct_calls = 0;
struct Foo {
public:
Foo() : value(5) { foo_construct_calls++; }
~Foo() { foo_destruct_calls++; }
~Foo() { foo_destruct_calls++; }
int value;
};
@@ -150,7 +150,7 @@ TEST_F(VectorTest, CopyConstructor) {
a.push_back(3);
vector<int> b(a);
EXPECT_EQ(a.size(), b.size());
EXPECT_EQ(a.size(), b.size());
for (int i = 0; i < a.size(); ++i) {
EXPECT_EQ(a[i], b[i]);
}
@@ -164,7 +164,7 @@ TEST_F(VectorTest, OperatorEquals) {
b = a;
EXPECT_EQ(a.size(), b.size());
EXPECT_EQ(a.size(), b.size());
for (int i = 0; i < a.size(); ++i) {
EXPECT_EQ(a[i], b[i]);
}

View File

@@ -18,13 +18,14 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#ifndef LIBMV_BASE_VECTOR_UTILS_H_
#define LIBMV_BASE_VECTOR_UTILS_H_
/// Delete the contents of a container.
template <class Array>
void DeleteElements(Array* array) {
for (int i = 0; i < array->size(); ++i) {
void DeleteElements(Array *array) {
for (int i = 0; i < array->size(); ++i) {
delete (*array)[i];
}
array->clear();
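
A usage sketch for the helper above; the element type and function name are only examples.

#include <vector>

void DeleteElementsExample() {
  std::vector<int*> owned;
  owned.push_back(new int(1));
  owned.push_back(new int(2));
  DeleteElements(&owned);  // frees both ints, then clears the vector
}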

View File

@@ -18,17 +18,18 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
#include <cmath>
#include <iostream>
#include "libmv/image/image.h"
#include <iostream>
#include <cmath>
namespace libmv {
void FloatArrayToScaledByteArray(const Array3Df& float_array,
Array3Du* byte_array,
bool automatic_range_detection) {
void FloatArrayToScaledByteArray(const Array3Df &float_array,
Array3Du *byte_array,
bool automatic_range_detection
) {
byte_array->ResizeLike(float_array);
float minval = HUGE_VAL;
float minval = HUGE_VAL;
float maxval = -HUGE_VAL;
if (automatic_range_detection) {
for (int i = 0; i < float_array.Height(); ++i) {
@@ -53,8 +54,8 @@ void FloatArrayToScaledByteArray(const Array3Df& float_array,
}
}
void ByteArrayToScaledFloatArray(const Array3Du& byte_array,
Array3Df* float_array) {
void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
Array3Df *float_array) {
float_array->ResizeLike(byte_array);
for (int i = 0; i < byte_array.Height(); ++i) {
for (int j = 0; j < byte_array.Width(); ++j) {
@@ -65,10 +66,10 @@ void ByteArrayToScaledFloatArray(const Array3Du& byte_array,
}
}
void SplitChannels(const Array3Df& input,
Array3Df* channel0,
Array3Df* channel1,
Array3Df* channel2) {
void SplitChannels(const Array3Df &input,
Array3Df *channel0,
Array3Df *channel1,
Array3Df *channel2) {
assert(input.Depth() >= 3);
channel0->Resize(input.Height(), input.Width());
channel1->Resize(input.Height(), input.Width());
@@ -82,7 +83,7 @@ void SplitChannels(const Array3Df& input,
}
}
void PrintArray(const Array3Df& array) {
void PrintArray(const Array3Df &array) {
using namespace std;
printf("[\n");

View File

@@ -44,13 +44,13 @@ class ArrayND : public BaseArray {
ArrayND() : data_(NULL), own_data_(true) { Resize(Index(0)); }
/// Create an array with the specified shape.
ArrayND(const Index& shape) : data_(NULL), own_data_(true) { Resize(shape); }
ArrayND(const Index &shape) : data_(NULL), own_data_(true) { Resize(shape); }
/// Create an array with the specified shape.
ArrayND(int* shape) : data_(NULL), own_data_(true) { Resize(shape); }
ArrayND(int *shape) : data_(NULL), own_data_(true) { Resize(shape); }
/// Copy constructor.
ArrayND(const ArrayND<T, N>& b) : data_(NULL), own_data_(true) {
ArrayND(const ArrayND<T, N> &b) : data_(NULL), own_data_(true) {
ResizeLike(b);
std::memcpy(Data(), b.Data(), sizeof(T) * Size());
}
@@ -58,7 +58,7 @@ class ArrayND : public BaseArray {
ArrayND(int s0) : data_(NULL), own_data_(true) { Resize(s0); }
ArrayND(int s0, int s1) : data_(NULL), own_data_(true) { Resize(s0, s1); }
ArrayND(int s0, int s1, int s2) : data_(NULL), own_data_(true) {
Resize(s0, s1, s2);
Resize(s0, s1, s2);
}
ArrayND(T* data, int s0, int s1, int s2)
@@ -69,24 +69,28 @@ class ArrayND : public BaseArray {
/// Destructor deletes pixel data.
~ArrayND() {
if (own_data_) {
delete[] data_;
delete [] data_;
}
}
/// Assignation copies pixel data.
ArrayND& operator=(const ArrayND<T, N>& b) {
ArrayND &operator=(const ArrayND<T, N> &b) {
assert(this != &b);
ResizeLike(b);
std::memcpy(Data(), b.Data(), sizeof(T) * Size());
return *this;
}
const Index& Shapes() const { return shape_; }
const Index &Shapes() const {
return shape_;
}
const Index& Strides() const { return strides_; }
const Index &Strides() const {
return strides_;
}
/// Create an array of shape s.
void Resize(const Index& new_shape) {
void Resize(const Index &new_shape) {
if (data_ != NULL && shape_ == new_shape) {
// Don't bother reallocating if the shapes match.
return;
@@ -97,7 +101,7 @@ class ArrayND : public BaseArray {
strides_(i - 1) = strides_(i) * shape_(i);
}
if (own_data_) {
delete[] data_;
delete [] data_;
data_ = NULL;
if (Size() > 0) {
data_ = new T[Size()];
@@ -105,13 +109,15 @@ class ArrayND : public BaseArray {
}
}
template <typename D>
void ResizeLike(const ArrayND<D, N>& other) {
template<typename D>
void ResizeLike(const ArrayND<D, N> &other) {
Resize(other.Shape());
}
/// Resizes the array to shape s. All data is lost.
void Resize(const int* new_shape_array) { Resize(Index(new_shape_array)); }
void Resize(const int *new_shape_array) {
Resize(Index(new_shape_array));
}
/// Resize a 1D array to length s0.
void Resize(int s0) {
@@ -130,7 +136,9 @@ class ArrayND : public BaseArray {
}
// Match Eigen2's API.
void resize(int rows, int cols) { Resize(rows, cols); }
void resize(int rows, int cols) {
Resize(rows, cols);
}
/// Resize a 3D array to shape (s0,s1,s2).
void Resize(int s0, int s1, int s2) {
@@ -139,11 +147,11 @@ class ArrayND : public BaseArray {
Resize(shape);
}
template <typename D>
void CopyFrom(const ArrayND<D, N>& other) {
template<typename D>
void CopyFrom(const ArrayND<D, N> &other) {
ResizeLike(other);
T* data = Data();
const D* other_data = other.Data();
T *data = Data();
const D *other_data = other.Data();
for (int i = 0; i < Size(); ++i) {
data[i] = T(other_data[i]);
}
@@ -163,13 +171,19 @@ class ArrayND : public BaseArray {
}
/// Return a tuple containing the length of each axis.
const Index& Shape() const { return shape_; }
const Index &Shape() const {
return shape_;
}
/// Return the length of an axis.
int Shape(int axis) const { return shape_(axis); }
int Shape(int axis) const {
return shape_(axis);
}
/// Return the distance between neighboring elements along axis.
int Stride(int axis) const { return strides_(axis); }
int Stride(int axis) const {
return strides_(axis);
}
/// Return the number of elements of the array.
int Size() const {
@@ -180,16 +194,18 @@ class ArrayND : public BaseArray {
}
/// Return the total amount of memory used by the array.
int MemorySizeInBytes() const { return sizeof(*this) + Size() * sizeof(T); }
int MemorySizeInBytes() const {
return sizeof(*this) + Size() * sizeof(T);
}
/// Pointer to the first element of the array.
T* Data() { return data_; }
T *Data() { return data_; }
/// Constant pointer to the first element of the array.
const T* Data() const { return data_; }
const T *Data() const { return data_; }
/// Distance between the first element and the element at position index.
int Offset(const Index& index) const {
int Offset(const Index &index) const {
int offset = 0;
for (int i = 0; i < N; ++i)
offset += index(i) * Stride(i);
@@ -215,23 +231,25 @@ class ArrayND : public BaseArray {
}
/// Return a reference to the element at position index.
T& operator()(const Index& index) {
T &operator()(const Index &index) {
// TODO(pau) Boundary checking in debug mode.
return *(Data() + Offset(index));
return *( Data() + Offset(index) );
}
/// 1D specialization.
T& operator()(int i0) { return *(Data() + Offset(i0)); }
T &operator()(int i0) {
return *( Data() + Offset(i0) );
}
/// 2D specialization.
T& operator()(int i0, int i1) {
T &operator()(int i0, int i1) {
assert(0 <= i0 && i0 < Shape(0));
assert(0 <= i1 && i1 < Shape(1));
return *(Data() + Offset(i0, i1));
}
/// 3D specialization.
T& operator()(int i0, int i1, int i2) {
T &operator()(int i0, int i1, int i2) {
assert(0 <= i0 && i0 < Shape(0));
assert(0 <= i1 && i1 < Shape(1));
assert(0 <= i2 && i2 < Shape(2));
@@ -239,27 +257,29 @@ class ArrayND : public BaseArray {
}
/// Return a constant reference to the element at position index.
const T& operator()(const Index& index) const {
const T &operator()(const Index &index) const {
return *(Data() + Offset(index));
}
/// 1D specialization.
const T& operator()(int i0) const { return *(Data() + Offset(i0)); }
const T &operator()(int i0) const {
return *(Data() + Offset(i0));
}
/// 2D specialization.
const T& operator()(int i0, int i1) const {
const T &operator()(int i0, int i1) const {
assert(0 <= i0 && i0 < Shape(0));
assert(0 <= i1 && i1 < Shape(1));
return *(Data() + Offset(i0, i1));
}
/// 3D specialization.
const T& operator()(int i0, int i1, int i2) const {
const T &operator()(int i0, int i1, int i2) const {
return *(Data() + Offset(i0, i1, i2));
}
/// True if index is inside array.
bool Contains(const Index& index) const {
bool Contains(const Index &index) const {
for (int i = 0; i < N; ++i)
if (index(i) < 0 || index(i) >= Shape(i))
return false;
@@ -267,24 +287,26 @@ class ArrayND : public BaseArray {
}
/// 1D specialization.
bool Contains(int i0) const { return 0 <= i0 && i0 < Shape(0); }
bool Contains(int i0) const {
return 0 <= i0 && i0 < Shape(0);
}
/// 2D specialization.
bool Contains(int i0, int i1) const {
return 0 <= i0 && i0 < Shape(0) && 0 <= i1 && i1 < Shape(1);
return 0 <= i0 && i0 < Shape(0)
&& 0 <= i1 && i1 < Shape(1);
}
/// 3D specialization.
bool Contains(int i0, int i1, int i2) const {
return 0 <= i0 && i0 < Shape(0) && 0 <= i1 && i1 < Shape(1) && 0 <= i2 &&
i2 < Shape(2);
return 0 <= i0 && i0 < Shape(0)
&& 0 <= i1 && i1 < Shape(1)
&& 0 <= i2 && i2 < Shape(2);
}
bool operator==(const ArrayND<T, N>& other) const {
if (shape_ != other.shape_)
return false;
if (strides_ != other.strides_)
return false;
bool operator==(const ArrayND<T, N> &other) const {
if (shape_ != other.shape_) return false;
if (strides_ != other.strides_) return false;
for (int i = 0; i < Size(); ++i) {
if (this->Data()[i] != other.Data()[i])
return false;
@@ -292,11 +314,11 @@ class ArrayND : public BaseArray {
return true;
}
bool operator!=(const ArrayND<T, N>& other) const {
bool operator!=(const ArrayND<T, N> &other) const {
return !(*this == other);
}
ArrayND<T, N> operator*(const ArrayND<T, N>& other) const {
ArrayND<T, N> operator*(const ArrayND<T, N> &other) const {
assert(Shape() = other.Shape());
ArrayND<T, N> res;
res.ResizeLike(*this);
@@ -314,7 +336,7 @@ class ArrayND : public BaseArray {
Index strides_;
/// Pointer to the first element of the array.
T* data_;
T *data_;
/// Flag if this Array either own or reference the data
bool own_data_;
@@ -324,20 +346,30 @@ class ArrayND : public BaseArray {
template <typename T>
class Array3D : public ArrayND<T, 3> {
typedef ArrayND<T, 3> Base;
public:
Array3D() : Base() {}
Array3D(int height, int width, int depth = 1) : Base(height, width, depth) {}
Array3D()
: Base() {
}
Array3D(int height, int width, int depth = 1)
: Base(height, width, depth) {
}
Array3D(T* data, int height, int width, int depth = 1)
: Base(data, height, width, depth) {}
: Base(data, height, width, depth) {
}
void Resize(int height, int width, int depth = 1) {
Base::Resize(height, width, depth);
}
int Height() const { return Base::Shape(0); }
int Width() const { return Base::Shape(1); }
int Depth() const { return Base::Shape(2); }
int Height() const {
return Base::Shape(0);
}
int Width() const {
return Base::Shape(1);
}
int Depth() const {
return Base::Shape(2);
}
// Match Eigen2's API so that Array3D's and Mat*'s can work together via
// template magic.
@@ -345,15 +377,15 @@ class Array3D : public ArrayND<T, 3> {
int cols() const { return Width(); }
int depth() const { return Depth(); }
int Get_Step() const { return Width() * Depth(); }
int Get_Step() const { return Width()*Depth(); }
/// Enable accessing with 2 indices for grayscale images.
T& operator()(int i0, int i1, int i2 = 0) {
T &operator()(int i0, int i1, int i2 = 0) {
assert(0 <= i0 && i0 < Height());
assert(0 <= i1 && i1 < Width());
return Base::operator()(i0, i1, i2);
}
const T& operator()(int i0, int i1, int i2 = 0) const {
const T &operator()(int i0, int i1, int i2 = 0) const {
assert(0 <= i0 && i0 < Height());
assert(0 <= i1 && i1 < Width());
return Base::operator()(i0, i1, i2);
@@ -366,29 +398,31 @@ typedef Array3D<int> Array3Di;
typedef Array3D<float> Array3Df;
typedef Array3D<short> Array3Ds;
void SplitChannels(const Array3Df& input,
Array3Df* channel0,
Array3Df* channel1,
Array3Df* channel2);
void SplitChannels(const Array3Df &input,
Array3Df *channel0,
Array3Df *channel1,
Array3Df *channel2);
void PrintArray(const Array3Df& array);
void PrintArray(const Array3Df &array);
/** Convert a float array into a byte array by scaling values by 255* (max-min).
* where max and min are automatically detected
* where max and min are automatically detected
* (if automatic_range_detection = true)
* \note and TODO this automatic detection only works when the image contains
* at least one pixel of both bounds.
**/
void FloatArrayToScaledByteArray(const Array3Df& float_array,
Array3Du* byte_array,
void FloatArrayToScaledByteArray(const Array3Df &float_array,
Array3Du *byte_array,
bool automatic_range_detection = false);
//! Convert a byte array into a float array by dividing values by 255.
void ByteArrayToScaledFloatArray(const Array3Du& byte_array,
Array3Df* float_array);
void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
Array3Df *float_array);
template <typename AArrayType, typename BArrayType, typename CArrayType>
void MultiplyElements(const AArrayType& a, const BArrayType& b, CArrayType* c) {
void MultiplyElements(const AArrayType &a,
const BArrayType &b,
CArrayType *c) {
// This function does an element-wise multiply between
// the two Arrays A and B, and stores the result in C.
// A and B must have the same dimensions.
@@ -401,7 +435,7 @@ void MultiplyElements(const AArrayType& a, const BArrayType& b, CArrayType* c) {
// The index starts at the maximum value for each dimension
const typename CArrayType::Index& cShape = c->Shape();
for (int i = 0; i < CArrayType::Index::SIZE; ++i)
for ( int i = 0; i < CArrayType::Index::SIZE; ++i )
index(i) = cShape(i) - 1;
// After each multiplication, the highest-dimensional index is reduced.
@@ -409,12 +443,12 @@ void MultiplyElements(const AArrayType& a, const BArrayType& b, CArrayType* c) {
// and decrements the index of the next lower dimension.
// This ripple-action continues until the entire new array has been
// calculated, indicated by dimension zero having a negative index.
while (index(0) >= 0) {
while ( index(0) >= 0 ) {
(*c)(index) = a(index) * b(index);
int dimension = CArrayType::Index::SIZE - 1;
index(dimension) = index(dimension) - 1;
while (dimension > 0 && index(dimension) < 0) {
while ( dimension > 0 && index(dimension) < 0 ) {
index(dimension) = cShape(dimension) - 1;
index(dimension - 1) = index(dimension - 1) - 1;
--dimension;
@@ -423,9 +457,9 @@ void MultiplyElements(const AArrayType& a, const BArrayType& b, CArrayType* c) {
}
template <typename TA, typename TB, typename TC>
void MultiplyElements(const ArrayND<TA, 3>& a,
const ArrayND<TB, 3>& b,
ArrayND<TC, 3>* c) {
void MultiplyElements(const ArrayND<TA, 3> &a,
const ArrayND<TB, 3> &b,
ArrayND<TC, 3> *c) {
// Specialization for N==3
c->ResizeLike(a);
assert(a.Shape(0) == b.Shape(0));
@@ -441,9 +475,9 @@ void MultiplyElements(const ArrayND<TA, 3>& a,
}
template <typename TA, typename TB, typename TC>
void MultiplyElements(const Array3D<TA>& a,
const Array3D<TB>& b,
Array3D<TC>* c) {
void MultiplyElements(const Array3D<TA> &a,
const Array3D<TB> &b,
Array3D<TC> *c) {
// Specialization for N==3
c->ResizeLike(a);
assert(a.Shape(0) == b.Shape(0));

View File

@@ -21,9 +21,9 @@
#include "libmv/image/array_nd.h"
#include "testing/testing.h"
using libmv::ArrayND;
using libmv::Array3D;
using libmv::Array3Df;
using libmv::ArrayND;
namespace {
@@ -100,7 +100,7 @@ TEST(ArrayND, Size) {
int l[] = {0, 1, 2};
ArrayND<int, 3>::Index last(l);
EXPECT_EQ(a.Size(), a.Offset(last) + 1);
EXPECT_EQ(a.Size(), a.Offset(last)+1);
EXPECT_TRUE(a.Contains(last));
EXPECT_FALSE(a.Contains(shape));
}
@@ -120,8 +120,8 @@ TEST(ArrayND, Parenthesis) {
int s[] = {3, 3};
ArrayND<int, 2> a(s);
*(a.Data() + 0) = 0;
*(a.Data() + 5) = 5;
*(a.Data()+0) = 0;
*(a.Data()+5) = 5;
int i1[] = {0, 0};
EXPECT_EQ(0, a(Index(i1)));
@@ -210,7 +210,7 @@ TEST(ArrayND, MultiplyElements) {
b(1, 1, 0) = 3;
ArrayND<int, 3> c;
MultiplyElements(a, b, &c);
EXPECT_FLOAT_EQ(6, c(0, 0, 0));
EXPECT_FLOAT_EQ(6, c(0, 0, 0));
EXPECT_FLOAT_EQ(10, c(0, 1, 0));
EXPECT_FLOAT_EQ(12, c(1, 0, 0));
EXPECT_FLOAT_EQ(12, c(1, 1, 0));

View File

@@ -29,7 +29,7 @@ namespace libmv {
// Compute a Gaussian kernel and derivative, such that you can take the
// derivative of an image by convolving with the kernel horizontally then the
// derivative vertically to get (eg) the y derivative.
void ComputeGaussianKernel(double sigma, Vec* kernel, Vec* derivative) {
void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative) {
assert(sigma >= 0.0);
// 0.004 implies a 3 pixel kernel with 1 pixel sigma.
@@ -37,7 +37,7 @@ void ComputeGaussianKernel(double sigma, Vec* kernel, Vec* derivative) {
// Calculate the kernel size based on sigma such that it is odd.
float precisehalfwidth = GaussianInversePositive(truncation_factor, sigma);
int width = lround(2 * precisehalfwidth);
int width = lround(2*precisehalfwidth);
if (width % 2 == 0) {
width++;
}
@@ -47,7 +47,7 @@ void ComputeGaussianKernel(double sigma, Vec* kernel, Vec* derivative) {
kernel->setZero();
derivative->setZero();
int halfwidth = width / 2;
for (int i = -halfwidth; i <= halfwidth; ++i) {
for (int i = -halfwidth; i <= halfwidth; ++i) {
(*kernel)(i + halfwidth) = Gaussian(i, sigma);
(*derivative)(i + halfwidth) = GaussianDerivative(i, sigma);
}
@@ -57,21 +57,16 @@ void ComputeGaussianKernel(double sigma, Vec* kernel, Vec* derivative) {
// Normalize the derivative differently. See
// www.cs.duke.edu/courses/spring03/cps296.1/handouts/Image%20Processing.pdf
double factor = 0.;
for (int i = -halfwidth; i <= halfwidth; ++i) {
factor -= i * (*derivative)(i + halfwidth);
for (int i = -halfwidth; i <= halfwidth; ++i) {
factor -= i*(*derivative)(i+halfwidth);
}
*derivative /= factor;
}
template <int size, bool vertical>
void FastConvolve(const Vec& kernel,
int width,
int height,
const float* src,
int src_stride,
int src_line_stride,
float* dst,
int dst_stride) {
void FastConvolve(const Vec &kernel, int width, int height,
const float* src, int src_stride, int src_line_stride,
float* dst, int dst_stride) {
double coefficients[2 * size + 1];
for (int k = 0; k < 2 * size + 1; ++k) {
coefficients[k] = kernel(2 * size - k);
@@ -98,14 +93,14 @@ void FastConvolve(const Vec& kernel,
}
}
template <bool vertical>
void Convolve(const Array3Df& in,
const Vec& kernel,
Array3Df* out_pointer,
template<bool vertical>
void Convolve(const Array3Df &in,
const Vec &kernel,
Array3Df *out_pointer,
int plane) {
int width = in.Width();
int height = in.Height();
Array3Df& out = *out_pointer;
Array3Df &out = *out_pointer;
if (plane == -1) {
out.ResizeLike(in);
plane = 0;
@@ -124,62 +119,61 @@ void Convolve(const Array3Df& in,
// fast path.
int half_width = kernel.size() / 2;
switch (half_width) {
#define static_convolution(size) \
case size: \
FastConvolve<size, vertical>(kernel, \
width, \
height, \
src, \
src_stride, \
src_line_stride, \
dst, \
dst_stride); \
break;
static_convolution(1) static_convolution(2) static_convolution(3)
static_convolution(4) static_convolution(5) static_convolution(6)
static_convolution(7)
#define static_convolution(size) case size: \
FastConvolve<size, vertical>(kernel, width, height, src, src_stride, \
src_line_stride, dst, dst_stride); break;
static_convolution(1)
static_convolution(2)
static_convolution(3)
static_convolution(4)
static_convolution(5)
static_convolution(6)
static_convolution(7)
#undef static_convolution
default : int dynamic_size = kernel.size() / 2;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
double sum = 0;
// Slow path: this loop cannot be unrolled.
for (int k = -dynamic_size; k <= dynamic_size; ++k) {
if (vertical) {
if (y + k >= 0 && y + k < height) {
sum += src[k * src_line_stride] *
kernel(2 * dynamic_size - (k + dynamic_size));
}
} else {
if (x + k >= 0 && x + k < width) {
sum += src[k * src_stride] *
kernel(2 * dynamic_size - (k + dynamic_size));
default:
int dynamic_size = kernel.size() / 2;
for (int y = 0; y < height; ++y) {
for (int x = 0; x < width; ++x) {
double sum = 0;
// Slow path: this loop cannot be unrolled.
for (int k = -dynamic_size; k <= dynamic_size; ++k) {
if (vertical) {
if (y + k >= 0 && y + k < height) {
sum += src[k * src_line_stride] *
kernel(2 * dynamic_size - (k + dynamic_size));
}
} else {
if (x + k >= 0 && x + k < width) {
sum += src[k * src_stride] *
kernel(2 * dynamic_size - (k + dynamic_size));
}
}
}
dst[0] = static_cast<float>(sum);
src += src_stride;
dst += dst_stride;
}
dst[0] = static_cast<float>(sum);
src += src_stride;
dst += dst_stride;
}
}
}
}
void ConvolveHorizontal(const Array3Df& in,
const Vec& kernel,
Array3Df* out_pointer,
void ConvolveHorizontal(const Array3Df &in,
const Vec &kernel,
Array3Df *out_pointer,
int plane) {
Convolve<false>(in, kernel, out_pointer, plane);
}
void ConvolveVertical(const Array3Df& in,
const Vec& kernel,
Array3Df* out_pointer,
void ConvolveVertical(const Array3Df &in,
const Vec &kernel,
Array3Df *out_pointer,
int plane) {
Convolve<true>(in, kernel, out_pointer, plane);
}
void ConvolveGaussian(const Array3Df& in, double sigma, Array3Df* out_pointer) {
void ConvolveGaussian(const Array3Df &in,
double sigma,
Array3Df *out_pointer) {
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
@@ -188,10 +182,10 @@ void ConvolveGaussian(const Array3Df& in, double sigma, Array3Df* out_pointer) {
ConvolveHorizontal(tmp, kernel, out_pointer);
}
void ImageDerivatives(const Array3Df& in,
void ImageDerivatives(const Array3Df &in,
double sigma,
Array3Df* gradient_x,
Array3Df* gradient_y) {
Array3Df *gradient_x,
Array3Df *gradient_y) {
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
Array3Df tmp;
@@ -205,11 +199,11 @@ void ImageDerivatives(const Array3Df& in,
ConvolveVertical(tmp, derivative, gradient_y);
}
void BlurredImageAndDerivatives(const Array3Df& in,
void BlurredImageAndDerivatives(const Array3Df &in,
double sigma,
Array3Df* blurred_image,
Array3Df* gradient_x,
Array3Df* gradient_y) {
Array3Df *blurred_image,
Array3Df *gradient_x,
Array3Df *gradient_y) {
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
Array3Df tmp;
@@ -230,9 +224,9 @@ void BlurredImageAndDerivatives(const Array3Df& in,
// image, and store the results in three channels. Since the blurred value and
// gradients are closer in memory, this leads to better performance if all
// three values are needed at the same time.
void BlurredImageAndDerivativesChannels(const Array3Df& in,
void BlurredImageAndDerivativesChannels(const Array3Df &in,
double sigma,
Array3Df* blurred_and_gradxy) {
Array3Df *blurred_and_gradxy) {
assert(in.Depth() == 1);
Vec kernel, derivative;
@@ -252,10 +246,10 @@ void BlurredImageAndDerivativesChannels(const Array3Df& in,
ConvolveVertical(tmp, derivative, blurred_and_gradxy, 2);
}
void BoxFilterHorizontal(const Array3Df& in,
void BoxFilterHorizontal(const Array3Df &in,
int window_size,
Array3Df* out_pointer) {
Array3Df& out = *out_pointer;
Array3Df *out_pointer) {
Array3Df &out = *out_pointer;
out.ResizeLike(in);
int half_width = (window_size - 1) / 2;
@@ -272,7 +266,7 @@ void BoxFilterHorizontal(const Array3Df& in,
out(i, j, k) = sum;
}
// Fill interior.
for (int j = half_width + 1; j < in.Width() - half_width; ++j) {
for (int j = half_width + 1; j < in.Width()-half_width; ++j) {
sum -= in(i, j - half_width - 1, k);
sum += in(i, j + half_width, k);
out(i, j, k) = sum;
@@ -286,10 +280,10 @@ void BoxFilterHorizontal(const Array3Df& in,
}
}
void BoxFilterVertical(const Array3Df& in,
void BoxFilterVertical(const Array3Df &in,
int window_size,
Array3Df* out_pointer) {
Array3Df& out = *out_pointer;
Array3Df *out_pointer) {
Array3Df &out = *out_pointer;
out.ResizeLike(in);
int half_width = (window_size - 1) / 2;
@@ -306,7 +300,7 @@ void BoxFilterVertical(const Array3Df& in,
out(i, j, k) = sum;
}
// Fill interior.
for (int i = half_width + 1; i < in.Height() - half_width; ++i) {
for (int i = half_width + 1; i < in.Height()-half_width; ++i) {
sum -= in(i - half_width - 1, j, k);
sum += in(i + half_width, j, k);
out(i, j, k) = sum;
@@ -320,7 +314,9 @@ void BoxFilterVertical(const Array3Df& in,
}
}
void BoxFilter(const Array3Df& in, int box_width, Array3Df* out) {
void BoxFilter(const Array3Df &in,
int box_width,
Array3Df *out) {
Array3Df tmp;
BoxFilterHorizontal(in, box_width, &tmp);
BoxFilterVertical(tmp, box_width, out);
@@ -331,17 +327,17 @@ void LaplaceFilter(unsigned char* src,
int width,
int height,
int strength) {
for (int y = 1; y < height - 1; y++)
for (int x = 1; x < width - 1; x++) {
const unsigned char* s = &src[y * width + x];
int l = 128 + s[-width - 1] + s[-width] + s[-width + 1] + s[1] -
8 * s[0] + s[1] + s[width - 1] + s[width] + s[width + 1];
int d = ((256 - strength) * s[0] + strength * l) / 256;
if (d < 0)
d = 0;
if (d > 255)
d = 255;
dst[y * width + x] = d;
for (int y = 1; y < height-1; y++)
for (int x = 1; x < width-1; x++) {
const unsigned char* s = &src[y*width+x];
int l = 128 +
s[-width-1] + s[-width] + s[-width+1] +
s[1] - 8*s[0] + s[1] +
s[ width-1] + s[ width] + s[ width+1];
int d = ((256-strength)*s[0] + strength*l) / 256;
if (d < 0) d=0;
if (d > 255) d=255;
dst[y*width+x] = d;
}
}

View File

@@ -30,71 +30,70 @@ namespace libmv {
// Zero mean Gaussian.
inline double Gaussian(double x, double sigma) {
return 1 / sqrt(2 * M_PI * sigma * sigma) * exp(-(x * x / 2 / sigma / sigma));
return 1/sqrt(2*M_PI*sigma*sigma) * exp(-(x*x/2/sigma/sigma));
}
// 2D gaussian (zero mean)
// (9) in http://mathworld.wolfram.com/GaussianFunction.html
inline double Gaussian2D(double x, double y, double sigma) {
return 1.0 / (2.0 * M_PI * sigma * sigma) *
exp(-(x * x + y * y) / (2.0 * sigma * sigma));
return 1.0/(2.0*M_PI*sigma*sigma) * exp( -(x*x+y*y)/(2.0*sigma*sigma));
}
inline double GaussianDerivative(double x, double sigma) {
return -x / sigma / sigma * Gaussian(x, sigma);
}
// Solve the inverse of the Gaussian for positive x.
inline double GaussianInversePositive(double y, double sigma) {
return sqrt(-2 * sigma * sigma * log(y * sigma * sqrt(2 * M_PI)));
return sqrt(-2 * sigma * sigma * log(y * sigma * sqrt(2*M_PI)));
}
void ComputeGaussianKernel(double sigma, Vec* kernel, Vec* derivative);
void ConvolveHorizontal(const FloatImage& in,
const Vec& kernel,
FloatImage* out_pointer,
void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative);
void ConvolveHorizontal(const FloatImage &in,
const Vec &kernel,
FloatImage *out_pointer,
int plane = -1);
void ConvolveVertical(const FloatImage& in,
const Vec& kernel,
FloatImage* out_pointer,
void ConvolveVertical(const FloatImage &in,
const Vec &kernel,
FloatImage *out_pointer,
int plane = -1);
void ConvolveGaussian(const FloatImage& in,
void ConvolveGaussian(const FloatImage &in,
double sigma,
FloatImage* out_pointer);
FloatImage *out_pointer);
void ImageDerivatives(const FloatImage& in,
void ImageDerivatives(const FloatImage &in,
double sigma,
FloatImage* gradient_x,
FloatImage* gradient_y);
FloatImage *gradient_x,
FloatImage *gradient_y);
void BlurredImageAndDerivatives(const FloatImage& in,
void BlurredImageAndDerivatives(const FloatImage &in,
double sigma,
FloatImage* blurred_image,
FloatImage* gradient_x,
FloatImage* gradient_y);
FloatImage *blurred_image,
FloatImage *gradient_x,
FloatImage *gradient_y);
// Blur and take the gradients of an image, storing the results inside the
// three channels of blurred_and_gradxy.
void BlurredImageAndDerivativesChannels(const FloatImage& in,
void BlurredImageAndDerivativesChannels(const FloatImage &in,
double sigma,
FloatImage* blurred_and_gradxy);
FloatImage *blurred_and_gradxy);
void BoxFilterHorizontal(const FloatImage& in,
void BoxFilterHorizontal(const FloatImage &in,
int window_size,
FloatImage* out_pointer);
FloatImage *out_pointer);
void BoxFilterVertical(const FloatImage& in,
void BoxFilterVertical(const FloatImage &in,
int window_size,
FloatImage* out_pointer);
FloatImage *out_pointer);
void BoxFilter(const FloatImage& in, int box_width, FloatImage* out);
void BoxFilter(const FloatImage &in,
int box_width,
FloatImage *out);
/*!
Convolve \a src into \a dst with the discrete laplacian operator.
\a src and \a dst should be \a width x \a height images.
\a strength is an interpolation coefficient (0-256) between original image
and the laplacian.
\a strength is an interpolation coefficient (0-256) between original image and the laplacian.
\note Make sure the search region is filtered with the same strength as the
pattern.
\note Make sure the search region is filtered with the same strength as the pattern.
*/
void LaplaceFilter(unsigned char* src,
unsigned char* dst,
@@ -105,3 +104,4 @@ void LaplaceFilter(unsigned char* src,
} // namespace libmv
#endif // LIBMV_IMAGE_CONVOLVE_H_
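
A usage sketch of the blurring and derivative entry points declared above; the sigma value mirrors the one used in the tests, the wrapper name is made up, and the input is assumed to be a single-channel image.

void BlurExample(const libmv::FloatImage& input) {
  // Blur and take x/y derivatives as three separate images.
  libmv::FloatImage blurred, grad_x, grad_y;
  libmv::BlurredImageAndDerivatives(input, /*sigma=*/0.9, &blurred, &grad_x, &grad_y);

  // Or get the blur and both gradients packed as three channels of one image.
  libmv::FloatImage blurred_and_gradxy;
  libmv::BlurredImageAndDerivativesChannels(input, 0.9, &blurred_and_gradxy);
}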

View File

@@ -85,26 +85,26 @@ TEST(Convolve, BlurredImageAndDerivativesChannelsHorizontalSlope) {
FloatImage image(10, 10), blurred_and_derivatives;
for (int j = 0; j < 10; ++j) {
for (int i = 0; i < 10; ++i) {
image(j, i) = 2 * i;
image(j, i) = 2*i;
}
}
BlurredImageAndDerivativesChannels(image, 0.9, &blurred_and_derivatives);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 0), 10.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 2.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 0.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 2.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 0.0, 1e-7);
}
TEST(Convolve, BlurredImageAndDerivativesChannelsVerticalSlope) {
FloatImage image(10, 10), blurred_and_derivatives;
for (int j = 0; j < 10; ++j) {
for (int i = 0; i < 10; ++i) {
image(j, i) = 2 * j;
image(j, i) = 2*j;
}
}
BlurredImageAndDerivativesChannels(image, 0.9, &blurred_and_derivatives);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 0), 10.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 0.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 2.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 0.0, 1e-7);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 2.0, 1e-7);
}
} // namespace

View File

@@ -21,14 +21,14 @@
#ifndef LIBMV_IMAGE_CORRELATION_H
#define LIBMV_IMAGE_CORRELATION_H
#include "libmv/image/image.h"
#include "libmv/logging/logging.h"
#include "libmv/image/image.h"
namespace libmv {
inline double PearsonProductMomentCorrelation(
const FloatImage& image_and_gradient1_sampled,
const FloatImage& image_and_gradient2_sampled) {
const FloatImage &image_and_gradient1_sampled,
const FloatImage &image_and_gradient2_sampled) {
assert(image_and_gradient1_sampled.Width() ==
image_and_gradient2_sampled.Width());
assert(image_and_gradient1_sampled.Height() ==
@@ -63,8 +63,9 @@ inline double PearsonProductMomentCorrelation(
double covariance_xy = sXY - sX * sY;
double correlation = covariance_xy / sqrt(var_x * var_y);
LG << "Covariance xy: " << covariance_xy << ", var 1: " << var_x
<< ", var 2: " << var_y << ", correlation: " << correlation;
LG << "Covariance xy: " << covariance_xy
<< ", var 1: " << var_x << ", var 2: " << var_y
<< ", correlation: " << correlation;
return correlation;
}
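
The function above boils down to the usual Pearson formula, covariance over the product of standard deviations. For comparison, a scalar sketch over two plain arrays (this is an illustration, not the library's API):

#include <cmath>

// Pearson correlation of two equally sized samples.
double Pearson(const double* a, const double* b, int n) {
  double sa = 0, sb = 0, saa = 0, sbb = 0, sab = 0;
  for (int i = 0; i < n; ++i) {
    sa += a[i]; sb += b[i];
    saa += a[i] * a[i]; sbb += b[i] * b[i];
    sab += a[i] * b[i];
  }
  const double mean_a = sa / n, mean_b = sb / n;
  const double var_a = saa / n - mean_a * mean_a;  // E[a^2] - E[a]^2
  const double var_b = sbb / n - mean_b * mean_b;
  const double cov = sab / n - mean_a * mean_b;    // E[ab] - E[a]E[b]
  return cov / std::sqrt(var_a * var_b);
}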

View File

@@ -39,11 +39,14 @@ typedef Array3Ds ShortImage;
// is the best solution after all.
class Image {
public:
// Create an image from an array. The image takes ownership of the array.
Image(Array3Du* array) : array_type_(BYTE), array_(array) {}
Image(Array3Df* array) : array_type_(FLOAT), array_(array) {}
Image(const Image& img) : array_type_(NONE), array_(NULL) { *this = img; }
// Create an image from an array. The image takes ownership of the array.
Image(Array3Du *array) : array_type_(BYTE), array_(array) {}
Image(Array3Df *array) : array_type_(FLOAT), array_(array) {}
Image(const Image &img): array_type_(NONE), array_(NULL) {
*this = img;
}
// Underlying data type.
enum DataType {
@@ -59,18 +62,20 @@ class Image {
int size;
switch (array_type_) {
case BYTE:
size = reinterpret_cast<Array3Du*>(array_)->MemorySizeInBytes();
break;
size = reinterpret_cast<Array3Du *>(array_)->MemorySizeInBytes();
break;
case FLOAT:
size = reinterpret_cast<Array3Df*>(array_)->MemorySizeInBytes();
break;
size = reinterpret_cast<Array3Df *>(array_)->MemorySizeInBytes();
break;
case INT:
size = reinterpret_cast<Array3Di*>(array_)->MemorySizeInBytes();
break;
size = reinterpret_cast<Array3Di *>(array_)->MemorySizeInBytes();
break;
case SHORT:
size = reinterpret_cast<Array3Ds*>(array_)->MemorySizeInBytes();
break;
default: size = 0; assert(0);
size = reinterpret_cast<Array3Ds *>(array_)->MemorySizeInBytes();
break;
default :
size = 0;
assert(0);
}
size += sizeof(*this);
return size;
@@ -78,57 +83,71 @@ class Image {
~Image() {
switch (array_type_) {
case BYTE: delete reinterpret_cast<Array3Du*>(array_); break;
case FLOAT: delete reinterpret_cast<Array3Df*>(array_); break;
case INT: delete reinterpret_cast<Array3Di*>(array_); break;
case SHORT: delete reinterpret_cast<Array3Ds*>(array_); break;
default: assert(0);
}
case BYTE:
delete reinterpret_cast<Array3Du *>(array_);
break;
case FLOAT:
delete reinterpret_cast<Array3Df *>(array_);
break;
case INT:
delete reinterpret_cast<Array3Di *>(array_);
break;
case SHORT:
delete reinterpret_cast<Array3Ds *>(array_);
break;
default:
assert(0);
}
}
Image& operator=(const Image& f) {
Image& operator= (const Image& f) {
if (this != &f) {
array_type_ = f.array_type_;
switch (array_type_) {
case BYTE:
delete reinterpret_cast<Array3Du*>(array_);
array_ = new Array3Du(*(Array3Du*)f.array_);
break;
delete reinterpret_cast<Array3Du *>(array_);
array_ = new Array3Du(*(Array3Du *)f.array_);
break;
case FLOAT:
delete reinterpret_cast<Array3Df*>(array_);
array_ = new Array3Df(*(Array3Df*)f.array_);
break;
delete reinterpret_cast<Array3Df *>(array_);
array_ = new Array3Df(*(Array3Df *)f.array_);
break;
case INT:
delete reinterpret_cast<Array3Di*>(array_);
array_ = new Array3Di(*(Array3Di*)f.array_);
break;
delete reinterpret_cast<Array3Di *>(array_);
array_ = new Array3Di(*(Array3Di *)f.array_);
break;
case SHORT:
delete reinterpret_cast<Array3Ds*>(array_);
array_ = new Array3Ds(*(Array3Ds*)f.array_);
break;
default: assert(0);
delete reinterpret_cast<Array3Ds *>(array_);
array_ = new Array3Ds(*(Array3Ds *)f.array_);
break;
default:
assert(0);
}
}
return *this;
}
Array3Du* AsArray3Du() const {
Array3Du *AsArray3Du() const {
if (array_type_ == BYTE) {
return reinterpret_cast<Array3Du*>(array_);
return reinterpret_cast<Array3Du *>(array_);
}
return NULL;
}
Array3Df* AsArray3Df() const {
Array3Df *AsArray3Df() const {
if (array_type_ == FLOAT) {
return reinterpret_cast<Array3Df*>(array_);
return reinterpret_cast<Array3Df *>(array_);
}
return NULL;
}
private:
DataType array_type_;
BaseArray* array_;
BaseArray *array_;
};
} // namespace libmv
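
A usage sketch of the wrapper above; note that Image takes ownership of the array passed to it, as its comment states. The function name and image size are only examples.

void ImageWrapperExample() {
  libmv::Array3Df* pixels = new libmv::Array3Df(480, 640, 3);
  libmv::Image image(pixels);                      // image now owns `pixels`
  libmv::Array3Df* as_float = image.AsArray3Df();  // non-NULL: FLOAT-backed image
  libmv::Array3Du* as_byte = image.AsArray3Du();   // NULL: wrong underlying type
  (void)as_float;
  (void)as_byte;
}                                                  // `pixels` is deleted with `image`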

View File

@@ -28,7 +28,7 @@ namespace libmv {
// The factor comes from http://www.easyrgb.com/
// RGB to XYZ : Y is the luminance channel
// var_R * 0.2126 + var_G * 0.7152 + var_B * 0.0722
template <typename T>
template<typename T>
inline T RGB2GRAY(const T r, const T g, const T b) {
return static_cast<T>(r * 0.2126 + g * 0.7152 + b * 0.0722);
}
@@ -42,8 +42,8 @@ inline unsigned char RGB2GRAY<unsigned char>(const unsigned char r,
return (unsigned char)(r * 0.2126 + g * 0.7152 + b * 0.0722 +0.5);
}*/
template <class ImageIn, class ImageOut>
void Rgb2Gray(const ImageIn& imaIn, ImageOut* imaOut) {
template<class ImageIn, class ImageOut>
void Rgb2Gray(const ImageIn &imaIn, ImageOut *imaOut) {
// It is all fine to convert an RGBA image here as well,
// all the additional channels will be nicely ignored.
assert(imaIn.Depth() >= 3);
@@ -52,22 +52,21 @@ void Rgb2Gray(const ImageIn& imaIn, ImageOut* imaOut) {
// Convert each RGB pixel into Gray value (luminance)
for (int j = 0; j < imaIn.Height(); ++j) {
for (int i = 0; i < imaIn.Width(); ++i) {
(*imaOut)(j, i) =
RGB2GRAY(imaIn(j, i, 0), imaIn(j, i, 1), imaIn(j, i, 2));
for (int i = 0; i < imaIn.Width(); ++i) {
(*imaOut)(j, i) = RGB2GRAY(imaIn(j, i, 0) , imaIn(j, i, 1), imaIn(j, i, 2));
}
}
}
// Convert given float image to an unsigned char array of pixels.
template <class Image>
unsigned char* FloatImageToUCharArray(const Image& image) {
unsigned char* buffer =
template<class Image>
unsigned char *FloatImageToUCharArray(const Image &image) {
unsigned char *buffer =
new unsigned char[image.Width() * image.Height() * image.Depth()];
for (int y = 0; y < image.Height(); y++) {
for (int x = 0; x < image.Width(); x++) {
for (int d = 0; d < image.Depth(); d++) {
for (int x = 0; x < image.Width(); x++) {
for (int d = 0; d < image.Depth(); d++) {
int index = (y * image.Width() + x) * image.Depth() + d;
buffer[index] = 255.0 * image(y, x, d);
}
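
A quick numeric check of the luminance weights used above: 0.2126 + 0.7152 + 0.0722 sum to 1, so a pure grey input maps to itself and pure green maps to 0.7152. The wrapper function below is only an illustration.

void LuminanceExample() {
  const float grey  = libmv::RGB2GRAY(0.5f, 0.5f, 0.5f);  // ~0.5f
  const float green = libmv::RGB2GRAY(0.0f, 1.0f, 0.0f);  // ~0.7152f
  (void)grey;
  (void)green;
}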

View File

@@ -34,9 +34,9 @@ namespace libmv {
/// Put the pixel in the image to the given color only if the point (xc,yc)
/// is inside the image.
template <class Image, class Color>
inline void safePutPixel(int yc, int xc, const Color& col, Image* pim) {
inline void safePutPixel(int yc, int xc, const Color & col, Image *pim) {
if (!pim)
return;
return;
if (pim->Contains(yc, xc)) {
(*pim)(yc, xc) = col;
}
@@ -45,9 +45,9 @@ inline void safePutPixel(int yc, int xc, const Color& col, Image* pim) {
/// is inside the image. This function support multi-channel color
/// \note The color pointer col must have size as the image depth
template <class Image, class Color>
inline void safePutPixel(int yc, int xc, const Color* col, Image* pim) {
inline void safePutPixel(int yc, int xc, const Color *col, Image *pim) {
if (!pim)
return;
return;
if (pim->Contains(yc, xc)) {
for (int i = 0; i < pim->Depth(); ++i)
(*pim)(yc, xc, i) = *(col + i);
@@ -59,23 +59,19 @@ inline void safePutPixel(int yc, int xc, const Color* col, Image* pim) {
// Add the rotation of the ellipse.
// As the algo. use symmetry we must use 4 rotations.
template <class Image, class Color>
void DrawEllipse(int xc,
int yc,
int radiusA,
int radiusB,
const Color& col,
Image* pim,
double angle = 0.0) {
void DrawEllipse(int xc, int yc, int radiusA, int radiusB,
const Color &col, Image *pim, double angle = 0.0) {
int a = radiusA;
int b = radiusB;
// Counter Clockwise rotation matrix.
double matXY[4] = {cos(angle), sin(angle), -sin(angle), cos(angle)};
double matXY[4] = { cos(angle), sin(angle),
-sin(angle), cos(angle)};
int x, y;
double d1, d2;
x = 0;
y = b;
d1 = b * b - a * a * b + a * a / 4;
d1 = b*b - a*a*b + a*a/4;
float rotX = (matXY[0] * x + matXY[1] * y);
float rotY = (matXY[2] * x + matXY[3] * y);
@@ -90,12 +86,12 @@ void DrawEllipse(int xc,
rotY = (-matXY[2] * x + matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
while (a * a * (y - .5) > b * b * (x + 1)) {
while (a*a*(y-.5) > b*b*(x+1)) {
if (d1 < 0) {
d1 += b * b * (2 * x + 3);
d1 += b*b*(2*x+3);
++x;
} else {
d1 += b * b * (2 * x + 3) + a * a * (-2 * y + 2);
d1 += b*b*(2*x+3) + a*a*(-2*y+2);
++x;
--y;
}
@@ -112,14 +108,14 @@ void DrawEllipse(int xc,
rotY = (-matXY[2] * x + matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
}
d2 = b * b * (x + .5) * (x + .5) + a * a * (y - 1) * (y - 1) - a * a * b * b;
d2 = b*b*(x+.5)*(x+.5) + a*a*(y-1)*(y-1) - a*a*b*b;
while (y > 0) {
if (d2 < 0) {
d2 += b * b * (2 * x + 2) + a * a * (-2 * y + 3);
d2 += b*b*(2*x+2) + a*a*(-2*y+3);
--y;
++x;
} else {
d2 += a * a * (-2 * y + 3);
d2 += a*a*(-2*y+3);
--y;
}
rotX = (matXY[0] * x + matXY[1] * y);
@@ -141,23 +137,23 @@ void DrawEllipse(int xc,
// So it's better to use the Andres method.
// http://fr.wikipedia.org/wiki/Algorithme_de_tracé_de_cercle_d'Andres.
template <class Image, class Color>
void DrawCircle(int x, int y, int radius, const Color& col, Image* pim) {
Image& im = *pim;
if (im.Contains(y + radius, x + radius) ||
im.Contains(y + radius, x - radius) ||
im.Contains(y - radius, x + radius) ||
im.Contains(y - radius, x - radius)) {
void DrawCircle(int x, int y, int radius, const Color &col, Image *pim) {
Image &im = *pim;
if ( im.Contains(y + radius, x + radius)
|| im.Contains(y + radius, x - radius)
|| im.Contains(y - radius, x + radius)
|| im.Contains(y - radius, x - radius)) {
int x1 = 0;
int y1 = radius;
int d = radius - 1;
while (y1 >= x1) {
// Draw the point for each octant.
safePutPixel(y1 + y, x1 + x, col, pim);
safePutPixel(x1 + y, y1 + x, col, pim);
safePutPixel(y1 + y, -x1 + x, col, pim);
safePutPixel(x1 + y, -y1 + x, col, pim);
safePutPixel(-y1 + y, x1 + x, col, pim);
safePutPixel(-x1 + y, y1 + x, col, pim);
safePutPixel( y1 + y, x1 + x, col, pim);
safePutPixel( x1 + y, y1 + x, col, pim);
safePutPixel( y1 + y, -x1 + x, col, pim);
safePutPixel( x1 + y, -y1 + x, col, pim);
safePutPixel(-y1 + y, x1 + x, col, pim);
safePutPixel(-x1 + y, y1 + x, col, pim);
safePutPixel(-y1 + y, -x1 + x, col, pim);
safePutPixel(-x1 + y, -y1 + x, col, pim);
if (d >= 2 * x1) {
@@ -167,7 +163,7 @@ void DrawCircle(int x, int y, int radius, const Color& col, Image* pim) {
if (d <= 2 * (radius - y1)) {
d = d + 2 * y1 - 1;
y1 -= 1;
} else {
} else {
d = d + 2 * (y1 - x1 - 1);
y1 -= 1;
x1 += 1;
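The comment in DrawCircle points to Andres' midpoint variant: one octant is walked with an integer decision variable d, and every step is mirrored into the other seven octants. A self-contained sketch of the textbook update rules; the lines hidden between the two hunks above are assumed to follow the same scheme:

#include <functional>

// Textbook Andres circle rasterization: walk the octant 0 <= x1 <= y1 and
// mirror each point eight ways through plot(row, col).
void AndresCircle(int cx, int cy, int radius,
                  const std::function<void(int, int)>& plot) {
  int x1 = 0, y1 = radius, d = radius - 1;
  while (y1 >= x1) {
    plot(cy + y1, cx + x1); plot(cy + x1, cx + y1);
    plot(cy + y1, cx - x1); plot(cy + x1, cx - y1);
    plot(cy - y1, cx + x1); plot(cy - x1, cx + y1);
    plot(cy - y1, cx - x1); plot(cy - x1, cx - y1);
    if (d >= 2 * x1) {
      d -= 2 * x1 + 1;         // stay on the current row
      x1 += 1;
    } else if (d <= 2 * (radius - y1)) {
      d += 2 * y1 - 1;         // drop one row
      y1 -= 1;
    } else {
      d += 2 * (y1 - x1 - 1);  // diagonal step
      y1 -= 1;
      x1 += 1;
    }
  }
}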
@@ -179,8 +175,8 @@ void DrawCircle(int x, int y, int radius, const Color& col, Image* pim) {
// Bresenham algorithm
template <class Image, class Color>
void DrawLine(int xa, int ya, int xb, int yb, const Color& col, Image* pim) {
Image& im = *pim;
void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
Image &im = *pim;
// If one point is outside the image
// Replace the outside point by the intersection of the line and
@@ -189,37 +185,35 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color& col, Image* pim) {
int width = pim->Width();
int height = pim->Height();
const bool xdir = xa < xb, ydir = ya < yb;
float nx0 = xa, nx1 = xb, ny0 = ya, ny1 = yb, &xleft = xdir ? nx0 : nx1,
&yleft = xdir ? ny0 : ny1, &xright = xdir ? nx1 : nx0,
&yright = xdir ? ny1 : ny0, &xup = ydir ? nx0 : nx1,
&yup = ydir ? ny0 : ny1, &xdown = ydir ? nx1 : nx0,
&ydown = ydir ? ny1 : ny0;
float nx0 = xa, nx1 = xb, ny0 = ya, ny1 = yb,
&xleft = xdir?nx0:nx1, &yleft = xdir?ny0:ny1,
&xright = xdir?nx1:nx0, &yright = xdir?ny1:ny0,
&xup = ydir?nx0:nx1, &yup = ydir?ny0:ny1,
&xdown = ydir?nx1:nx0, &ydown = ydir?ny1:ny0;
if (xright < 0 || xleft >= width)
return;
if (xright < 0 || xleft >= width) return;
if (xleft < 0) {
yleft -= xleft * (yright - yleft) / (xright - xleft);
xleft = 0;
yleft -= xleft*(yright - yleft)/(xright - xleft);
xleft = 0;
}
if (xright >= width) {
yright -= (xright - width) * (yright - yleft) / (xright - xleft);
xright = width - 1;
yright -= (xright - width)*(yright - yleft)/(xright - xleft);
xright = width - 1;
}
if (ydown < 0 || yup >= height)
return;
if (ydown < 0 || yup >= height) return;
if (yup < 0) {
xup -= yup * (xdown - xup) / (ydown - yup);
yup = 0;
xup -= yup*(xdown - xup)/(ydown - yup);
yup = 0;
}
if (ydown >= height) {
xdown -= (ydown - height) * (xdown - xup) / (ydown - yup);
ydown = height - 1;
xdown -= (ydown - height)*(xdown - xup)/(ydown - yup);
ydown = height - 1;
}
xa = (int)xleft;
xb = (int)xright;
ya = (int)yleft;
yb = (int)yright;
xa = (int) xleft;
xb = (int) xright;
ya = (int) yleft;
yb = (int) yright;
}
int xbas, xhaut, ybas, yhaut;
@@ -247,7 +241,7 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color& col, Image* pim) {
}
if (dy > 0) { // Positive slope will increment X.
incrmY = 1;
} else { // Negative slope.
} else { // Negative slope.
incrmY = -1;
}
if (dx >= dy) {
@@ -261,9 +255,9 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color& col, Image* pim) {
x += incrmX;
if (dp <= 0) { // Go in direction of the South Pixel.
dp += S;
} else { // Go to the North.
} else { // Go to the North.
dp += N;
y += incrmY;
y+=incrmY;
}
}
} else {
@@ -277,7 +271,7 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color& col, Image* pim) {
y += incrmY;
if (dp <= 0) { // Go in direction of the South Pixel.
dp += S;
} else { // Go to the North.
} else { // Go to the North.
dp += N;
x += incrmX;
}
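The walk itself is classic integer Bresenham: the decision variable dp is nudged by one of two constants at every step, choosing between advancing only along the major axis or along both axes. A textbook sketch of the x-major case (the y-major branch in the diff swaps the roles of x and y; the S and N constants are not visible above, so the standard ones are used here):

#include <cstdlib>
#include <functional>

// Standard x-major Bresenham walk, assuming |xb - xa| >= |yb - ya|.
// plot() takes (row, col), matching the argument order used elsewhere here.
void BresenhamXMajor(int xa, int ya, int xb, int yb,
                     const std::function<void(int, int)>& plot) {
  const int dx = std::abs(xb - xa), dy = std::abs(yb - ya);
  const int stepx = (xa <= xb) ? 1 : -1;
  const int stepy = (ya <= yb) ? 1 : -1;
  const int S = 2 * dy;         // increment when y stays put
  const int N = 2 * (dy - dx);  // increment when y also advances
  int dp = 2 * dy - dx;         // decision variable
  int x = xa, y = ya;
  for (int i = 0; i <= dx; ++i) {
    plot(y, x);
    x += stepx;
    if (dp <= 0) {  // stay on the current row
      dp += S;
    } else {        // step to the neighbouring row
      dp += N;
      y += stepy;
    }
  }
}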

View File

@@ -23,20 +23,20 @@
#include "libmv/image/image.h"
#include "testing/testing.h"
using libmv::Array3Df;
using libmv::Image;
using libmv::Array3Df;
namespace {
TEST(Image, SimpleImageAccessors) {
Array3Df* array = new Array3Df(2, 3);
Array3Df *array = new Array3Df(2, 3);
Image image(array);
EXPECT_EQ(array, image.AsArray3Df());
EXPECT_TRUE(NULL == image.AsArray3Du());
}
TEST(Image, MemorySizeInBytes) {
Array3Df* array = new Array3Df(2, 3);
Array3Df *array = new Array3Df(2, 3);
Image image(array);
int size = sizeof(image) + array->MemorySizeInBytes();
EXPECT_EQ(size, image.MemorySizeInBytes());

Some files were not shown because too many files have changed in this diff