Compare commits: outliner-c...geometry-n
256 commits (SHA1):

83d7dd528d 46e2f4761c b26cbb5d53 328aad8c98 c1d8df47f6 c48063d1f6 c645da98d8 13bcb000fa
97d52daf96 f39fbb3e60 b78bebb2af 75af3165ca 52e3608fe9 62c4d0419c 8b815c7ce5 8b88739848
7db42b8f2a cb9f6dfebd ada79b4707 6ba6e97407 af013ff76f 4913b624d8 64aa6c68d5 8fd27c152a
979fe4f1be 64ab084ca5 a3a6d6a670 4eac03d821 01db805a82 52a189936b fbffff26e4 96ce1e9a95
194a57fd63 a3efa1d798 6ac5e0b3e8 c2f76f1550 b6041bfc6c a3ce05d00e 83d0810a9f 9b54c81414
0633a89e18 e50553c61a b458ea6b23 056d7bb175 2abbcaa021 9769cf1ee6 4a2734a835 0be88c7d15
9d28353b52 4c193f731e 1ddd717803 0066e59f2d e9b955b99c 50ccf346f0 7e210e68ba 59910f7217
75ea4b8a1f 7146e9696e a2e00236d0 a8f9a24939 4a3b26dd5e 2e53b646f6 5035057281 9d172f007e
12f394ece7 4efd87d56b b35b8c8849 4dc5920525 db7d8281c5 f00ebd4dba ccf8df66fe 40b2ce5ea7
f3ab698951 ec7fffb033 4eb57d00bb 2e08500d04 454b7876ff ac299bb453 9a73417337 cd49afc596
55e2930c18 956cf9a48d c27095b5cb 8ef8cb7e34 0f6bee8e54 3093f89498 b6988de22a d59fa12f2a
a6c1c0427c ddc6a45a54 5c01ecd2bf c43283d10b f211030344 923b314a7a b277025d8e f17dfd575c
94572a4e30 bc2230df71 b9bd47c2e2 dad228a19c 987732181f d0c1d93b7e caa942b033 5dff952b67
5877e34eb4 770bcfac9b b081108819 2be7b2aaf9 912b38001f a8f1bea590 c4352f44bc 0feca5f07d
dbbfba9428 5db114ae0f 2984fb2b49 9067cd64a5 9e6553c0d4 107a0894cc 0d04bcd566 12dd26a2bb
b4b4532ce0 8ecc1bea4c d706aaa53d beb1460f8e 91ad33ef8f d7a2032846 eaf9ae643b bc090387ac
fb4113defb 88bb29dea6 08452d9956 cd2dfacfa5 c08827e659 f93081a01b 89c8b074e7 934c2c8ac5
977b6ca305 f284a40385 fa81a42539 e00bb5a4b7 9e1e9516a0 2ef2b3e0fd 1043ec7991 b99faa0f56
88e6341ce8 40aa69e2eb c2b3a68f24 01318b3112 43b4570dcf 44d8fafd7f ad0c387fdf 5fb67573b5
dc9a52a303 cad2fd99e7 6b18e13c5b c9cc03b688 249e4df110 fa5190e742 74ed591f62 3d5efb4335
140b7cfe0d af7cc3f8bb d2c4af9865 e5c637f5fe 11d12d543d 2c6114b238 152bd43ae1 24474a60b6
0bd0e08790 45012c3309 faac263d97 5177c35498 49c8042f29 f8c52590c0 0b897e0308 97c0158cda
c34438dd78 e0a4dc9396 97a9576959 51fa44522f 343e13f464 1103809fab 5b89d49b0d e805bfc768
67cb4fdbdc 8ed74c9daf d7cb25e028 e3395093ff f73a420e5a b8b240b329 ae2827bb52 bb97284f8a
209e82da45 e63d43e6b9 802ba35654 79c9c11b35 e1d0ab2dbc 687f994251 5c7767e0e0 af9ee8e2bc
562f2e604a 84e81b8cc3 5ae9527770 082c17a2d2 09677d737c bec7248178 0103cb2051 9f38ebefcb
8f934852f0 cb16db7803 f8965f12a1 7f2a20f89b 5566818603 9a06c6a040 e097116072 b844caca6d
046ac5fccd ef4fbba596 aa8360d0fa 76c356c12e e04491073d a5dda5996e c48e4b7a44 4ae2d6206a
47f5a635da eb8574bc8b bc4e31afb6 9d7672be71 994e7178bb 1719743066 8910033f57 2a4c6c612a
b062b922f9 895f4620a0 fafed6234b 7ff8094a8b 5aabf67a9c ab8c7fe946 a05012d500 97a93566e9
da4d697772 87218899be ffa0a6df9d 706fa5ad76 b7f6de490d 7e485b4620 a7dba81aab 4606e83a75
1d28de57a4 3cfcfb938d bcdc6910a0 7793e8c884 05d9bd7c4a 9255ce9247 a0ce0154e7 0cd7f7ddd1
@@ -1480,10 +1480,12 @@ if(CMAKE_COMPILER_IS_GNUCC)
  ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_INT_IN_BOOL_CONTEXT -Wno-int-in-bool-context)
  ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_FORMAT -Wno-format)
  ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_SWITCH -Wno-switch)
  ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)

  ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_CLASS_MEMACCESS -Wno-class-memaccess)
  ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_COMMENT -Wno-comment)
  ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_TYPEDEFS -Wno-unused-local-typedefs)
  ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)

  if(CMAKE_COMPILER_IS_GNUCC AND (NOT "${CMAKE_C_COMPILER_VERSION}" VERSION_LESS "7.0"))
    ADD_CHECK_C_COMPILER_FLAG(C_REMOVE_STRICT_FLAGS C_WARN_NO_IMPLICIT_FALLTHROUGH -Wno-implicit-fallthrough)

@@ -1538,6 +1540,7 @@ elseif(CMAKE_C_COMPILER_ID MATCHES "Clang")
  ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_CXX11_NARROWING -Wno-c++11-narrowing)
  ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_NON_VIRTUAL_DTOR -Wno-non-virtual-dtor)
  ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_MACROS -Wno-unused-macros)
  ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_VARIABLE -Wno-unused-variable)
  ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_REORDER -Wno-reorder)
  ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_COMMENT -Wno-comment)
  ADD_CHECK_CXX_COMPILER_FLAG(CXX_REMOVE_STRICT_FLAGS CXX_WARN_NO_UNUSED_TYPEDEFS -Wno-unused-local-typedefs)
@@ -18,12 +18,72 @@

# <pep8 compliant>

import dataclasses
import json
import os

from pathlib import Path
from typing import Optional

import codesign.util as util


class ArchiveStateError(Exception):
    message: str

    def __init__(self, message):
        self.message = message
        super().__init__(self.message)


@dataclasses.dataclass
class ArchiveState:
    """
    Additional information (state) of the archive

    Includes information like expected file size of the archive file in the case
    the archive file is expected to be successfully created.

    If the archive can not be created, this state will contain error message
    indicating details of error.
    """

    # Size in bytes of the corresponding archive.
    file_size: Optional[int] = None

    # Non-empty value indicates that error has happenned.
    error_message: str = ''

    def has_error(self) -> bool:
        """
        Check whether the archive is at error state
        """

        return self.error_message

    def serialize_to_string(self) -> str:
        payload = dataclasses.asdict(self)
        return json.dumps(payload, sort_keys=True, indent=4)

    def serialize_to_file(self, filepath: Path) -> None:
        string = self.serialize_to_string()
        filepath.write_text(string)

    @classmethod
    def deserialize_from_string(cls, string: str) -> 'ArchiveState':
        try:
            object_as_dict = json.loads(string)
        except json.decoder.JSONDecodeError:
            raise ArchiveStateError('Error parsing JSON')

        return cls(**object_as_dict)

    @classmethod
    def deserialize_from_file(cls, filepath: Path):
        string = filepath.read_text()
        return cls.deserialize_from_string(string)


class ArchiveWithIndicator:
    """
    The idea of this class is to wrap around logic which takes care of keeping

@@ -79,6 +139,19 @@ class ArchiveWithIndicator:
        if not self.ready_indicator_filepath.exists():
            return False

        try:
            archive_state = ArchiveState.deserialize_from_file(
                self.ready_indicator_filepath)
        except ArchiveStateError as error:
            print(f'Error deserializing archive state: {error.message}')
            return False

        if archive_state.has_error():
            # If the error did happen during codesign procedure there will be no
            # corresponding archive file.
            # The caller code will deal with the error check further.
            return True

        # Sometimes on macOS indicator file appears prior to the actual archive
        # despite the order of creation and os.sync() used in tag_ready().
        # So consider archive not ready if there is an indicator without an

@@ -88,23 +161,11 @@ class ArchiveWithIndicator:
                  f'({self.archive_filepath}) to appear.')
            return False

        # Read archive size from indicator/
        #
        # Assume that file is either empty or is fully written. This is being checked
        # by performing ValueError check since empty string will throw this exception
        # when attempted to be converted to int.
        expected_archive_size_str = self.ready_indicator_filepath.read_text()
        try:
            expected_archive_size = int(expected_archive_size_str)
        except ValueError:
            print(f'Invalid archive size "{expected_archive_size_str}"')
            return False

        # Wait for until archive is fully stored.
        actual_archive_size = self.archive_filepath.stat().st_size
        if actual_archive_size != expected_archive_size:
        if actual_archive_size != archive_state.file_size:
            print('Partial/invalid archive size (expected '
                  f'{expected_archive_size} got {actual_archive_size})')
                  f'{archive_state.file_size} got {actual_archive_size})')
            return False

        return True

@@ -129,7 +190,7 @@ class ArchiveWithIndicator:
            print(f'Exception checking archive: {e}')
            return False

    def tag_ready(self) -> None:
    def tag_ready(self, error_message='') -> None:
        """
        Tag the archive as ready by creating the corresponding indication file.

@@ -138,13 +199,34 @@ class ArchiveWithIndicator:
        If it is violated, an assert will fail.
        """
        assert not self.is_ready()

        # Try the best to make sure everything is synced to the file system,
        # to avoid any possibility of stamp appearing on a network share prior to
        # an actual file.
        if util.get_current_platform() != util.Platform.WINDOWS:
            os.sync()
        archive_size = self.archive_filepath.stat().st_size
        self.ready_indicator_filepath.write_text(str(archive_size))

        archive_size = -1
        if self.archive_filepath.exists():
            archive_size = self.archive_filepath.stat().st_size

        archive_info = ArchiveState(
            file_size=archive_size, error_message=error_message)

        self.ready_indicator_filepath.write_text(
            archive_info.serialize_to_string())

    def get_state(self) -> ArchiveState:
        """
        Get state object for this archive

        The state is read from the corresponding state file.
        """

        try:
            return ArchiveState.deserialize_from_file(self.ready_indicator_filepath)
        except ArchiveStateError as error:
            return ArchiveState(error_message=f'Error in information format: {error}')

    def clean(self) -> None:
        """
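For context, the indicator protocol introduced above can be exercised on its own. The sketch below is a minimal, standalone Python round-trip of the ArchiveState JSON serialization; the class is re-declared locally (trimmed to its data fields and two methods) so the example runs without the codesign package, and it is not part of the patch itself.

```python
# Minimal standalone sketch of the ArchiveState JSON round-trip shown above.
# The class is re-declared here so the example runs without the codesign package.
import dataclasses
import json
from typing import Optional


@dataclasses.dataclass
class ArchiveState:
    file_size: Optional[int] = None
    error_message: str = ''

    def serialize_to_string(self) -> str:
        return json.dumps(dataclasses.asdict(self), sort_keys=True, indent=4)

    @classmethod
    def deserialize_from_string(cls, string: str) -> 'ArchiveState':
        return cls(**json.loads(string))


if __name__ == '__main__':
    ok = ArchiveState(file_size=1024)
    failed = ArchiveState(error_message='Signing server rejected the archive')
    for state in (ok, failed):
        text = state.serialize_to_string()
        restored = ArchiveState.deserialize_from_string(text)
        # A non-empty error_message is what has_error() keys off in the patch.
        print(restored, bool(restored.error_message))
```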
@@ -58,6 +58,7 @@ import codesign.util as util

from codesign.absolute_and_relative_filename import AbsoluteAndRelativeFileName
from codesign.archive_with_indicator import ArchiveWithIndicator
from codesign.exception import CodeSignException


logger = logging.getLogger(__name__)

@@ -145,13 +146,13 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
    def cleanup_environment_for_builder(self) -> None:
        # TODO(sergey): Revisit need of cleaning up the existing files.
        # In practice it wasn't so helpful, and with multiple clients
        # talking to the same server it becomes even mor etricky.
        # talking to the same server it becomes even more tricky.
        pass

    def cleanup_environment_for_signing_server(self) -> None:
        # TODO(sergey): Revisit need of cleaning up the existing files.
        # In practice it wasn't so helpful, and with multiple clients
        # talking to the same server it becomes even mor etricky.
        # talking to the same server it becomes even more tricky.
        pass

    def generate_request_id(self) -> str:

@@ -220,9 +221,15 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
        """
        Wait until archive with signed files is available.

        Will only return if the archive with signed files is available. If there
        was an error during code sign procedure the SystemExit exception is
        raised, with the message set to the error reported by the codesign
        server.

        Will only wait for the configured time. If that time exceeds and there
        is still no responce from the signing server the application will exit
        with a non-zero exit code.

        """

        signed_archive_info = self.signed_archive_info_for_request_id(

@@ -236,9 +243,17 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
            time.sleep(1)
            time_slept_in_seconds = time.monotonic() - time_start
            if time_slept_in_seconds > timeout_in_seconds:
                signed_archive_info.clean()
                unsigned_archive_info.clean()
                raise SystemExit("Signing server didn't finish signing in "
                                 f"{timeout_in_seconds} seconds, dying :(")
                                 f'{timeout_in_seconds} seconds, dying :(')

        archive_state = signed_archive_info.get_state()
        if archive_state.has_error():
            signed_archive_info.clean()
            unsigned_archive_info.clean()
            raise SystemExit(
                f'Error happenned during codesign procedure: {archive_state.error_message}')

    def copy_signed_files_to_directory(
            self, signed_dir: Path, destination_dir: Path) -> None:

@@ -396,7 +411,13 @@ class BaseCodeSigner(metaclass=abc.ABCMeta):
            temp_dir)

        logger_server.info('Signing all requested files...')
        self.sign_all_files(files)
        try:
            self.sign_all_files(files)
        except CodeSignException as error:
            signed_archive_info.tag_ready(error_message=error.message)
            unsigned_archive_info.clean()
            logger_server.info('Signing is complete with errors.')
            return

        logger_server.info('Packing signed files...')
        pack_files(files=files,
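The control flow added to the waiting code above reduces to: poll until the indicator is ready, abort after the configured timeout, then read the archive state and abort if an error was recorded. A compressed standalone sketch of that flow follows; poll_is_ready and read_state are hypothetical callables standing in for the ArchiveWithIndicator methods, and the message wording is illustrative.

```python
# Standalone sketch of the wait-then-check-state flow added to BaseCodeSigner.
# poll_is_ready() and read_state() are hypothetical stand-ins for the
# ArchiveWithIndicator methods used by the real code.
import time


def wait_for_signed_archive(poll_is_ready, read_state, timeout_in_seconds=30):
    time_start = time.monotonic()
    while not poll_is_ready():
        time.sleep(1)
        if time.monotonic() - time_start > timeout_in_seconds:
            raise SystemExit("Signing server didn't finish signing in "
                             f'{timeout_in_seconds} seconds, dying :(')

    state = read_state()
    if state.get('error_message'):
        # Mirrors the new behaviour: a recorded error aborts the build.
        raise SystemExit('Error during codesign procedure: '
                         f'{state["error_message"]}')


if __name__ == '__main__':
    state = {'file_size': 1024, 'error_message': ''}
    wait_for_signed_archive(poll_is_ready=lambda: True,
                            read_state=lambda: state)
    print('archive ready without errors')
```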
@@ -16,26 +16,11 @@
#
# ##### END GPL LICENSE BLOCK #####

import bpy
# <pep8 compliant>

class CodeSignException(Exception):
    message: str

class NewSimulation(bpy.types.Operator):
    """Create a new simulation data block and edit it in the opened simulation editor"""

    bl_idname = "simulation.new"
    bl_label = "New Simulation"
    bl_options = {'REGISTER', 'UNDO'}

    @classmethod
    def poll(cls, context):
        return context.area.type == 'NODE_EDITOR' and context.space_data.tree_type == 'SimulationNodeTree'

    def execute(self, context):
        simulation = bpy.data.simulations.new("Simulation")
        context.space_data.simulation = simulation
        return {'FINISHED'}


classes = (
    NewSimulation,
)
    def __init__(self, message):
        self.message = message
        super().__init__(self.message)
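A short standalone sketch of how the new exception type is meant to be used: a platform signer raises a CodeSignException subclass, and the server loop forwards its message to the archive indicator. The tag_ready callable here is a hypothetical stand-in for ArchiveWithIndicator.tag_ready(error_message=...), so this is illustration rather than the patch's code.

```python
# Sketch of how a CodeSignException subclass carries an error message from a
# signer back to the archive indicator; SigntoolException mirrors the subclass
# added for Windows.
class CodeSignException(Exception):
    message: str

    def __init__(self, message):
        self.message = message
        super().__init__(self.message)


class SigntoolException(CodeSignException):
    pass


def sign_all_files_or_report(sign_all_files, tag_ready):
    # tag_ready() is a hypothetical stand-in for
    # ArchiveWithIndicator.tag_ready(error_message=...).
    try:
        sign_all_files()
    except CodeSignException as error:
        tag_ready(error_message=error.message)
        return False
    tag_ready()
    return True


if __name__ == '__main__':
    def failing_signer():
        raise SigntoolException('Signtool did not consider codesign a success')

    reported = []
    ok = sign_all_files_or_report(
        failing_signer, lambda error_message='': reported.append(error_message))
    print(ok, reported)
```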
@@ -33,6 +33,7 @@ from buildbot_utils import Builder

from codesign.absolute_and_relative_filename import AbsoluteAndRelativeFileName
from codesign.base_code_signer import BaseCodeSigner
from codesign.exception import CodeSignException

logger = logging.getLogger(__name__)
logger_server = logger.getChild('server')

@@ -45,6 +46,10 @@ EXTENSIONS_TO_BE_SIGNED = {'.dylib', '.so', '.dmg'}
NAME_PREFIXES_TO_BE_SIGNED = {'python'}


class NotarizationException(CodeSignException):
    pass


def is_file_from_bundle(file: AbsoluteAndRelativeFileName) -> bool:
    """
    Check whether file is coming from an .app bundle

@@ -186,7 +191,7 @@ class MacOSCodeSigner(BaseCodeSigner):
                   file.absolute_filepath]
        self.run_command_or_mock(command, util.Platform.MACOS)

    def codesign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> bool:
    def codesign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
        """
        Run codesign tool on all eligible files in the given list.

@@ -225,8 +230,6 @@ class MacOSCodeSigner(BaseCodeSigner):
                file_index + 1, num_signed_files,
                signed_file.relative_filepath)

        return True

    def codesign_bundles(
            self, files: List[AbsoluteAndRelativeFileName]) -> None:
        """

@@ -273,8 +276,6 @@ class MacOSCodeSigner(BaseCodeSigner):

            files.extend(extra_files)

        return True

    ############################################################################
    # Notarization.

@@ -334,7 +335,40 @@ class MacOSCodeSigner(BaseCodeSigner):
            logger_server.error('xcrun command did not report RequestUUID')
            return None

    def notarize_wait_result(self, request_uuid: str) -> bool:
    def notarize_review_status(self, xcrun_output: str) -> bool:
        """
        Review status returned by xcrun's notarization info

        Returns truth if the notarization process has finished.
        If there are errors during notarization, a NotarizationException()
        exception is thrown with status message from the notarial office.
        """

        # Parse status and message
        status = xcrun_field_value_from_output('Status', xcrun_output)
        status_message = xcrun_field_value_from_output(
            'Status Message', xcrun_output)

        if status == 'success':
            logger_server.info(
                'Package successfully notarized: %s', status_message)
            return True

        if status == 'invalid':
            logger_server.error(xcrun_output)
            logger_server.error(
                'Package notarization has failed: %s', status_message)
            raise NotarizationException(status_message)

        if status == 'in progress':
            return False

        logger_server.info(
            'Unknown notarization status %s (%s)', status, status_message)

        return False

    def notarize_wait_result(self, request_uuid: str) -> None:
        """
        Wait for until notarial office have a reply
        """

@@ -351,29 +385,11 @@ class MacOSCodeSigner(BaseCodeSigner):
        timeout_in_seconds = self.config.MACOS_NOTARIZE_TIMEOUT_IN_SECONDS

        while True:
            output = self.check_output_or_mock(
            xcrun_output = self.check_output_or_mock(
                command, util.Platform.MACOS, allow_nonzero_exit_code=True)
            # Parse status and message
            status = xcrun_field_value_from_output('Status', output)
            status_message = xcrun_field_value_from_output(
                'Status Message', output)

            # Review status.
            if status:
                if status == 'success':
                    logger_server.info(
                        'Package successfully notarized: %s', status_message)
                    return True
                elif status == 'invalid':
                    logger_server.error(output)
                    logger_server.error(
                        'Package notarization has failed: %s', status_message)
                    return False
                elif status == 'in progress':
                    pass
                else:
                    logger_server.info(
                        'Unknown notarization status %s (%s)', status, status_message)
            if self.notarize_review_status(xcrun_output):
                break

            logger_server.info('Keep waiting for notarization office.')
            time.sleep(30)

@@ -394,8 +410,6 @@ class MacOSCodeSigner(BaseCodeSigner):
        command = ['xcrun', 'stapler', 'staple', '-v', file.absolute_filepath]
        self.check_output_or_mock(command, util.Platform.MACOS)

        return True

    def notarize_dmg(self, file: AbsoluteAndRelativeFileName) -> bool:
        """
        Run entire pipeline to get DMG notarized.

@@ -414,10 +428,7 @@ class MacOSCodeSigner(BaseCodeSigner):
            return False

        # Staple.
        if not self.notarize_staple(file):
            return False

        return True
        self.notarize_staple(file)

    def notarize_all_dmg(
            self, files: List[AbsoluteAndRelativeFileName]) -> bool:

@@ -432,10 +443,7 @@ class MacOSCodeSigner(BaseCodeSigner):
            if not self.check_file_is_to_be_signed(file):
                continue

            if not self.notarize_dmg(file):
                return False

        return True
            self.notarize_dmg(file)

    ############################################################################
    # Entry point.

@@ -443,11 +451,6 @@ class MacOSCodeSigner(BaseCodeSigner):
    def sign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
        # TODO(sergey): Handle errors somehow.

        if not self.codesign_all_files(files):
            return

        if not self.codesign_bundles(files):
            return

        if not self.notarize_all_dmg(files):
            return
        self.codesign_all_files(files)
        self.codesign_bundles(files)
        self.notarize_all_dmg(files)
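The new notarize_review_status() is essentially a three-way switch on the Status field of the notarization-info output. Below is a standalone sketch of that handling with a hypothetical sample output and a simplified field parser in place of xcrun_field_value_from_output(); it is illustrative only, not the patch's code.

```python
# Standalone sketch of the status handling in notarize_review_status().
# The sample output and the field parser are illustrative only.
SAMPLE_XCRUN_OUTPUT = """\
RequestUUID: 00000000-0000-0000-0000-000000000000
Status: in progress
Status Message: Package is being processed
"""


class NotarizationException(Exception):
    pass


def field_value(field: str, output: str) -> str:
    for line in output.splitlines():
        if line.startswith(field + ':'):
            return line.split(':', 1)[1].strip()
    return ''


def review_status(output: str) -> bool:
    status = field_value('Status', output)
    message = field_value('Status Message', output)
    if status == 'success':
        return True              # Finished, notarization accepted.
    if status == 'invalid':
        raise NotarizationException(message)
    return False                 # 'in progress' or unknown: keep polling.


print(review_status(SAMPLE_XCRUN_OUTPUT))  # False, so the caller keeps waiting.
```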
@@ -29,6 +29,7 @@ from buildbot_utils import Builder

from codesign.absolute_and_relative_filename import AbsoluteAndRelativeFileName
from codesign.base_code_signer import BaseCodeSigner
from codesign.exception import CodeSignException

logger = logging.getLogger(__name__)
logger_server = logger.getChild('server')

@@ -40,6 +41,9 @@ BLACKLIST_FILE_PREFIXES = (
    'api-ms-', 'concrt', 'msvcp', 'ucrtbase', 'vcomp', 'vcruntime')


class SigntoolException(CodeSignException):
    pass

class WindowsCodeSigner(BaseCodeSigner):
    def check_file_is_to_be_signed(
            self, file: AbsoluteAndRelativeFileName) -> bool:

@@ -50,12 +54,41 @@ class WindowsCodeSigner(BaseCodeSigner):

        return file.relative_filepath.suffix in EXTENSIONS_TO_BE_SIGNED

    def get_sign_command_prefix(self) -> List[str]:
        return [
            'signtool', 'sign', '/v',
            '/f', self.config.WIN_CERTIFICATE_FILEPATH,
            '/tr', self.config.WIN_TIMESTAMP_AUTHORITY_URL]

    def run_codesign_tool(self, filepath: Path) -> None:
        command = self.get_sign_command_prefix() + [filepath]
        codesign_output = self.check_output_or_mock(command, util.Platform.WINDOWS)
        logger_server.info(f'signtool output:\n{codesign_output}')

        got_number_of_success = False

        for line in codesign_output.split('\n'):
            line_clean = line.strip()
            line_clean_lower = line_clean.lower()

            if line_clean_lower.startswith('number of warnings') or \
                    line_clean_lower.startswith('number of errors'):
                number = int(line_clean_lower.split(':')[1])
                if number != 0:
                    raise SigntoolException('Non-clean success of signtool')

            if line_clean_lower.startswith('number of files successfully signed'):
                got_number_of_success = True
                number = int(line_clean_lower.split(':')[1])
                if number != 1:
                    raise SigntoolException('Signtool did not consider codesign a success')

        if not got_number_of_success:
            raise SigntoolException('Signtool did not report number of files signed')

    def sign_all_files(self, files: List[AbsoluteAndRelativeFileName]) -> None:
        # NOTE: Sign files one by one to avoid possible command line length
        # overflow (which could happen if we ever decide to sign every binary

@@ -73,12 +106,7 @@ class WindowsCodeSigner(BaseCodeSigner):
                    file_index + 1, num_files, file.relative_filepath)
                continue

            command = self.get_sign_command_prefix()
            command.append(file.absolute_filepath)
            logger_server.info(
                'Running signtool command for file [%d/%d] %s...',
                file_index + 1, num_files, file.relative_filepath)
            # TODO(sergey): Check the status somehow. With a missing certificate
            # the command still exists with a zero code.
            self.run_command_or_mock(command, util.Platform.WINDOWS)
            # TODO(sergey): Report number of signed and ignored files.
            self.run_codesign_tool(file.absolute_filepath)
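run_codesign_tool() above decides success by parsing signtool's summary lines. The standalone sketch below reproduces that check against a hypothetical sample output; the sample text and helper names are illustrative, not captured from a real signtool run.

```python
# Standalone sketch of the signtool summary check in run_codesign_tool().
# The sample output is illustrative, not captured from a real signtool run.
SAMPLE_SIGNTOOL_OUTPUT = """\
Successfully signed: C:\\build\\blender.exe

Number of files successfully Signed: 1
Number of warnings: 0
Number of errors: 0
"""


class SigntoolException(Exception):
    pass


def verify_signtool_output(output: str) -> None:
    got_number_of_success = False
    for line in output.split('\n'):
        line_clean_lower = line.strip().lower()
        if line_clean_lower.startswith(('number of warnings',
                                        'number of errors')):
            if int(line_clean_lower.split(':')[1]) != 0:
                raise SigntoolException('Non-clean success of signtool')
        if line_clean_lower.startswith('number of files successfully signed'):
            got_number_of_success = True
            if int(line_clean_lower.split(':')[1]) != 1:
                raise SigntoolException('Signtool did not consider codesign a success')
    if not got_number_of_success:
        raise SigntoolException('Signtool did not report number of files signed')


verify_signtool_output(SAMPLE_SIGNTOOL_OUTPUT)  # Passes silently on clean output.
```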
@@ -44,6 +44,7 @@ set(WITH_OPENMP ON CACHE BOOL "" FORCE)
set(WITH_OPENSUBDIV ON CACHE BOOL "" FORCE)
set(WITH_OPENVDB ON CACHE BOOL "" FORCE)
set(WITH_OPENVDB_BLOSC ON CACHE BOOL "" FORCE)
set(WITH_NANOVDB ON CACHE BOOL "" FORCE)
set(WITH_POTRACE ON CACHE BOOL "" FORCE)
set(WITH_PYTHON_INSTALL ON CACHE BOOL "" FORCE)
set(WITH_QUADRIFLOW ON CACHE BOOL "" FORCE)

@@ -51,6 +51,7 @@ set(WITH_OPENIMAGEIO OFF CACHE BOOL "" FORCE)
set(WITH_OPENMP OFF CACHE BOOL "" FORCE)
set(WITH_OPENSUBDIV OFF CACHE BOOL "" FORCE)
set(WITH_OPENVDB OFF CACHE BOOL "" FORCE)
set(WITH_NANOVDB OFF CACHE BOOL "" FORCE)
set(WITH_QUADRIFLOW OFF CACHE BOOL "" FORCE)
set(WITH_SDL OFF CACHE BOOL "" FORCE)
set(WITH_TBB OFF CACHE BOOL "" FORCE)

@@ -45,6 +45,7 @@ set(WITH_OPENMP ON CACHE BOOL "" FORCE)
set(WITH_OPENSUBDIV ON CACHE BOOL "" FORCE)
set(WITH_OPENVDB ON CACHE BOOL "" FORCE)
set(WITH_OPENVDB_BLOSC ON CACHE BOOL "" FORCE)
set(WITH_NANOVDB ON CACHE BOOL "" FORCE)
set(WITH_POTRACE ON CACHE BOOL "" FORCE)
set(WITH_PYTHON_INSTALL ON CACHE BOOL "" FORCE)
set(WITH_QUADRIFLOW ON CACHE BOOL "" FORCE)

@@ -28,6 +28,7 @@ set(WITH_OPENCOLLADA OFF CACHE BOOL "" FORCE)
set(WITH_INTERNATIONAL OFF CACHE BOOL "" FORCE)
set(WITH_BULLET OFF CACHE BOOL "" FORCE)
set(WITH_OPENVDB OFF CACHE BOOL "" FORCE)
set(WITH_NANOVDB OFF CACHE BOOL "" FORCE)
set(WITH_ALEMBIC OFF CACHE BOOL "" FORCE)

# Depends on Python install, do this to quiet warning.
extern/ceres/ChangeLog (vendored, 1137 changed lines): file diff suppressed because it is too large.

extern/ceres/README.blender (vendored, 2 changed lines):

@@ -1,4 +1,4 @@
Project: Ceres Solver
URL: http://ceres-solver.org/
Upstream version 1.11 (aef9c9563b08d5f39eee1576af133a84749d1b48)
Upstream version 2.0.0
Local modifications: None

extern/ceres/bundle.sh (vendored, 4 changed lines):

@@ -8,8 +8,8 @@ else
fi

repo="https://ceres-solver.googlesource.com/ceres-solver"
branch="master"
tag=""
#branch="master"
tag="2.0.0"
tmp=`mktemp -d`
checkout="$tmp/ceres"
@@ -153,28 +153,44 @@ template <typename CostFunctor,
          int... Ns>  // Number of parameters in each parameter block.
class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
 public:
  // Takes ownership of functor. Uses the template-provided value for the
  // number of residuals ("kNumResiduals").
  explicit AutoDiffCostFunction(CostFunctor* functor) : functor_(functor) {
  // Takes ownership of functor by default. Uses the template-provided
  // value for the number of residuals ("kNumResiduals").
  explicit AutoDiffCostFunction(CostFunctor* functor,
                                Ownership ownership = TAKE_OWNERSHIP)
      : functor_(functor), ownership_(ownership) {
    static_assert(kNumResiduals != DYNAMIC,
                  "Can't run the fixed-size constructor if the number of "
                  "residuals is set to ceres::DYNAMIC.");
  }

  // Takes ownership of functor. Ignores the template-provided
  // Takes ownership of functor by default. Ignores the template-provided
  // kNumResiduals in favor of the "num_residuals" argument provided.
  //
  // This allows for having autodiff cost functions which return varying
  // numbers of residuals at runtime.
  AutoDiffCostFunction(CostFunctor* functor, int num_residuals)
      : functor_(functor) {
  AutoDiffCostFunction(CostFunctor* functor,
                       int num_residuals,
                       Ownership ownership = TAKE_OWNERSHIP)
      : functor_(functor), ownership_(ownership) {
    static_assert(kNumResiduals == DYNAMIC,
                  "Can't run the dynamic-size constructor if the number of "
                  "residuals is not ceres::DYNAMIC.");
    SizedCostFunction<kNumResiduals, Ns...>::set_num_residuals(num_residuals);
  }

  virtual ~AutoDiffCostFunction() {}
  explicit AutoDiffCostFunction(AutoDiffCostFunction&& other)
      : functor_(std::move(other.functor_)), ownership_(other.ownership_) {}

  virtual ~AutoDiffCostFunction() {
    // Manually release pointer if configured to not take ownership rather than
    // deleting only if ownership is taken.
    // This is to stay maximally compatible to old user code which may have
    // forgotten to implement a virtual destructor, from when the
    // AutoDiffCostFunction always took ownership.
    if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
      functor_.release();
    }
  }

  // Implementation details follow; clients of the autodiff cost function should
  // not have to examine below here.

@@ -201,6 +217,7 @@ class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {

 private:
  std::unique_ptr<CostFunctor> functor_;
  Ownership ownership_;
};

}  // namespace ceres
extern/ceres/include/ceres/c_api.h (vendored, 2 changed lines):
@@ -38,8 +38,10 @@
#ifndef CERES_PUBLIC_C_API_H_
#define CERES_PUBLIC_C_API_H_

// clang-format off
#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
// clang-format on

#ifdef __cplusplus
extern "C" {
@@ -144,8 +144,7 @@ class CostFunctionToFunctor {
|
||||
|
||||
// Extract parameter block pointers from params.
|
||||
using Indices =
|
||||
std::make_integer_sequence<int,
|
||||
ParameterDims::kNumParameterBlocks>;
|
||||
std::make_integer_sequence<int, ParameterDims::kNumParameterBlocks>;
|
||||
std::array<const T*, ParameterDims::kNumParameterBlocks> parameter_blocks =
|
||||
GetParameterPointers<T>(params, Indices());
|
||||
|
||||
|
extern/ceres/include/ceres/covariance.h (vendored, 10 changed lines):
@@ -51,7 +51,7 @@ class CovarianceImpl;
// =======
// It is very easy to use this class incorrectly without understanding
// the underlying mathematics. Please read and understand the
// documentation completely before attempting to use this class.
// documentation completely before attempting to use it.
//
//
// This class allows the user to evaluate the covariance for a

@@ -73,7 +73,7 @@ class CovarianceImpl;
// the maximum likelihood estimate of x given observations y is the
// solution to the non-linear least squares problem:
//
//   x* = arg min_x |f(x)|^2
//   x* = arg min_x |f(x) - y|^2
//
// And the covariance of x* is given by
//

@@ -220,11 +220,11 @@ class CERES_EXPORT Covariance {
// 1. DENSE_SVD uses Eigen's JacobiSVD to perform the
//    computations. It computes the singular value decomposition
//
//      U * S * V' = J
//      U * D * V' = J
//
//    and then uses it to compute the pseudo inverse of J'J as
//
//      pseudoinverse[J'J]^ = V * pseudoinverse[S] * V'
//      pseudoinverse[J'J] = V * pseudoinverse[D^2] * V'
//
//    It is an accurate but slow method and should only be used
//    for small to moderate sized problems. It can handle

@@ -235,7 +235,7 @@ class CERES_EXPORT Covariance {
//
//      Q * R = J
//
//      [J'J]^-1 = [R*R']^-1
//      [J'J]^-1 = [R'*R]^-1
//
//    SPARSE_QR is not capable of computing the covariance if the
//    Jacobian is rank deficient. Depending on the value of
@@ -40,6 +40,7 @@
|
||||
#include "ceres/dynamic_cost_function.h"
|
||||
#include "ceres/internal/fixed_array.h"
|
||||
#include "ceres/jet.h"
|
||||
#include "ceres/types.h"
|
||||
#include "glog/logging.h"
|
||||
|
||||
namespace ceres {
|
||||
@@ -78,10 +79,24 @@ namespace ceres {
|
||||
template <typename CostFunctor, int Stride = 4>
|
||||
class DynamicAutoDiffCostFunction : public DynamicCostFunction {
|
||||
public:
|
||||
explicit DynamicAutoDiffCostFunction(CostFunctor* functor)
|
||||
: functor_(functor) {}
|
||||
// Takes ownership by default.
|
||||
DynamicAutoDiffCostFunction(CostFunctor* functor,
|
||||
Ownership ownership = TAKE_OWNERSHIP)
|
||||
: functor_(functor), ownership_(ownership) {}
|
||||
|
||||
virtual ~DynamicAutoDiffCostFunction() {}
|
||||
explicit DynamicAutoDiffCostFunction(DynamicAutoDiffCostFunction&& other)
|
||||
: functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
|
||||
|
||||
virtual ~DynamicAutoDiffCostFunction() {
|
||||
// Manually release pointer if configured to not take ownership
|
||||
// rather than deleting only if ownership is taken. This is to
|
||||
// stay maximally compatible to old user code which may have
|
||||
// forgotten to implement a virtual destructor, from when the
|
||||
// AutoDiffCostFunction always took ownership.
|
||||
if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
|
||||
functor_.release();
|
||||
}
|
||||
}
|
||||
|
||||
bool Evaluate(double const* const* parameters,
|
||||
double* residuals,
|
||||
@@ -151,6 +166,9 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction {
|
||||
}
|
||||
}
|
||||
|
||||
if (num_active_parameters == 0) {
|
||||
return (*functor_)(parameters, residuals);
|
||||
}
|
||||
// When `num_active_parameters % Stride != 0` then it can be the case
|
||||
// that `active_parameter_count < Stride` while parameter_cursor is less
|
||||
// than the total number of parameters and with no remaining non-constant
|
||||
@@ -248,6 +266,7 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction {
|
||||
|
||||
private:
|
||||
std::unique_ptr<CostFunctor> functor_;
|
||||
Ownership ownership_;
|
||||
};
|
||||
|
||||
} // namespace ceres
|
||||
|
@@ -44,6 +44,7 @@
|
||||
#include "ceres/internal/numeric_diff.h"
|
||||
#include "ceres/internal/parameter_dims.h"
|
||||
#include "ceres/numeric_diff_options.h"
|
||||
#include "ceres/types.h"
|
||||
#include "glog/logging.h"
|
||||
|
||||
namespace ceres {
|
||||
@@ -84,6 +85,10 @@ class DynamicNumericDiffCostFunction : public DynamicCostFunction {
|
||||
const NumericDiffOptions& options = NumericDiffOptions())
|
||||
: functor_(functor), ownership_(ownership), options_(options) {}
|
||||
|
||||
explicit DynamicNumericDiffCostFunction(
|
||||
DynamicNumericDiffCostFunction&& other)
|
||||
: functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
|
||||
|
||||
virtual ~DynamicNumericDiffCostFunction() {
|
||||
if (ownership_ != TAKE_OWNERSHIP) {
|
||||
functor_.release();
|
||||
|
@@ -62,7 +62,8 @@ class CERES_EXPORT GradientProblemSolver {
|
||||
// Minimizer options ----------------------------------------
|
||||
LineSearchDirectionType line_search_direction_type = LBFGS;
|
||||
LineSearchType line_search_type = WOLFE;
|
||||
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type = FLETCHER_REEVES;
|
||||
NonlinearConjugateGradientType nonlinear_conjugate_gradient_type =
|
||||
FLETCHER_REEVES;
|
||||
|
||||
// The LBFGS hessian approximation is a low rank approximation to
|
||||
// the inverse of the Hessian matrix. The rank of the
|
||||
|
@@ -198,7 +198,7 @@ struct Make1stOrderPerturbation {
|
||||
template <int N, int Offset, typename T, typename JetT>
|
||||
struct Make1stOrderPerturbation<N, N, Offset, T, JetT> {
|
||||
public:
|
||||
static void Apply(const T* /*src*/, JetT* /*dst*/) {}
|
||||
static void Apply(const T* src, JetT* dst) {}
|
||||
};
|
||||
|
||||
// Calls Make1stOrderPerturbation for every parameter block.
|
||||
@@ -229,7 +229,9 @@ struct Make1stOrderPerturbations<std::integer_sequence<int, N, Ns...>,
|
||||
|
||||
// End of 'recursion'. Nothing more to do.
|
||||
template <int ParameterIdx, int Total>
|
||||
struct Make1stOrderPerturbations<std::integer_sequence<int>, ParameterIdx, Total> {
|
||||
struct Make1stOrderPerturbations<std::integer_sequence<int>,
|
||||
ParameterIdx,
|
||||
Total> {
|
||||
template <typename T, typename JetT>
|
||||
static void Apply(T const* const* /* NOT USED */, JetT* /* NOT USED */) {}
|
||||
};
|
||||
|
@@ -34,11 +34,11 @@
|
||||
#define CERES_WARNINGS_DISABLED
|
||||
|
||||
#ifdef _MSC_VER
|
||||
#pragma warning( push )
|
||||
#pragma warning(push)
|
||||
// Disable the warning C4251 which is triggered by stl classes in
|
||||
// Ceres' public interface. To quote MSDN: "C4251 can be ignored "
|
||||
// "if you are deriving from a type in the Standard C++ Library"
|
||||
#pragma warning( disable : 4251 )
|
||||
#pragma warning(disable : 4251)
|
||||
#endif
|
||||
|
||||
#endif // CERES_WARNINGS_DISABLED
|
||||
|
extern/ceres/include/ceres/internal/eigen.h (vendored, 23 changed lines):
@@ -36,31 +36,26 @@
|
||||
namespace ceres {
|
||||
|
||||
typedef Eigen::Matrix<double, Eigen::Dynamic, 1> Vector;
|
||||
typedef Eigen::Matrix<double,
|
||||
Eigen::Dynamic,
|
||||
Eigen::Dynamic,
|
||||
Eigen::RowMajor> Matrix;
|
||||
typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
|
||||
Matrix;
|
||||
typedef Eigen::Map<Vector> VectorRef;
|
||||
typedef Eigen::Map<Matrix> MatrixRef;
|
||||
typedef Eigen::Map<const Vector> ConstVectorRef;
|
||||
typedef Eigen::Map<const Matrix> ConstMatrixRef;
|
||||
|
||||
// Column major matrices for DenseSparseMatrix/DenseQRSolver
|
||||
typedef Eigen::Matrix<double,
|
||||
Eigen::Dynamic,
|
||||
Eigen::Dynamic,
|
||||
Eigen::ColMajor> ColMajorMatrix;
|
||||
typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>
|
||||
ColMajorMatrix;
|
||||
|
||||
typedef Eigen::Map<ColMajorMatrix, 0,
|
||||
Eigen::Stride<Eigen::Dynamic, 1>> ColMajorMatrixRef;
|
||||
typedef Eigen::Map<ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>>
|
||||
ColMajorMatrixRef;
|
||||
|
||||
typedef Eigen::Map<const ColMajorMatrix,
|
||||
0,
|
||||
Eigen::Stride<Eigen::Dynamic, 1>> ConstColMajorMatrixRef;
|
||||
typedef Eigen::Map<const ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>>
|
||||
ConstColMajorMatrixRef;
|
||||
|
||||
// C++ does not support templated typdefs, thus the need for this
|
||||
// struct so that we can support statically sized Matrix and Maps.
|
||||
template <int num_rows = Eigen::Dynamic, int num_cols = Eigen::Dynamic>
|
||||
template <int num_rows = Eigen::Dynamic, int num_cols = Eigen::Dynamic>
|
||||
struct EigenTypes {
|
||||
typedef Eigen::Matrix<double,
|
||||
num_rows,
|
||||
|
@@ -30,6 +30,7 @@
|
||||
#ifndef CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_
|
||||
#define CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_
|
||||
|
||||
#include <Eigen/Core> // For Eigen::aligned_allocator
|
||||
#include <algorithm>
|
||||
#include <array>
|
||||
#include <cstddef>
|
||||
@@ -37,8 +38,6 @@
|
||||
#include <tuple>
|
||||
#include <type_traits>
|
||||
|
||||
#include <Eigen/Core> // For Eigen::aligned_allocator
|
||||
|
||||
#include "ceres/internal/memory.h"
|
||||
#include "glog/logging.h"
|
||||
|
||||
|
@@ -62,7 +62,8 @@ struct SumImpl;
|
||||
// Strip of and sum the first number.
|
||||
template <typename T, T N, T... Ns>
|
||||
struct SumImpl<std::integer_sequence<T, N, Ns...>> {
|
||||
static constexpr T Value = N + SumImpl<std::integer_sequence<T, Ns...>>::Value;
|
||||
static constexpr T Value =
|
||||
N + SumImpl<std::integer_sequence<T, Ns...>>::Value;
|
||||
};
|
||||
|
||||
// Strip of and sum the first two numbers.
|
||||
@@ -129,10 +130,14 @@ template <typename T, T Sum, typename SeqIn, typename SeqOut>
|
||||
struct ExclusiveScanImpl;
|
||||
|
||||
template <typename T, T Sum, T N, T... Ns, T... Rs>
|
||||
struct ExclusiveScanImpl<T, Sum, std::integer_sequence<T, N, Ns...>,
|
||||
struct ExclusiveScanImpl<T,
|
||||
Sum,
|
||||
std::integer_sequence<T, N, Ns...>,
|
||||
std::integer_sequence<T, Rs...>> {
|
||||
using Type =
|
||||
typename ExclusiveScanImpl<T, Sum + N, std::integer_sequence<T, Ns...>,
|
||||
typename ExclusiveScanImpl<T,
|
||||
Sum + N,
|
||||
std::integer_sequence<T, Ns...>,
|
||||
std::integer_sequence<T, Rs..., Sum>>::Type;
|
||||
};
|
||||
|
||||
|
extern/ceres/include/ceres/internal/numeric_diff.h (vendored, 118 changed lines):
@@ -47,15 +47,17 @@
|
||||
#include "ceres/types.h"
|
||||
#include "glog/logging.h"
|
||||
|
||||
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
|
||||
// This is split from the main class because C++ doesn't allow partial template
|
||||
// specializations for member functions. The alternative is to repeat the main
|
||||
// class for differing numbers of parameters, which is also unfortunate.
|
||||
template <typename CostFunctor, NumericDiffMethodType kMethod,
|
||||
int kNumResiduals, typename ParameterDims, int kParameterBlock,
|
||||
template <typename CostFunctor,
|
||||
NumericDiffMethodType kMethod,
|
||||
int kNumResiduals,
|
||||
typename ParameterDims,
|
||||
int kParameterBlock,
|
||||
int kParameterBlockSize>
|
||||
struct NumericDiff {
|
||||
// Mutates parameters but must restore them before return.
|
||||
@@ -66,23 +68,23 @@ struct NumericDiff {
|
||||
int num_residuals,
|
||||
int parameter_block_index,
|
||||
int parameter_block_size,
|
||||
double **parameters,
|
||||
double *jacobian) {
|
||||
double** parameters,
|
||||
double* jacobian) {
|
||||
using Eigen::ColMajor;
|
||||
using Eigen::Map;
|
||||
using Eigen::Matrix;
|
||||
using Eigen::RowMajor;
|
||||
using Eigen::ColMajor;
|
||||
|
||||
DCHECK(jacobian);
|
||||
|
||||
const int num_residuals_internal =
|
||||
(kNumResiduals != ceres::DYNAMIC ? kNumResiduals : num_residuals);
|
||||
const int parameter_block_index_internal =
|
||||
(kParameterBlock != ceres::DYNAMIC ? kParameterBlock :
|
||||
parameter_block_index);
|
||||
(kParameterBlock != ceres::DYNAMIC ? kParameterBlock
|
||||
: parameter_block_index);
|
||||
const int parameter_block_size_internal =
|
||||
(kParameterBlockSize != ceres::DYNAMIC ? kParameterBlockSize :
|
||||
parameter_block_size);
|
||||
(kParameterBlockSize != ceres::DYNAMIC ? kParameterBlockSize
|
||||
: parameter_block_size);
|
||||
|
||||
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
|
||||
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
|
||||
@@ -97,17 +99,17 @@ struct NumericDiff {
|
||||
(kParameterBlockSize == 1) ? ColMajor : RowMajor>
|
||||
JacobianMatrix;
|
||||
|
||||
Map<JacobianMatrix> parameter_jacobian(jacobian,
|
||||
num_residuals_internal,
|
||||
parameter_block_size_internal);
|
||||
Map<JacobianMatrix> parameter_jacobian(
|
||||
jacobian, num_residuals_internal, parameter_block_size_internal);
|
||||
|
||||
Map<ParameterVector> x_plus_delta(
|
||||
parameters[parameter_block_index_internal],
|
||||
parameter_block_size_internal);
|
||||
ParameterVector x(x_plus_delta);
|
||||
ParameterVector step_size = x.array().abs() *
|
||||
((kMethod == RIDDERS) ? options.ridders_relative_initial_step_size :
|
||||
options.relative_step_size);
|
||||
ParameterVector step_size =
|
||||
x.array().abs() * ((kMethod == RIDDERS)
|
||||
? options.ridders_relative_initial_step_size
|
||||
: options.relative_step_size);
|
||||
|
||||
// It is not a good idea to make the step size arbitrarily
|
||||
// small. This will lead to problems with round off and numerical
|
||||
@@ -118,8 +120,8 @@ struct NumericDiff {
|
||||
// For Ridders' method, the initial step size is required to be large,
|
||||
// thus ridders_relative_initial_step_size is used.
|
||||
if (kMethod == RIDDERS) {
|
||||
min_step_size = std::max(min_step_size,
|
||||
options.ridders_relative_initial_step_size);
|
||||
min_step_size =
|
||||
std::max(min_step_size, options.ridders_relative_initial_step_size);
|
||||
}
|
||||
|
||||
// For each parameter in the parameter block, use finite differences to
|
||||
@@ -133,7 +135,9 @@ struct NumericDiff {
|
||||
const double delta = std::max(min_step_size, step_size(j));
|
||||
|
||||
if (kMethod == RIDDERS) {
|
||||
if (!EvaluateRiddersJacobianColumn(functor, j, delta,
|
||||
if (!EvaluateRiddersJacobianColumn(functor,
|
||||
j,
|
||||
delta,
|
||||
options,
|
||||
num_residuals_internal,
|
||||
parameter_block_size_internal,
|
||||
@@ -146,7 +150,9 @@ struct NumericDiff {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
if (!EvaluateJacobianColumn(functor, j, delta,
|
||||
if (!EvaluateJacobianColumn(functor,
|
||||
j,
|
||||
delta,
|
||||
num_residuals_internal,
|
||||
parameter_block_size_internal,
|
||||
x.data(),
|
||||
@@ -182,8 +188,7 @@ struct NumericDiff {
|
||||
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
|
||||
|
||||
Map<const ParameterVector> x(x_ptr, parameter_block_size);
|
||||
Map<ParameterVector> x_plus_delta(x_plus_delta_ptr,
|
||||
parameter_block_size);
|
||||
Map<ParameterVector> x_plus_delta(x_plus_delta_ptr, parameter_block_size);
|
||||
|
||||
Map<ResidualVector> residuals(residuals_ptr, num_residuals);
|
||||
Map<ResidualVector> temp_residuals(temp_residuals_ptr, num_residuals);
|
||||
@@ -191,9 +196,8 @@ struct NumericDiff {
|
||||
// Mutate 1 element at a time and then restore.
|
||||
x_plus_delta(parameter_index) = x(parameter_index) + delta;
|
||||
|
||||
if (!VariadicEvaluate<ParameterDims>(*functor,
|
||||
parameters,
|
||||
residuals.data())) {
|
||||
if (!VariadicEvaluate<ParameterDims>(
|
||||
*functor, parameters, residuals.data())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -206,9 +210,8 @@ struct NumericDiff {
|
||||
// Compute the function on the other side of x(parameter_index).
|
||||
x_plus_delta(parameter_index) = x(parameter_index) - delta;
|
||||
|
||||
if (!VariadicEvaluate<ParameterDims>(*functor,
|
||||
parameters,
|
||||
temp_residuals.data())) {
|
||||
if (!VariadicEvaluate<ParameterDims>(
|
||||
*functor, parameters, temp_residuals.data())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -217,8 +220,7 @@ struct NumericDiff {
|
||||
} else {
|
||||
// Forward difference only; reuse existing residuals evaluation.
|
||||
residuals -=
|
||||
Map<const ResidualVector>(residuals_at_eval_point,
|
||||
num_residuals);
|
||||
Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
|
||||
}
|
||||
|
||||
// Restore x_plus_delta.
|
||||
@@ -254,17 +256,17 @@ struct NumericDiff {
|
||||
double* x_plus_delta_ptr,
|
||||
double* temp_residuals_ptr,
|
||||
double* residuals_ptr) {
|
||||
using Eigen::aligned_allocator;
|
||||
using Eigen::Map;
|
||||
using Eigen::Matrix;
|
||||
using Eigen::aligned_allocator;
|
||||
|
||||
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
|
||||
typedef Matrix<double, kNumResiduals, Eigen::Dynamic> ResidualCandidateMatrix;
|
||||
typedef Matrix<double, kNumResiduals, Eigen::Dynamic>
|
||||
ResidualCandidateMatrix;
|
||||
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
|
||||
|
||||
Map<const ParameterVector> x(x_ptr, parameter_block_size);
|
||||
Map<ParameterVector> x_plus_delta(x_plus_delta_ptr,
|
||||
parameter_block_size);
|
||||
Map<ParameterVector> x_plus_delta(x_plus_delta_ptr, parameter_block_size);
|
||||
|
||||
Map<ResidualVector> residuals(residuals_ptr, num_residuals);
|
||||
Map<ResidualVector> temp_residuals(temp_residuals_ptr, num_residuals);
|
||||
@@ -275,18 +277,16 @@ struct NumericDiff {
|
||||
// As the derivative is estimated, the step size decreases.
|
||||
// By default, the step sizes are chosen so that the middle column
|
||||
// of the Romberg tableau uses the input delta.
|
||||
double current_step_size = delta *
|
||||
pow(options.ridders_step_shrink_factor,
|
||||
options.max_num_ridders_extrapolations / 2);
|
||||
double current_step_size =
|
||||
delta * pow(options.ridders_step_shrink_factor,
|
||||
options.max_num_ridders_extrapolations / 2);
|
||||
|
||||
// Double-buffering temporary differential candidate vectors
|
||||
// from previous step size.
|
||||
ResidualCandidateMatrix stepsize_candidates_a(
|
||||
num_residuals,
|
||||
options.max_num_ridders_extrapolations);
|
||||
num_residuals, options.max_num_ridders_extrapolations);
|
||||
ResidualCandidateMatrix stepsize_candidates_b(
|
||||
num_residuals,
|
||||
options.max_num_ridders_extrapolations);
|
||||
num_residuals, options.max_num_ridders_extrapolations);
|
||||
ResidualCandidateMatrix* current_candidates = &stepsize_candidates_a;
|
||||
ResidualCandidateMatrix* previous_candidates = &stepsize_candidates_b;
|
||||
|
||||
@@ -304,7 +304,9 @@ struct NumericDiff {
|
||||
// 3. Extrapolation becomes numerically unstable.
|
||||
for (int i = 0; i < options.max_num_ridders_extrapolations; ++i) {
|
||||
// Compute the numerical derivative at this step size.
|
||||
if (!EvaluateJacobianColumn(functor, parameter_index, current_step_size,
|
||||
if (!EvaluateJacobianColumn(functor,
|
||||
parameter_index,
|
||||
current_step_size,
|
||||
num_residuals,
|
||||
parameter_block_size,
|
||||
x.data(),
|
||||
@@ -327,23 +329,24 @@ struct NumericDiff {
|
||||
|
||||
// Extrapolation factor for Richardson acceleration method (see below).
|
||||
double richardson_factor = options.ridders_step_shrink_factor *
|
||||
options.ridders_step_shrink_factor;
|
||||
options.ridders_step_shrink_factor;
|
||||
for (int k = 1; k <= i; ++k) {
|
||||
// Extrapolate the various orders of finite differences using
|
||||
// the Richardson acceleration method.
|
||||
current_candidates->col(k) =
|
||||
(richardson_factor * current_candidates->col(k - 1) -
|
||||
previous_candidates->col(k - 1)) / (richardson_factor - 1.0);
|
||||
previous_candidates->col(k - 1)) /
|
||||
(richardson_factor - 1.0);
|
||||
|
||||
richardson_factor *= options.ridders_step_shrink_factor *
|
||||
options.ridders_step_shrink_factor;
|
||||
options.ridders_step_shrink_factor;
|
||||
|
||||
// Compute the difference between the previous value and the current.
|
||||
double candidate_error = std::max(
|
||||
(current_candidates->col(k) -
|
||||
current_candidates->col(k - 1)).norm(),
|
||||
(current_candidates->col(k) -
|
||||
previous_candidates->col(k - 1)).norm());
|
||||
(current_candidates->col(k) - current_candidates->col(k - 1))
|
||||
.norm(),
|
||||
(current_candidates->col(k) - previous_candidates->col(k - 1))
|
||||
.norm());
|
||||
|
||||
// If the error has decreased, update results.
|
||||
if (candidate_error <= norm_error) {
|
||||
@@ -365,8 +368,9 @@ struct NumericDiff {
|
||||
// Check to see if the current gradient estimate is numerically unstable.
|
||||
// If so, bail out and return the last stable result.
|
||||
if (i > 0) {
|
||||
double tableau_error = (current_candidates->col(i) -
|
||||
previous_candidates->col(i - 1)).norm();
|
||||
double tableau_error =
|
||||
(current_candidates->col(i) - previous_candidates->col(i - 1))
|
||||
.norm();
|
||||
|
||||
// Compare current error to the chosen candidate's error.
|
||||
if (tableau_error >= 2 * norm_error) {
|
||||
@@ -482,14 +486,18 @@ struct EvaluateJacobianForParameterBlocks<ParameterDims,
|
||||
|
||||
// End of 'recursion'. Nothing more to do.
|
||||
template <typename ParameterDims, int ParameterIdx>
|
||||
struct EvaluateJacobianForParameterBlocks<ParameterDims, std::integer_sequence<int>,
|
||||
struct EvaluateJacobianForParameterBlocks<ParameterDims,
|
||||
std::integer_sequence<int>,
|
||||
ParameterIdx> {
|
||||
template <NumericDiffMethodType method, int kNumResiduals,
|
||||
template <NumericDiffMethodType method,
|
||||
int kNumResiduals,
|
||||
typename CostFunctor>
|
||||
static bool Apply(const CostFunctor* /* NOT USED*/,
|
||||
const double* /* NOT USED*/,
|
||||
const NumericDiffOptions& /* NOT USED*/, int /* NOT USED*/,
|
||||
double** /* NOT USED*/, double** /* NOT USED*/) {
|
||||
const NumericDiffOptions& /* NOT USED*/,
|
||||
int /* NOT USED*/,
|
||||
double** /* NOT USED*/,
|
||||
double** /* NOT USED*/) {
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
extern/ceres/include/ceres/internal/port.h (vendored, 92 changed lines):
@@ -35,17 +35,17 @@
|
||||
#include "ceres/internal/config.h"
|
||||
|
||||
#if defined(CERES_USE_OPENMP)
|
||||
# if defined(CERES_USE_CXX_THREADS) || defined(CERES_NO_THREADS)
|
||||
# error CERES_USE_OPENMP is mutually exclusive to CERES_USE_CXX_THREADS and CERES_NO_THREADS
|
||||
# endif
|
||||
#if defined(CERES_USE_CXX_THREADS) || defined(CERES_NO_THREADS)
|
||||
#error CERES_USE_OPENMP is mutually exclusive to CERES_USE_CXX_THREADS and CERES_NO_THREADS
|
||||
#endif
|
||||
#elif defined(CERES_USE_CXX_THREADS)
|
||||
# if defined(CERES_USE_OPENMP) || defined(CERES_NO_THREADS)
|
||||
# error CERES_USE_CXX_THREADS is mutually exclusive to CERES_USE_OPENMP, CERES_USE_CXX_THREADS and CERES_NO_THREADS
|
||||
# endif
|
||||
#if defined(CERES_USE_OPENMP) || defined(CERES_NO_THREADS)
|
||||
#error CERES_USE_CXX_THREADS is mutually exclusive to CERES_USE_OPENMP, CERES_USE_CXX_THREADS and CERES_NO_THREADS
|
||||
#endif
|
||||
#elif defined(CERES_NO_THREADS)
|
||||
# if defined(CERES_USE_OPENMP) || defined(CERES_USE_CXX_THREADS)
|
||||
# error CERES_NO_THREADS is mutually exclusive to CERES_USE_OPENMP and CERES_USE_CXX_THREADS
|
||||
# endif
|
||||
#if defined(CERES_USE_OPENMP) || defined(CERES_USE_CXX_THREADS)
|
||||
#error CERES_NO_THREADS is mutually exclusive to CERES_USE_OPENMP and CERES_USE_CXX_THREADS
|
||||
#endif
|
||||
#else
|
||||
# error One of CERES_USE_OPENMP, CERES_USE_CXX_THREADS or CERES_NO_THREADS must be defined.
|
||||
#endif
|
||||
@@ -54,37 +54,57 @@
// compiled without any sparse back-end. Verify that it has not subsequently
// been inconsistently redefined.
#if defined(CERES_NO_SPARSE)
# if !defined(CERES_NO_SUITESPARSE)
# error CERES_NO_SPARSE requires CERES_NO_SUITESPARSE.
# endif
# if !defined(CERES_NO_CXSPARSE)
# error CERES_NO_SPARSE requires CERES_NO_CXSPARSE
# endif
# if !defined(CERES_NO_ACCELERATE_SPARSE)
# error CERES_NO_SPARSE requires CERES_NO_ACCELERATE_SPARSE
# endif
# if defined(CERES_USE_EIGEN_SPARSE)
# error CERES_NO_SPARSE requires !CERES_USE_EIGEN_SPARSE
# endif
#if !defined(CERES_NO_SUITESPARSE)
#error CERES_NO_SPARSE requires CERES_NO_SUITESPARSE.
#endif
#if !defined(CERES_NO_CXSPARSE)
#error CERES_NO_SPARSE requires CERES_NO_CXSPARSE
#endif
#if !defined(CERES_NO_ACCELERATE_SPARSE)
#error CERES_NO_SPARSE requires CERES_NO_ACCELERATE_SPARSE
#endif
#if defined(CERES_USE_EIGEN_SPARSE)
#error CERES_NO_SPARSE requires !CERES_USE_EIGEN_SPARSE
#endif
#endif

// A macro to signal which functions and classes are exported when
// building a DLL with MSVC.
//
// Note that the ordering here is important, CERES_BUILDING_SHARED_LIBRARY
// is only defined locally when Ceres is compiled, it is never exported to
// users. However, in order that we do not have to configure config.h
// separately for building vs installing, if we are using MSVC and building
// a shared library, then both CERES_BUILDING_SHARED_LIBRARY and
// CERES_USING_SHARED_LIBRARY will be defined when Ceres is compiled.
// Hence it is important that the check for CERES_BUILDING_SHARED_LIBRARY
// happens first.
#if defined(_MSC_VER) && defined(CERES_BUILDING_SHARED_LIBRARY)
# define CERES_EXPORT __declspec(dllexport)
#elif defined(_MSC_VER) && defined(CERES_USING_SHARED_LIBRARY)
# define CERES_EXPORT __declspec(dllimport)
// building a shared library.
#if defined(_MSC_VER)
#define CERES_API_SHARED_IMPORT __declspec(dllimport)
#define CERES_API_SHARED_EXPORT __declspec(dllexport)
#elif defined(__GNUC__)
#define CERES_API_SHARED_IMPORT __attribute__((visibility("default")))
#define CERES_API_SHARED_EXPORT __attribute__((visibility("default")))
#else
# define CERES_EXPORT
#define CERES_API_SHARED_IMPORT
#define CERES_API_SHARED_EXPORT
#endif

// CERES_BUILDING_SHARED_LIBRARY is only defined locally when Ceres itself is
// compiled as a shared library, it is never exported to users. In order that
// we do not have to configure config.h separately when building Ceres as either
// a static or dynamic library, we define both CERES_USING_SHARED_LIBRARY and
// CERES_BUILDING_SHARED_LIBRARY when building as a shared library.
#if defined(CERES_USING_SHARED_LIBRARY)
#if defined(CERES_BUILDING_SHARED_LIBRARY)
// Compiling Ceres itself as a shared library.
#define CERES_EXPORT CERES_API_SHARED_EXPORT
#else
// Using Ceres as a shared library.
#define CERES_EXPORT CERES_API_SHARED_IMPORT
#endif
#else
// Ceres was compiled as a static library, export everything.
#define CERES_EXPORT
#endif

// Unit tests reach in and test internal functionality so we need a way to make
// those symbols visible
#ifdef CERES_EXPORT_INTERNAL_SYMBOLS
#define CERES_EXPORT_INTERNAL CERES_EXPORT
#else
#define CERES_EXPORT_INTERNAL
#endif

#endif // CERES_PUBLIC_INTERNAL_PORT_H_
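Note: the export-macro comments above explain the intent but the diff never shows a caller. A minimal sketch of how the two macros are meant to be attached to declarations follows; the class names are invented for illustration and are not part of the patch.

// Sketch only: CERES_EXPORT marks public API, CERES_EXPORT_INTERNAL marks
// internal symbols that unit tests still need to see. "MyPublicThing" and
// "MyInternalThing" are hypothetical names.
#include "ceres/internal/port.h"

class CERES_EXPORT MyPublicThing {
 public:
  void DoWork();
};

namespace internal {
class CERES_EXPORT_INTERNAL MyInternalThing {
 public:
  void Helper();
};
}  // namespace internal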
@@ -32,7 +32,7 @@
#undef CERES_WARNINGS_DISABLED

#ifdef _MSC_VER
#pragma warning( pop )
#pragma warning(pop)
#endif

#endif // CERES_WARNINGS_DISABLED
@@ -46,8 +46,10 @@ namespace internal {

// For fixed size cost functors
template <typename Functor, typename T, int... Indices>
inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
T* output, std::false_type /*is_dynamic*/,
inline bool VariadicEvaluateImpl(const Functor& functor,
T const* const* input,
T* output,
std::false_type /*is_dynamic*/,
std::integer_sequence<int, Indices...>) {
static_assert(sizeof...(Indices),
"Invalid number of parameter blocks. At least one parameter "
@@ -57,26 +59,31 @@ inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,

// For dynamic sized cost functors
template <typename Functor, typename T>
inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
T* output, std::true_type /*is_dynamic*/,
inline bool VariadicEvaluateImpl(const Functor& functor,
T const* const* input,
T* output,
std::true_type /*is_dynamic*/,
std::integer_sequence<int>) {
return functor(input, output);
}

// For ceres cost functors (not ceres::CostFunction)
template <typename ParameterDims, typename Functor, typename T>
inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
T* output, const void* /* NOT USED */) {
inline bool VariadicEvaluateImpl(const Functor& functor,
T const* const* input,
T* output,
const void* /* NOT USED */) {
using ParameterBlockIndices =
std::make_integer_sequence<int, ParameterDims::kNumParameterBlocks>;
using IsDynamic = std::integral_constant<bool, ParameterDims::kIsDynamic>;
return VariadicEvaluateImpl(functor, input, output, IsDynamic(),
ParameterBlockIndices());
return VariadicEvaluateImpl(
functor, input, output, IsDynamic(), ParameterBlockIndices());
}

// For ceres::CostFunction
template <typename ParameterDims, typename Functor, typename T>
inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
inline bool VariadicEvaluateImpl(const Functor& functor,
T const* const* input,
T* output,
const CostFunction* /* NOT USED */) {
return functor.Evaluate(input, output, nullptr);
@@ -95,7 +102,8 @@ inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
// blocks. The signature of the functor must have the following signature
// 'bool()(const T* i_1, const T* i_2, ... const T* i_n, T* output)'.
template <typename ParameterDims, typename Functor, typename T>
inline bool VariadicEvaluate(const Functor& functor, T const* const* input,
inline bool VariadicEvaluate(const Functor& functor,
T const* const* input,
T* output) {
return VariadicEvaluateImpl<ParameterDims>(functor, input, output, &functor);
}
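Note: the overloads above pick an implementation by tag dispatch on std::true_type / std::false_type. A standalone sketch of the same idiom, with invented names rather than the Ceres templates, is shown below so the reformatting is easier to follow.

// Minimal tag-dispatch sketch mirroring the is_dynamic dispatch above.
#include <iostream>
#include <type_traits>

template <typename Functor>
bool EvaluateImpl(const Functor& f, std::true_type /*is_dynamic*/) {
  std::cout << "dynamic path\n";
  return f();
}

template <typename Functor>
bool EvaluateImpl(const Functor& f, std::false_type /*is_dynamic*/) {
  std::cout << "fixed-size path\n";
  return f();
}

template <bool kIsDynamic, typename Functor>
bool Evaluate(const Functor& f) {
  // The bool parameter is turned into a type so overload resolution
  // selects the right implementation at compile time.
  return EvaluateImpl(f, std::integral_constant<bool, kIsDynamic>());
}

int main() {
  Evaluate<true>([] { return true; });   // dynamic path
  Evaluate<false>([] { return true; });  // fixed-size path
}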
@@ -73,7 +73,7 @@ struct CERES_EXPORT IterationSummary {
bool step_is_successful = false;

// Value of the objective function.
double cost = 0.90;
double cost = 0.0;

// Change in the value of the objective function in this
// iteration. This can be positive or negative.

extern/ceres/include/ceres/jet.h (vendored, 17 lines changed)
@@ -388,6 +388,8 @@ using std::cbrt;
using std::ceil;
using std::cos;
using std::cosh;
using std::erf;
using std::erfc;
using std::exp;
using std::exp2;
using std::floor;
@@ -573,6 +575,21 @@ inline Jet<T, N> fmin(const Jet<T, N>& x, const Jet<T, N>& y) {
return y < x ? y : x;
}

// erf is defined as an integral that cannot be expressed analyticaly
// however, the derivative is trivial to compute
// erf(x + h) = erf(x) + h * 2*exp(-x^2)/sqrt(pi)
template <typename T, int N>
inline Jet<T, N> erf(const Jet<T, N>& x) {
return Jet<T, N>(erf(x.a), x.v * M_2_SQRTPI * exp(-x.a * x.a));
}

// erfc(x) = 1-erf(x)
// erfc(x + h) = erfc(x) + h * (-2*exp(-x^2)/sqrt(pi))
template <typename T, int N>
inline Jet<T, N> erfc(const Jet<T, N>& x) {
return Jet<T, N>(erfc(x.a), -x.v * M_2_SQRTPI * exp(-x.a * x.a));
}

// Bessel functions of the first kind with integer order equal to 0, 1, n.
//
// Microsoft has deprecated the j[0,1,n]() POSIX Bessel functions in favour of
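Note: restated in the dual-number form that Jet implements (with a = x.a, v = x.v, and M_2_SQRTPI = 2/sqrt(pi)), the two new overloads above are just the chain rule:

\operatorname{erf}(a + v\varepsilon) = \operatorname{erf}(a) + \frac{2}{\sqrt{\pi}} e^{-a^{2}}\, v\,\varepsilon,
\qquad
\operatorname{erfc}(a + v\varepsilon) = \operatorname{erfc}(a) - \frac{2}{\sqrt{\pi}} e^{-a^{2}}\, v\,\varepsilon.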
@@ -90,8 +90,8 @@ namespace ceres {
//
// An example that occurs commonly in Structure from Motion problems
// is when camera rotations are parameterized using Quaternion. There,
// it is useful only make updates orthogonal to that 4-vector defining
// the quaternion. One way to do this is to let delta be a 3
// it is useful to only make updates orthogonal to that 4-vector
// defining the quaternion. One way to do this is to let delta be a 3
// dimensional vector and define Plus to be
//
// Plus(x, delta) = [cos(|delta|), sin(|delta|) delta / |delta|] * x
@@ -99,7 +99,7 @@ namespace ceres {
// The multiplication between the two 4-vectors on the RHS is the
// standard quaternion product.
//
// Given g and a point x, optimizing f can now be restated as
// Given f and a point x, optimizing f can now be restated as
//
// min f(Plus(x, delta))
// delta
@@ -306,6 +306,7 @@ class CERES_EXPORT ProductParameterization : public LocalParameterization {
public:
ProductParameterization(const ProductParameterization&) = delete;
ProductParameterization& operator=(const ProductParameterization&) = delete;
virtual ~ProductParameterization() {}
//
// NOTE: The constructor takes ownership of the input local
// parameterizations.
@@ -341,7 +342,8 @@ class CERES_EXPORT ProductParameterization : public LocalParameterization {
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool ComputeJacobian(const double* x, double* jacobian) const override;
bool ComputeJacobian(const double* x,
double* jacobian) const override;
int GlobalSize() const override { return global_size_; }
int LocalSize() const override { return local_size_; }

@@ -354,8 +356,8 @@ class CERES_EXPORT ProductParameterization : public LocalParameterization {

} // namespace ceres

// clang-format off
#include "ceres/internal/reenable_warnings.h"
#include "ceres/internal/line_parameterization.h"

#endif // CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
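Note: for reference, the Plus operation described in the corrected comment above, written out (here the bracketed 4-vector is a unit quaternion, the product is the quaternion product, and the minimization is over the 3-vector delta):

\operatorname{Plus}(x, \delta) =
\left[\cos\lVert\delta\rVert,\;
\frac{\sin\lVert\delta\rVert}{\lVert\delta\rVert}\,\delta\right] \otimes x,
\qquad
\min_{\delta}\; f\bigl(\operatorname{Plus}(x, \delta)\bigr).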
@@ -192,7 +192,10 @@ class NumericDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
}
}

~NumericDiffCostFunction() {
explicit NumericDiffCostFunction(NumericDiffCostFunction&& other)
: functor_(std::move(other.functor_)), ownership_(other.ownership_) {}

virtual ~NumericDiffCostFunction() {
if (ownership_ != TAKE_OWNERSHIP) {
functor_.release();
}
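Note: as a usage reminder for the new move constructor and the ownership check in the destructor, a hedged sketch follows. The functor and block sizes are invented; the template parameters follow the usual NumericDiffCostFunction<Functor, method, kNumResiduals, Ns...> pattern.

// Sketch: a scalar residual functor differentiated numerically.
// "ScalarResidual" is a made-up example, not part of the patch.
#include "ceres/numeric_diff_cost_function.h"

struct ScalarResidual {
  bool operator()(const double* x, double* residual) const {
    residual[0] = 10.0 - x[0];
    return true;
  }
};

ceres::CostFunction* MakeCost() {
  // With TAKE_OWNERSHIP (the default) the destructor shown in the diff
  // deletes the functor; with DO_NOT_TAKE_OWNERSHIP it calls release().
  return new ceres::NumericDiffCostFunction<ScalarResidual, ceres::CENTRAL, 1, 1>(
      new ScalarResidual);
}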

extern/ceres/include/ceres/problem.h (vendored, 48 lines changed)
@@ -453,13 +453,15 @@ class CERES_EXPORT Problem {
// problem.AddResidualBlock(new MyCostFunction, nullptr, &x);
//
// double cost = 0.0;
// problem.Evaluate(Problem::EvaluateOptions(), &cost, nullptr, nullptr, nullptr);
// problem.Evaluate(Problem::EvaluateOptions(), &cost,
// nullptr, nullptr, nullptr);
//
// The cost is evaluated at x = 1. If you wish to evaluate the
// problem at x = 2, then
//
// x = 2;
// problem.Evaluate(Problem::EvaluateOptions(), &cost, nullptr, nullptr, nullptr);
// problem.Evaluate(Problem::EvaluateOptions(), &cost,
// nullptr, nullptr, nullptr);
//
// is the way to do so.
//
@@ -475,7 +477,7 @@ class CERES_EXPORT Problem {
// at the end of an iteration during a solve.
//
// Note 4: If an EvaluationCallback is associated with the problem,
// then its PrepareForEvaluation method will be called everytime
// then its PrepareForEvaluation method will be called every time
// this method is called with new_point = true.
bool Evaluate(const EvaluateOptions& options,
double* cost,
@@ -509,23 +511,41 @@ class CERES_EXPORT Problem {
// apply_loss_function as the name implies allows the user to switch
// the application of the loss function on and off.
//
// WARNING: If an EvaluationCallback is associated with the problem
// then it is the user's responsibility to call it before calling
// this method.
//
// This is because, if the user calls this method multiple times, we
// cannot tell if the underlying parameter blocks have changed
// between calls or not. So if EvaluateResidualBlock was responsible
// for calling the EvaluationCallback, it will have to do it
// everytime it is called. Which makes the common case where the
// parameter blocks do not change, inefficient. So we leave it to
// the user to call the EvaluationCallback as needed.
// If an EvaluationCallback is associated with the problem, then its
// PrepareForEvaluation method will be called every time this method
// is called with new_point = true. This conservatively assumes that
// the user may have changed the parameter values since the previous
// call to evaluate / solve. For improved efficiency, and only if
// you know that the parameter values have not changed between
// calls, see EvaluateResidualBlockAssumingParametersUnchanged().
bool EvaluateResidualBlock(ResidualBlockId residual_block_id,
bool apply_loss_function,
double* cost,
double* residuals,
double** jacobians) const;

// Same as EvaluateResidualBlock except that if an
// EvaluationCallback is associated with the problem, then its
// PrepareForEvaluation method will be called every time this method
// is called with new_point = false.
//
// This means, if an EvaluationCallback is associated with the
// problem then it is the user's responsibility to call
// PrepareForEvaluation before calling this method if necessary,
// i.e. iff the parameter values have been changed since the last
// call to evaluate / solve.'
//
// This is because, as the name implies, we assume that the
// parameter blocks did not change since the last time
// PrepareForEvaluation was called (via Solve, Evaluate or
// EvaluateResidualBlock).
bool EvaluateResidualBlockAssumingParametersUnchanged(
ResidualBlockId residual_block_id,
bool apply_loss_function,
double* cost,
double* residuals,
double** jacobians) const;

private:
friend class Solver;
friend class Covariance;
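Note: a short sketch of the calling pattern documented in the rewritten comments above. The functor and variable names are illustrative; the EvaluateResidualBlock signature matches the declaration in this hunk.

// Sketch: evaluate one residual block outside of Solve().
#include "ceres/ceres.h"

struct ExampleResidual {  // made-up functor for illustration
  template <typename T>
  bool operator()(const T* x, T* residual) const {
    residual[0] = T(10.0) - x[0];
    return true;
  }
};

int main() {
  double x = 1.0;
  ceres::Problem problem;
  ceres::ResidualBlockId id = problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction<ExampleResidual, 1, 1>(new ExampleResidual),
      nullptr, &x);

  double cost = 0.0;
  double residual = 0.0;
  // Per the updated comment: PrepareForEvaluation(new_point=true) is
  // invoked here if an EvaluationCallback is attached to the problem.
  problem.EvaluateResidualBlock(id, /*apply_loss_function=*/true,
                                &cost, &residual, /*jacobians=*/nullptr);
  return 0;
}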

extern/ceres/include/ceres/rotation.h (vendored, 4 lines changed)
@@ -320,8 +320,8 @@ inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) {
}

template <typename T>
void RotationMatrixToQuaternion(const T* R, T* angle_axis) {
RotationMatrixToQuaternion(ColumnMajorAdapter3x3(R), angle_axis);
void RotationMatrixToQuaternion(const T* R, T* quaternion) {
RotationMatrixToQuaternion(ColumnMajorAdapter3x3(R), quaternion);
}

// This algorithm comes from "Quaternion Calculus and Fast Animation",
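Note: the change above only fixes a misleading parameter name (the function always produced a quaternion, not an angle-axis vector); the call pattern is unchanged. A hedged usage sketch:

// Sketch: convert a column-major 3x3 identity rotation to a quaternion.
#include "ceres/rotation.h"

void Example() {
  const double R[9] = {1, 0, 0, 0, 1, 0, 0, 0, 1};  // column-major
  double quaternion[4];  // scalar-first (w, x, y, z) convention
  ceres::RotationMatrixToQuaternion(R, quaternion);
  // quaternion is now approximately {1, 0, 0, 0}.
}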

extern/ceres/include/ceres/solver.h (vendored, 5 lines changed)
@@ -360,7 +360,8 @@ class CERES_EXPORT Solver {
//
// If Solver::Options::preconditioner_type == SUBSET, then
// residual_blocks_for_subset_preconditioner must be non-empty.
std::unordered_set<ResidualBlockId> residual_blocks_for_subset_preconditioner;
std::unordered_set<ResidualBlockId>
residual_blocks_for_subset_preconditioner;

// Ceres supports using multiple dense linear algebra libraries
// for dense matrix factorizations. Currently EIGEN and LAPACK are
@@ -838,7 +839,7 @@ class CERES_EXPORT Solver {
int num_linear_solves = -1;

// Time (in seconds) spent evaluating the residual vector.
double residual_evaluation_time_in_seconds = 1.0;
double residual_evaluation_time_in_seconds = -1.0;

// Number of residual only evaluations.
int num_residual_evaluations = -1;
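Note: a hedged sketch of how the reflowed residual_blocks_for_subset_preconditioner field is populated. The ResidualBlockId would come from Problem::AddResidualBlock; treat the surrounding choices (e.g. pairing SUBSET with an iterative solver) as an assumption rather than part of the patch.

// Sketch: opting a residual block into the SUBSET preconditioner.
#include "ceres/solver.h"

void ConfigureSubsetPreconditioner(ceres::Solver::Options* options,
                                   ceres::ResidualBlockId id) {
  options->preconditioner_type = ceres::SUBSET;
  options->residual_blocks_for_subset_preconditioner.insert(id);
}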

extern/ceres/include/ceres/types.h (vendored, 53 lines changed)
@@ -50,7 +50,7 @@ namespace ceres {
// delete on it upon completion.
enum Ownership {
DO_NOT_TAKE_OWNERSHIP,
TAKE_OWNERSHIP
TAKE_OWNERSHIP,
};

// TODO(keir): Considerably expand the explanations of each solver type.
@@ -185,19 +185,19 @@ enum SparseLinearAlgebraLibraryType {

enum DenseLinearAlgebraLibraryType {
EIGEN,
LAPACK
LAPACK,
};

// Logging options
// The options get progressively noisier.
enum LoggingType {
SILENT,
PER_MINIMIZER_ITERATION
PER_MINIMIZER_ITERATION,
};

enum MinimizerType {
LINE_SEARCH,
TRUST_REGION
TRUST_REGION,
};

enum LineSearchDirectionType {
@@ -412,7 +412,7 @@ enum DumpFormatType {
// specified for the number of residuals. If specified, then the
// number of residuas for that cost function can vary at runtime.
enum DimensionType {
DYNAMIC = -1
DYNAMIC = -1,
};

// The differentiation method used to compute numerical derivatives in
@@ -433,7 +433,7 @@ enum NumericDiffMethodType {
enum LineSearchInterpolationType {
BISECTION,
QUADRATIC,
CUBIC
CUBIC,
};

enum CovarianceAlgorithmType {
@@ -448,8 +448,7 @@ enum CovarianceAlgorithmType {
// did not write to that memory location.
const double kImpossibleValue = 1e302;

CERES_EXPORT const char* LinearSolverTypeToString(
LinearSolverType type);
CERES_EXPORT const char* LinearSolverTypeToString(LinearSolverType type);
CERES_EXPORT bool StringToLinearSolverType(std::string value,
LinearSolverType* type);

@@ -459,25 +458,23 @@ CERES_EXPORT bool StringToPreconditionerType(std::string value,

CERES_EXPORT const char* VisibilityClusteringTypeToString(
VisibilityClusteringType type);
CERES_EXPORT bool StringToVisibilityClusteringType(std::string value,
VisibilityClusteringType* type);
CERES_EXPORT bool StringToVisibilityClusteringType(
std::string value, VisibilityClusteringType* type);

CERES_EXPORT const char* SparseLinearAlgebraLibraryTypeToString(
SparseLinearAlgebraLibraryType type);
CERES_EXPORT bool StringToSparseLinearAlgebraLibraryType(
std::string value,
SparseLinearAlgebraLibraryType* type);
std::string value, SparseLinearAlgebraLibraryType* type);

CERES_EXPORT const char* DenseLinearAlgebraLibraryTypeToString(
DenseLinearAlgebraLibraryType type);
CERES_EXPORT bool StringToDenseLinearAlgebraLibraryType(
std::string value,
DenseLinearAlgebraLibraryType* type);
std::string value, DenseLinearAlgebraLibraryType* type);

CERES_EXPORT const char* TrustRegionStrategyTypeToString(
TrustRegionStrategyType type);
CERES_EXPORT bool StringToTrustRegionStrategyType(std::string value,
TrustRegionStrategyType* type);
CERES_EXPORT bool StringToTrustRegionStrategyType(
std::string value, TrustRegionStrategyType* type);

CERES_EXPORT const char* DoglegTypeToString(DoglegType type);
CERES_EXPORT bool StringToDoglegType(std::string value, DoglegType* type);
@@ -487,41 +484,39 @@ CERES_EXPORT bool StringToMinimizerType(std::string value, MinimizerType* type);

CERES_EXPORT const char* LineSearchDirectionTypeToString(
LineSearchDirectionType type);
CERES_EXPORT bool StringToLineSearchDirectionType(std::string value,
LineSearchDirectionType* type);
CERES_EXPORT bool StringToLineSearchDirectionType(
std::string value, LineSearchDirectionType* type);

CERES_EXPORT const char* LineSearchTypeToString(LineSearchType type);
CERES_EXPORT bool StringToLineSearchType(std::string value, LineSearchType* type);
CERES_EXPORT bool StringToLineSearchType(std::string value,
LineSearchType* type);

CERES_EXPORT const char* NonlinearConjugateGradientTypeToString(
NonlinearConjugateGradientType type);
CERES_EXPORT bool StringToNonlinearConjugateGradientType(
std::string value,
NonlinearConjugateGradientType* type);
std::string value, NonlinearConjugateGradientType* type);

CERES_EXPORT const char* LineSearchInterpolationTypeToString(
LineSearchInterpolationType type);
CERES_EXPORT bool StringToLineSearchInterpolationType(
std::string value,
LineSearchInterpolationType* type);
std::string value, LineSearchInterpolationType* type);

CERES_EXPORT const char* CovarianceAlgorithmTypeToString(
CovarianceAlgorithmType type);
CERES_EXPORT bool StringToCovarianceAlgorithmType(
std::string value,
CovarianceAlgorithmType* type);
std::string value, CovarianceAlgorithmType* type);

CERES_EXPORT const char* NumericDiffMethodTypeToString(
NumericDiffMethodType type);
CERES_EXPORT bool StringToNumericDiffMethodType(
std::string value,
NumericDiffMethodType* type);
CERES_EXPORT bool StringToNumericDiffMethodType(std::string value,
NumericDiffMethodType* type);

CERES_EXPORT const char* LoggingTypeToString(LoggingType type);
CERES_EXPORT bool StringtoLoggingType(std::string value, LoggingType* type);

CERES_EXPORT const char* DumpFormatTypeToString(DumpFormatType type);
CERES_EXPORT bool StringtoDumpFormatType(std::string value, DumpFormatType* type);
CERES_EXPORT bool StringtoDumpFormatType(std::string value,
DumpFormatType* type);
CERES_EXPORT bool StringtoDumpFormatType(std::string value, LoggingType* type);

CERES_EXPORT const char* TerminationTypeToString(TerminationType type);
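Note: the *ToString / StringTo* helpers reflowed above are plain round-trip converters between enums and their names. A hedged usage sketch; the accepted spelling is assumed here to be the enumerator name ("DENSE_QR"), which is how these helpers behave to the best of my knowledge.

// Sketch: round-tripping an enum through its string form.
#include <iostream>
#include <string>
#include "ceres/types.h"

int main() {
  const char* name = ceres::LinearSolverTypeToString(ceres::DENSE_QR);
  std::cout << name << "\n";  // expected: "DENSE_QR"

  ceres::LinearSolverType type;
  if (ceres::StringToLinearSolverType("DENSE_QR", &type)) {
    std::cout << "parsed ok\n";
  }
  return 0;
}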

extern/ceres/include/ceres/version.h (vendored, 7 lines changed)
@@ -41,8 +41,9 @@
#define CERES_TO_STRING(x) CERES_TO_STRING_HELPER(x)

// The Ceres version as a string; for example "1.9.0".
#define CERES_VERSION_STRING CERES_TO_STRING(CERES_VERSION_MAJOR) "." \
CERES_TO_STRING(CERES_VERSION_MINOR) "." \
CERES_TO_STRING(CERES_VERSION_REVISION)
#define CERES_VERSION_STRING \
CERES_TO_STRING(CERES_VERSION_MAJOR) \
"." CERES_TO_STRING(CERES_VERSION_MINOR) "." CERES_TO_STRING( \
CERES_VERSION_REVISION)

#endif // CERES_PUBLIC_VERSION_H_
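Note: the reformatted macro still expands to the same dotted version string; a tiny sketch of what a consumer sees.

// Sketch: printing the Ceres version string built by the macro above.
#include <iostream>
#include "ceres/version.h"

int main() {
  // e.g. "1.14.0" when CERES_VERSION_MAJOR=1, MINOR=14, REVISION=0.
  std::cout << CERES_VERSION_STRING << "\n";
  return 0;
}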

extern/ceres/internal/ceres/accelerate_sparse.cc (vendored, 87 lines changed)
@@ -33,18 +33,19 @@
|
||||
|
||||
#ifndef CERES_NO_ACCELERATE_SPARSE
|
||||
|
||||
#include "ceres/accelerate_sparse.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/accelerate_sparse.h"
|
||||
#include "ceres/compressed_col_sparse_matrix_utils.h"
|
||||
#include "ceres/compressed_row_sparse_matrix.h"
|
||||
#include "ceres/triplet_sparse_matrix.h"
|
||||
#include "glog/logging.h"
|
||||
|
||||
#define CASESTR(x) case x: return #x
|
||||
#define CASESTR(x) \
|
||||
case x: \
|
||||
return #x
|
||||
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
@@ -68,7 +69,7 @@ const char* SparseStatusToString(SparseStatus_t status) {
|
||||
// aligned to kAccelerateRequiredAlignment and returns a pointer to the
|
||||
// aligned start.
|
||||
void* ResizeForAccelerateAlignment(const size_t required_size,
|
||||
std::vector<uint8_t> *workspace) {
|
||||
std::vector<uint8_t>* workspace) {
|
||||
// As per the Accelerate documentation, all workspace memory passed to the
|
||||
// sparse solver functions must be 16-byte aligned.
|
||||
constexpr int kAccelerateRequiredAlignment = 16;
|
||||
@@ -80,29 +81,28 @@ void* ResizeForAccelerateAlignment(const size_t required_size,
|
||||
size_t size_from_aligned_start = workspace->size();
|
||||
void* aligned_solve_workspace_start =
|
||||
reinterpret_cast<void*>(workspace->data());
|
||||
aligned_solve_workspace_start =
|
||||
std::align(kAccelerateRequiredAlignment,
|
||||
required_size,
|
||||
aligned_solve_workspace_start,
|
||||
size_from_aligned_start);
|
||||
aligned_solve_workspace_start = std::align(kAccelerateRequiredAlignment,
|
||||
required_size,
|
||||
aligned_solve_workspace_start,
|
||||
size_from_aligned_start);
|
||||
CHECK(aligned_solve_workspace_start != nullptr)
|
||||
<< "required_size: " << required_size
|
||||
<< ", workspace size: " << workspace->size();
|
||||
return aligned_solve_workspace_start;
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
template <typename Scalar>
|
||||
void AccelerateSparse<Scalar>::Solve(NumericFactorization* numeric_factor,
|
||||
DenseVector* rhs_and_solution) {
|
||||
// From SparseSolve() documentation in Solve.h
|
||||
const int required_size =
|
||||
numeric_factor->solveWorkspaceRequiredStatic +
|
||||
numeric_factor->solveWorkspaceRequiredPerRHS;
|
||||
SparseSolve(*numeric_factor, *rhs_and_solution,
|
||||
const int required_size = numeric_factor->solveWorkspaceRequiredStatic +
|
||||
numeric_factor->solveWorkspaceRequiredPerRHS;
|
||||
SparseSolve(*numeric_factor,
|
||||
*rhs_and_solution,
|
||||
ResizeForAccelerateAlignment(required_size, &solve_workspace_));
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
template <typename Scalar>
|
||||
typename AccelerateSparse<Scalar>::ASSparseMatrix
|
||||
AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
|
||||
CompressedRowSparseMatrix* A) {
|
||||
@@ -112,7 +112,7 @@ AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
|
||||
//
|
||||
// Accelerate's columnStarts is a long*, not an int*. These types might be
|
||||
// different (e.g. ARM on iOS) so always make a copy.
|
||||
column_starts_.resize(A->num_rows() +1); // +1 for final column length.
|
||||
column_starts_.resize(A->num_rows() + 1); // +1 for final column length.
|
||||
std::copy_n(A->rows(), column_starts_.size(), &column_starts_[0]);
|
||||
|
||||
ASSparseMatrix At;
|
||||
@@ -136,29 +136,31 @@ AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
|
||||
return At;
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
template <typename Scalar>
|
||||
typename AccelerateSparse<Scalar>::SymbolicFactorization
|
||||
AccelerateSparse<Scalar>::AnalyzeCholesky(ASSparseMatrix* A) {
|
||||
return SparseFactor(SparseFactorizationCholesky, A->structure);
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
template <typename Scalar>
|
||||
typename AccelerateSparse<Scalar>::NumericFactorization
|
||||
AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
|
||||
SymbolicFactorization* symbolic_factor) {
|
||||
return SparseFactor(*symbolic_factor, *A);
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
template <typename Scalar>
|
||||
void AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
|
||||
NumericFactorization* numeric_factor) {
|
||||
// From SparseRefactor() documentation in Solve.h
|
||||
const int required_size = std::is_same<Scalar, double>::value
|
||||
? numeric_factor->symbolicFactorization.workspaceSize_Double
|
||||
: numeric_factor->symbolicFactorization.workspaceSize_Float;
|
||||
return SparseRefactor(*A, numeric_factor,
|
||||
ResizeForAccelerateAlignment(required_size,
|
||||
&factorization_workspace_));
|
||||
const int required_size =
|
||||
std::is_same<Scalar, double>::value
|
||||
? numeric_factor->symbolicFactorization.workspaceSize_Double
|
||||
: numeric_factor->symbolicFactorization.workspaceSize_Float;
|
||||
return SparseRefactor(
|
||||
*A,
|
||||
numeric_factor,
|
||||
ResizeForAccelerateAlignment(required_size, &factorization_workspace_));
|
||||
}
|
||||
|
||||
// Instantiate only for the specific template types required/supported s/t the
|
||||
@@ -166,34 +168,33 @@ void AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
|
||||
template class AccelerateSparse<double>;
|
||||
template class AccelerateSparse<float>;
|
||||
|
||||
template<typename Scalar>
|
||||
std::unique_ptr<SparseCholesky>
|
||||
AppleAccelerateCholesky<Scalar>::Create(OrderingType ordering_type) {
|
||||
template <typename Scalar>
|
||||
std::unique_ptr<SparseCholesky> AppleAccelerateCholesky<Scalar>::Create(
|
||||
OrderingType ordering_type) {
|
||||
return std::unique_ptr<SparseCholesky>(
|
||||
new AppleAccelerateCholesky<Scalar>(ordering_type));
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
template <typename Scalar>
|
||||
AppleAccelerateCholesky<Scalar>::AppleAccelerateCholesky(
|
||||
const OrderingType ordering_type)
|
||||
: ordering_type_(ordering_type) {}
|
||||
|
||||
template<typename Scalar>
|
||||
template <typename Scalar>
|
||||
AppleAccelerateCholesky<Scalar>::~AppleAccelerateCholesky() {
|
||||
FreeSymbolicFactorization();
|
||||
FreeNumericFactorization();
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
template <typename Scalar>
|
||||
CompressedRowSparseMatrix::StorageType
|
||||
AppleAccelerateCholesky<Scalar>::StorageType() const {
|
||||
return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
LinearSolverTerminationType
|
||||
AppleAccelerateCholesky<Scalar>::Factorize(CompressedRowSparseMatrix* lhs,
|
||||
std::string* message) {
|
||||
template <typename Scalar>
|
||||
LinearSolverTerminationType AppleAccelerateCholesky<Scalar>::Factorize(
|
||||
CompressedRowSparseMatrix* lhs, std::string* message) {
|
||||
CHECK_EQ(lhs->storage_type(), StorageType());
|
||||
if (lhs == NULL) {
|
||||
*message = "Failure: Input lhs is NULL.";
|
||||
@@ -234,11 +235,9 @@ AppleAccelerateCholesky<Scalar>::Factorize(CompressedRowSparseMatrix* lhs,
|
||||
return LINEAR_SOLVER_SUCCESS;
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
LinearSolverTerminationType
|
||||
AppleAccelerateCholesky<Scalar>::Solve(const double* rhs,
|
||||
double* solution,
|
||||
std::string* message) {
|
||||
template <typename Scalar>
|
||||
LinearSolverTerminationType AppleAccelerateCholesky<Scalar>::Solve(
|
||||
const double* rhs, double* solution, std::string* message) {
|
||||
CHECK_EQ(numeric_factor_->status, SparseStatusOK)
|
||||
<< "Solve called without a call to Factorize first ("
|
||||
<< SparseStatusToString(numeric_factor_->status) << ").";
|
||||
@@ -262,7 +261,7 @@ AppleAccelerateCholesky<Scalar>::Solve(const double* rhs,
|
||||
return LINEAR_SOLVER_SUCCESS;
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
template <typename Scalar>
|
||||
void AppleAccelerateCholesky<Scalar>::FreeSymbolicFactorization() {
|
||||
if (symbolic_factor_) {
|
||||
SparseCleanup(*symbolic_factor_);
|
||||
@@ -270,7 +269,7 @@ void AppleAccelerateCholesky<Scalar>::FreeSymbolicFactorization() {
|
||||
}
|
||||
}
|
||||
|
||||
template<typename Scalar>
|
||||
template <typename Scalar>
|
||||
void AppleAccelerateCholesky<Scalar>::FreeNumericFactorization() {
|
||||
if (numeric_factor_) {
|
||||
SparseCleanup(*numeric_factor_);
|
||||
@@ -283,7 +282,7 @@ void AppleAccelerateCholesky<Scalar>::FreeNumericFactorization() {
|
||||
template class AppleAccelerateCholesky<double>;
|
||||
template class AppleAccelerateCholesky<float>;
|
||||
|
||||
}
|
||||
}
|
||||
} // namespace internal
|
||||
} // namespace ceres
|
||||
|
||||
#endif // CERES_NO_ACCELERATE_SPARSE
|
||||
|

extern/ceres/internal/ceres/accelerate_sparse.h (vendored, 31 lines changed)
@@ -40,9 +40,9 @@
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "Accelerate.h"
|
||||
#include "ceres/linear_solver.h"
|
||||
#include "ceres/sparse_cholesky.h"
|
||||
#include "Accelerate.h"
|
||||
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
@@ -50,11 +50,10 @@ namespace internal {
|
||||
class CompressedRowSparseMatrix;
|
||||
class TripletSparseMatrix;
|
||||
|
||||
template<typename Scalar>
|
||||
struct SparseTypesTrait {
|
||||
};
|
||||
template <typename Scalar>
|
||||
struct SparseTypesTrait {};
|
||||
|
||||
template<>
|
||||
template <>
|
||||
struct SparseTypesTrait<double> {
|
||||
typedef DenseVector_Double DenseVector;
|
||||
typedef SparseMatrix_Double SparseMatrix;
|
||||
@@ -62,7 +61,7 @@ struct SparseTypesTrait<double> {
|
||||
typedef SparseOpaqueFactorization_Double NumericFactorization;
|
||||
};
|
||||
|
||||
template<>
|
||||
template <>
|
||||
struct SparseTypesTrait<float> {
|
||||
typedef DenseVector_Float DenseVector;
|
||||
typedef SparseMatrix_Float SparseMatrix;
|
||||
@@ -70,14 +69,16 @@ struct SparseTypesTrait<float> {
|
||||
typedef SparseOpaqueFactorization_Float NumericFactorization;
|
||||
};
|
||||
|
||||
template<typename Scalar>
|
||||
template <typename Scalar>
|
||||
class AccelerateSparse {
|
||||
public:
|
||||
using DenseVector = typename SparseTypesTrait<Scalar>::DenseVector;
|
||||
// Use ASSparseMatrix to avoid collision with ceres::internal::SparseMatrix.
|
||||
using ASSparseMatrix = typename SparseTypesTrait<Scalar>::SparseMatrix;
|
||||
using SymbolicFactorization = typename SparseTypesTrait<Scalar>::SymbolicFactorization;
|
||||
using NumericFactorization = typename SparseTypesTrait<Scalar>::NumericFactorization;
|
||||
using SymbolicFactorization =
|
||||
typename SparseTypesTrait<Scalar>::SymbolicFactorization;
|
||||
using NumericFactorization =
|
||||
typename SparseTypesTrait<Scalar>::NumericFactorization;
|
||||
|
||||
// Solves a linear system given its symbolic (reference counted within
|
||||
// NumericFactorization) and numeric factorization.
|
||||
@@ -109,7 +110,7 @@ class AccelerateSparse {
|
||||
|
||||
// An implementation of SparseCholesky interface using Apple's Accelerate
|
||||
// framework.
|
||||
template<typename Scalar>
|
||||
template <typename Scalar>
|
||||
class AppleAccelerateCholesky : public SparseCholesky {
|
||||
public:
|
||||
// Factory
|
||||
@@ -122,7 +123,7 @@ class AppleAccelerateCholesky : public SparseCholesky {
|
||||
std::string* message) final;
|
||||
LinearSolverTerminationType Solve(const double* rhs,
|
||||
double* solution,
|
||||
std::string* message) final ;
|
||||
std::string* message) final;
|
||||
|
||||
private:
|
||||
AppleAccelerateCholesky(const OrderingType ordering_type);
|
||||
@@ -132,15 +133,15 @@ class AppleAccelerateCholesky : public SparseCholesky {
|
||||
const OrderingType ordering_type_;
|
||||
AccelerateSparse<Scalar> as_;
|
||||
std::unique_ptr<typename AccelerateSparse<Scalar>::SymbolicFactorization>
|
||||
symbolic_factor_;
|
||||
symbolic_factor_;
|
||||
std::unique_ptr<typename AccelerateSparse<Scalar>::NumericFactorization>
|
||||
numeric_factor_;
|
||||
numeric_factor_;
|
||||
// Copy of rhs/solution if Scalar != double (necessitating a copy).
|
||||
Eigen::Matrix<Scalar, Eigen::Dynamic, 1> scalar_rhs_and_solution_;
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
} // namespace internal
|
||||
} // namespace ceres
|
||||
|
||||
#endif // CERES_NO_ACCELERATE_SPARSE
|
||||
|
||||
|

extern/ceres/internal/ceres/array_utils.cc (vendored, 14 lines changed)
@@ -35,6 +35,7 @@
#include <cstddef>
#include <string>
#include <vector>

#include "ceres/stringprintf.h"
#include "ceres/types.h"
namespace ceres {
@@ -45,7 +46,7 @@ using std::string;
bool IsArrayValid(const int size, const double* x) {
if (x != NULL) {
for (int i = 0; i < size; ++i) {
if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
return false;
}
}
@@ -59,7 +60,7 @@ int FindInvalidValue(const int size, const double* x) {
}

for (int i = 0; i < size; ++i) {
if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
return i;
}
}
@@ -92,14 +93,13 @@ void AppendArrayToString(const int size, const double* x, string* result) {
void MapValuesToContiguousRange(const int size, int* array) {
std::vector<int> unique_values(array, array + size);
std::sort(unique_values.begin(), unique_values.end());
unique_values.erase(std::unique(unique_values.begin(),
unique_values.end()),
unique_values.erase(std::unique(unique_values.begin(), unique_values.end()),
unique_values.end());

for (int i = 0; i < size; ++i) {
array[i] = std::lower_bound(unique_values.begin(),
unique_values.end(),
array[i]) - unique_values.begin();
array[i] =
std::lower_bound(unique_values.begin(), unique_values.end(), array[i]) -
unique_values.begin();
}
}
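Note: the reflowed loop above replaces each value by its rank among the sorted unique values, as documented in array_utils.h. A self-contained sketch of that behaviour on an invented input, mirroring the same std::sort / std::unique / std::lower_bound steps:

// Sketch: what MapValuesToContiguousRange computes, on an invented input.
#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  std::vector<int> array = {7, 3, 9, 3, 7};
  std::vector<int> unique_values(array);
  std::sort(unique_values.begin(), unique_values.end());
  unique_values.erase(std::unique(unique_values.begin(), unique_values.end()),
                      unique_values.end());
  for (int& v : array) {
    // Rank of v within the sorted unique values.
    v = std::lower_bound(unique_values.begin(), unique_values.end(), v) -
        unique_values.begin();
  }
  for (int v : array) std::cout << v << ' ';  // prints: 1 0 2 0 1
  std::cout << '\n';
  return 0;
}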

extern/ceres/internal/ceres/array_utils.h (vendored, 13 lines changed)
@@ -44,6 +44,7 @@
#define CERES_INTERNAL_ARRAY_UTILS_H_

#include <string>

#include "ceres/internal/port.h"

namespace ceres {
@@ -51,20 +52,22 @@ namespace internal {

// Fill the array x with an impossible value that the user code is
// never expected to compute.
void InvalidateArray(int size, double* x);
CERES_EXPORT_INTERNAL void InvalidateArray(int size, double* x);

// Check if all the entries of the array x are valid, i.e. all the
// values in the array should be finite and none of them should be
// equal to the "impossible" value used by InvalidateArray.
bool IsArrayValid(int size, const double* x);
CERES_EXPORT_INTERNAL bool IsArrayValid(int size, const double* x);

// If the array contains an invalid value, return the index for it,
// otherwise return size.
int FindInvalidValue(const int size, const double* x);
CERES_EXPORT_INTERNAL int FindInvalidValue(const int size, const double* x);

// Utility routine to print an array of doubles to a string. If the
// array pointer is NULL, it is treated as an array of zeros.
void AppendArrayToString(const int size, const double* x, std::string* result);
CERES_EXPORT_INTERNAL void AppendArrayToString(const int size,
const double* x,
std::string* result);

// This routine takes an array of integer values, sorts and uniques
// them and then maps each value in the array to its position in the
@@ -79,7 +82,7 @@ void AppendArrayToString(const int size, const double* x, std::string* result);
// gets mapped to
//
// [1 0 2 3 0 1 3]
void MapValuesToContiguousRange(int size, int* array);
CERES_EXPORT_INTERNAL void MapValuesToContiguousRange(int size, int* array);

} // namespace internal
} // namespace ceres

extern/ceres/internal/ceres/blas.cc (vendored, 1 line changed)
@@ -29,6 +29,7 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/blas.h"

#include "ceres/internal/port.h"
#include "glog/logging.h"
@@ -31,6 +31,7 @@
|
||||
#include "ceres/block_evaluate_preparer.h"
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/block_sparse_matrix.h"
|
||||
#include "ceres/casts.h"
|
||||
#include "ceres/parameter_block.h"
|
||||
@@ -53,10 +54,8 @@ void BlockEvaluatePreparer::Prepare(const ResidualBlock* residual_block,
|
||||
double** jacobians) {
|
||||
// If the overall jacobian is not available, use the scratch space.
|
||||
if (jacobian == NULL) {
|
||||
scratch_evaluate_preparer_.Prepare(residual_block,
|
||||
residual_block_index,
|
||||
jacobian,
|
||||
jacobians);
|
||||
scratch_evaluate_preparer_.Prepare(
|
||||
residual_block, residual_block_index, jacobian, jacobians);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@@ -30,9 +30,9 @@
|
||||
|
||||
#include "ceres/block_jacobi_preconditioner.h"
|
||||
|
||||
#include "ceres/block_random_access_diagonal_matrix.h"
|
||||
#include "ceres/block_sparse_matrix.h"
|
||||
#include "ceres/block_structure.h"
|
||||
#include "ceres/block_random_access_diagonal_matrix.h"
|
||||
#include "ceres/casts.h"
|
||||
#include "ceres/internal/eigen.h"
|
||||
|
||||
@@ -65,13 +65,11 @@ bool BlockJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
|
||||
const int col_block_size = bs->cols[block_id].size;
|
||||
|
||||
int r, c, row_stride, col_stride;
|
||||
CellInfo* cell_info = m_->GetCell(block_id, block_id,
|
||||
&r, &c,
|
||||
&row_stride, &col_stride);
|
||||
CellInfo* cell_info =
|
||||
m_->GetCell(block_id, block_id, &r, &c, &row_stride, &col_stride);
|
||||
MatrixRef m(cell_info->values, row_stride, col_stride);
|
||||
ConstMatrixRef b(values + cells[j].position,
|
||||
row_block_size,
|
||||
col_block_size);
|
||||
ConstMatrixRef b(
|
||||
values + cells[j].position, row_block_size, col_block_size);
|
||||
m.block(r, c, col_block_size, col_block_size) += b.transpose() * b;
|
||||
}
|
||||
}
|
||||
@@ -82,9 +80,7 @@ bool BlockJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
|
||||
for (int i = 0; i < bs->cols.size(); ++i) {
|
||||
const int block_size = bs->cols[i].size;
|
||||
int r, c, row_stride, col_stride;
|
||||
CellInfo* cell_info = m_->GetCell(i, i,
|
||||
&r, &c,
|
||||
&row_stride, &col_stride);
|
||||
CellInfo* cell_info = m_->GetCell(i, i, &r, &c, &row_stride, &col_stride);
|
||||
MatrixRef m(cell_info->values, row_stride, col_stride);
|
||||
m.block(r, c, block_size, block_size).diagonal() +=
|
||||
ConstVectorRef(D + position, block_size).array().square().matrix();
|
||||
|
@@ -32,7 +32,9 @@
|
||||
#define CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "ceres/block_random_access_diagonal_matrix.h"
|
||||
#include "ceres/internal/port.h"
|
||||
#include "ceres/preconditioner.h"
|
||||
|
||||
namespace ceres {
|
||||
@@ -51,7 +53,8 @@ struct CompressedRowBlockStructure;
|
||||
// update the matrix by running Update(A, D). The values of the matrix A are
|
||||
// inspected to construct the preconditioner. The vector D is applied as the
|
||||
// D^TD diagonal term.
|
||||
class BlockJacobiPreconditioner : public BlockSparseMatrixPreconditioner {
|
||||
class CERES_EXPORT_INTERNAL BlockJacobiPreconditioner
|
||||
: public BlockSparseMatrixPreconditioner {
|
||||
public:
|
||||
// A must remain valid while the BlockJacobiPreconditioner is.
|
||||
explicit BlockJacobiPreconditioner(const BlockSparseMatrix& A);
|
||||
|
@@ -32,11 +32,11 @@
|
||||
|
||||
#include "ceres/block_evaluate_preparer.h"
|
||||
#include "ceres/block_sparse_matrix.h"
|
||||
#include "ceres/internal/eigen.h"
|
||||
#include "ceres/internal/port.h"
|
||||
#include "ceres/parameter_block.h"
|
||||
#include "ceres/program.h"
|
||||
#include "ceres/residual_block.h"
|
||||
#include "ceres/internal/eigen.h"
|
||||
#include "ceres/internal/port.h"
|
||||
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
|
@@ -39,6 +39,7 @@
|
||||
#define CERES_INTERNAL_BLOCK_JACOBIAN_WRITER_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/evaluator.h"
|
||||
#include "ceres/internal/port.h"
|
||||
|
||||
@@ -52,8 +53,7 @@ class SparseMatrix;
|
||||
// TODO(sameeragarwal): This class needs documemtation.
|
||||
class BlockJacobianWriter {
|
||||
public:
|
||||
BlockJacobianWriter(const Evaluator::Options& options,
|
||||
Program* program);
|
||||
BlockJacobianWriter(const Evaluator::Options& options, Program* program);
|
||||
|
||||
// JacobianWriter interface.
|
||||
|
||||
|
@@ -31,6 +31,7 @@
|
||||
#include "ceres/block_random_access_dense_matrix.h"
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/internal/eigen.h"
|
||||
#include "glog/logging.h"
|
||||
|
||||
@@ -59,8 +60,7 @@ BlockRandomAccessDenseMatrix::BlockRandomAccessDenseMatrix(
|
||||
|
||||
// Assume that the user does not hold any locks on any cell blocks
|
||||
// when they are calling SetZero.
|
||||
BlockRandomAccessDenseMatrix::~BlockRandomAccessDenseMatrix() {
|
||||
}
|
||||
BlockRandomAccessDenseMatrix::~BlockRandomAccessDenseMatrix() {}
|
||||
|
||||
CellInfo* BlockRandomAccessDenseMatrix::GetCell(const int row_block_id,
|
||||
const int col_block_id,
|
||||
|
@@ -31,11 +31,10 @@
|
||||
#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_
|
||||
#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_
|
||||
|
||||
#include "ceres/block_random_access_matrix.h"
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/block_random_access_matrix.h"
|
||||
#include "ceres/internal/port.h"
|
||||
|
||||
namespace ceres {
|
||||
@@ -51,7 +50,8 @@ namespace internal {
|
||||
// pair.
|
||||
//
|
||||
// ReturnCell is a nop.
|
||||
class BlockRandomAccessDenseMatrix : public BlockRandomAccessMatrix {
|
||||
class CERES_EXPORT_INTERNAL BlockRandomAccessDenseMatrix
|
||||
: public BlockRandomAccessMatrix {
|
||||
public:
|
||||
// blocks is a vector of block sizes. The resulting matrix has
|
||||
// blocks.size() * blocks.size() cells.
|
||||
|
@@ -63,9 +63,8 @@ BlockRandomAccessDiagonalMatrix::BlockRandomAccessDiagonalMatrix(
|
||||
num_nonzeros += blocks_[i] * blocks_[i];
|
||||
}
|
||||
|
||||
VLOG(1) << "Matrix Size [" << num_cols
|
||||
<< "," << num_cols
|
||||
<< "] " << num_nonzeros;
|
||||
VLOG(1) << "Matrix Size [" << num_cols << "," << num_cols << "] "
|
||||
<< num_nonzeros;
|
||||
|
||||
tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
|
||||
tsm_->set_num_nonzeros(num_nonzeros);
|
||||
@@ -116,8 +115,7 @@ CellInfo* BlockRandomAccessDiagonalMatrix::GetCell(int row_block_id,
|
||||
// when they are calling SetZero.
|
||||
void BlockRandomAccessDiagonalMatrix::SetZero() {
|
||||
if (tsm_->num_nonzeros()) {
|
||||
VectorRef(tsm_->mutable_values(),
|
||||
tsm_->num_nonzeros()).setZero();
|
||||
VectorRef(tsm_->mutable_values(), tsm_->num_nonzeros()).setZero();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -126,11 +124,8 @@ void BlockRandomAccessDiagonalMatrix::Invert() {
|
||||
for (int i = 0; i < blocks_.size(); ++i) {
|
||||
const int block_size = blocks_[i];
|
||||
MatrixRef block(values, block_size, block_size);
|
||||
block =
|
||||
block
|
||||
.selfadjointView<Eigen::Upper>()
|
||||
.llt()
|
||||
.solve(Matrix::Identity(block_size, block_size));
|
||||
block = block.selfadjointView<Eigen::Upper>().llt().solve(
|
||||
Matrix::Identity(block_size, block_size));
|
||||
values += block_size * block_size;
|
||||
}
|
||||
}
|
||||
|
@@ -46,11 +46,13 @@ namespace internal {
|
||||
|
||||
// A thread safe block diagonal matrix implementation of
|
||||
// BlockRandomAccessMatrix.
|
||||
class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix {
|
||||
class CERES_EXPORT_INTERNAL BlockRandomAccessDiagonalMatrix
|
||||
: public BlockRandomAccessMatrix {
|
||||
public:
|
||||
// blocks is an array of block sizes.
|
||||
explicit BlockRandomAccessDiagonalMatrix(const std::vector<int>& blocks);
|
||||
BlockRandomAccessDiagonalMatrix(const BlockRandomAccessDiagonalMatrix&) = delete;
|
||||
BlockRandomAccessDiagonalMatrix(const BlockRandomAccessDiagonalMatrix&) =
|
||||
delete;
|
||||
void operator=(const BlockRandomAccessDiagonalMatrix&) = delete;
|
||||
|
||||
// The destructor is not thread safe. It assumes that no one is
|
||||
|
@@ -33,8 +33,7 @@
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
|
||||
BlockRandomAccessMatrix::~BlockRandomAccessMatrix() {
|
||||
}
|
||||
BlockRandomAccessMatrix::~BlockRandomAccessMatrix() {}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace ceres
|
||||
|
@@ -35,6 +35,8 @@
|
||||
|
||||
#include <mutex>
|
||||
|
||||
#include "ceres/internal/port.h"
|
||||
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
|
||||
@@ -91,7 +93,7 @@ struct CellInfo {
|
||||
std::mutex m;
|
||||
};
|
||||
|
||||
class BlockRandomAccessMatrix {
|
||||
class CERES_EXPORT_INTERNAL BlockRandomAccessMatrix {
|
||||
public:
|
||||
virtual ~BlockRandomAccessMatrix();
|
||||
|
||||
|
@@ -50,10 +50,8 @@ using std::set;
|
||||
using std::vector;
|
||||
|
||||
BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
|
||||
const vector<int>& blocks,
|
||||
const set<pair<int, int>>& block_pairs)
|
||||
: kMaxRowBlocks(10 * 1000 * 1000),
|
||||
blocks_(blocks) {
|
||||
const vector<int>& blocks, const set<pair<int, int>>& block_pairs)
|
||||
: kMaxRowBlocks(10 * 1000 * 1000), blocks_(blocks) {
|
||||
CHECK_LT(blocks.size(), kMaxRowBlocks);
|
||||
|
||||
// Build the row/column layout vector and count the number of scalar
|
||||
@@ -75,9 +73,8 @@ BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
|
||||
num_nonzeros += row_block_size * col_block_size;
|
||||
}
|
||||
|
||||
VLOG(1) << "Matrix Size [" << num_cols
|
||||
<< "," << num_cols
|
||||
<< "] " << num_nonzeros;
|
||||
VLOG(1) << "Matrix Size [" << num_cols << "," << num_cols << "] "
|
||||
<< num_nonzeros;
|
||||
|
||||
tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
|
||||
tsm_->set_num_nonzeros(num_nonzeros);
|
||||
@@ -105,11 +102,11 @@ BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
|
||||
layout_[IntPairToLong(row_block_id, col_block_id)]->values - values;
|
||||
for (int r = 0; r < row_block_size; ++r) {
|
||||
for (int c = 0; c < col_block_size; ++c, ++pos) {
|
||||
rows[pos] = block_positions_[row_block_id] + r;
|
||||
cols[pos] = block_positions_[col_block_id] + c;
|
||||
values[pos] = 1.0;
|
||||
DCHECK_LT(rows[pos], tsm_->num_rows());
|
||||
DCHECK_LT(cols[pos], tsm_->num_rows());
|
||||
rows[pos] = block_positions_[row_block_id] + r;
|
||||
cols[pos] = block_positions_[col_block_id] + c;
|
||||
values[pos] = 1.0;
|
||||
DCHECK_LT(rows[pos], tsm_->num_rows());
|
||||
DCHECK_LT(cols[pos], tsm_->num_rows());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -129,7 +126,7 @@ CellInfo* BlockRandomAccessSparseMatrix::GetCell(int row_block_id,
|
||||
int* col,
|
||||
int* row_stride,
|
||||
int* col_stride) {
|
||||
const LayoutType::iterator it =
|
||||
const LayoutType::iterator it =
|
||||
layout_.find(IntPairToLong(row_block_id, col_block_id));
|
||||
if (it == layout_.end()) {
|
||||
return NULL;
|
||||
@@ -147,8 +144,7 @@ CellInfo* BlockRandomAccessSparseMatrix::GetCell(int row_block_id,
|
||||
// when they are calling SetZero.
|
||||
void BlockRandomAccessSparseMatrix::SetZero() {
|
||||
if (tsm_->num_nonzeros()) {
|
||||
VectorRef(tsm_->mutable_values(),
|
||||
tsm_->num_nonzeros()).setZero();
|
||||
VectorRef(tsm_->mutable_values(), tsm_->num_nonzeros()).setZero();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -164,7 +160,9 @@ void BlockRandomAccessSparseMatrix::SymmetricRightMultiply(const double* x,
|
||||
const int col_block_pos = block_positions_[col];
|
||||
|
||||
MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
|
||||
cell_position_and_data.second, row_block_size, col_block_size,
|
||||
cell_position_and_data.second,
|
||||
row_block_size,
|
||||
col_block_size,
|
||||
x + col_block_pos,
|
||||
y + row_block_pos);
|
||||
|
||||
@@ -174,7 +172,9 @@ void BlockRandomAccessSparseMatrix::SymmetricRightMultiply(const double* x,
|
||||
// triangular multiply also.
|
||||
if (row != col) {
|
||||
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
|
||||
cell_position_and_data.second, row_block_size, col_block_size,
|
||||
cell_position_and_data.second,
|
||||
row_block_size,
|
||||
col_block_size,
|
||||
x + row_block_pos,
|
||||
y + col_block_pos);
|
||||
}
|
||||
|
@@ -39,10 +39,10 @@
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/block_random_access_matrix.h"
|
||||
#include "ceres/triplet_sparse_matrix.h"
|
||||
#include "ceres/internal/port.h"
|
||||
#include "ceres/types.h"
|
||||
#include "ceres/small_blas.h"
|
||||
#include "ceres/triplet_sparse_matrix.h"
|
||||
#include "ceres/types.h"
|
||||
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
@@ -51,7 +51,8 @@ namespace internal {
|
||||
// BlockRandomAccessMatrix. Internally a TripletSparseMatrix is used
|
||||
// for doing the actual storage. This class augments this matrix with
|
||||
// an unordered_map that allows random read/write access.
|
||||
class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
|
||||
class CERES_EXPORT_INTERNAL BlockRandomAccessSparseMatrix
|
||||
: public BlockRandomAccessMatrix {
|
||||
public:
|
||||
// blocks is an array of block sizes. block_pairs is a set of
|
||||
// <row_block_id, col_block_id> pairs to identify the non-zero cells
|
||||
@@ -110,7 +111,7 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
|
||||
|
||||
// A mapping from <row_block_id, col_block_id> to the position in
|
||||
// the values array of tsm_ where the block is stored.
|
||||
typedef std::unordered_map<long int, CellInfo* > LayoutType;
|
||||
typedef std::unordered_map<long int, CellInfo*> LayoutType;
|
||||
LayoutType layout_;
|
||||
|
||||
// In order traversal of contents of the matrix. This allows us to
|
||||
|
@@ -30,9 +30,10 @@
|
||||
|
||||
#include "ceres/block_sparse_matrix.h"
|
||||
|
||||
#include <cstddef>
|
||||
#include <algorithm>
|
||||
#include <cstddef>
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/block_structure.h"
|
||||
#include "ceres/internal/eigen.h"
|
||||
#include "ceres/random.h"
|
||||
@@ -77,8 +78,8 @@ BlockSparseMatrix::BlockSparseMatrix(
|
||||
CHECK_GE(num_rows_, 0);
|
||||
CHECK_GE(num_cols_, 0);
|
||||
CHECK_GE(num_nonzeros_, 0);
|
||||
VLOG(2) << "Allocating values array with "
|
||||
<< num_nonzeros_ * sizeof(double) << " bytes."; // NOLINT
|
||||
VLOG(2) << "Allocating values array with " << num_nonzeros_ * sizeof(double)
|
||||
<< " bytes."; // NOLINT
|
||||
values_.reset(new double[num_nonzeros_]);
|
||||
max_num_nonzeros_ = num_nonzeros_;
|
||||
CHECK(values_ != nullptr);
|
||||
@@ -88,7 +89,7 @@ void BlockSparseMatrix::SetZero() {
|
||||
std::fill(values_.get(), values_.get() + num_nonzeros_, 0.0);
|
||||
}
|
||||
|
||||
void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
|
||||
void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
|
||||
CHECK(x != nullptr);
|
||||
CHECK(y != nullptr);
|
||||
|
||||
@@ -101,7 +102,9 @@ void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
|
||||
int col_block_size = block_structure_->cols[col_block_id].size;
|
||||
int col_block_pos = block_structure_->cols[col_block_id].position;
|
||||
MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
|
||||
values_.get() + cells[j].position, row_block_size, col_block_size,
|
||||
values_.get() + cells[j].position,
|
||||
row_block_size,
|
||||
col_block_size,
|
||||
x + col_block_pos,
|
||||
y + row_block_pos);
|
||||
}
|
||||
@@ -121,7 +124,9 @@ void BlockSparseMatrix::LeftMultiply(const double* x, double* y) const {
|
||||
int col_block_size = block_structure_->cols[col_block_id].size;
|
||||
int col_block_pos = block_structure_->cols[col_block_id].position;
|
||||
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
|
||||
values_.get() + cells[j].position, row_block_size, col_block_size,
|
||||
values_.get() + cells[j].position,
|
||||
row_block_size,
|
||||
col_block_size,
|
||||
x + row_block_pos,
|
||||
y + col_block_pos);
|
||||
}
|
||||
@@ -138,8 +143,8 @@ void BlockSparseMatrix::SquaredColumnNorm(double* x) const {
|
||||
int col_block_id = cells[j].block_id;
|
||||
int col_block_size = block_structure_->cols[col_block_id].size;
|
||||
int col_block_pos = block_structure_->cols[col_block_id].position;
|
||||
const MatrixRef m(values_.get() + cells[j].position,
|
||||
row_block_size, col_block_size);
|
||||
const MatrixRef m(
|
||||
values_.get() + cells[j].position, row_block_size, col_block_size);
|
||||
VectorRef(x + col_block_pos, col_block_size) += m.colwise().squaredNorm();
|
||||
}
|
||||
}
|
||||
@@ -155,8 +160,8 @@ void BlockSparseMatrix::ScaleColumns(const double* scale) {
|
||||
int col_block_id = cells[j].block_id;
|
||||
int col_block_size = block_structure_->cols[col_block_id].size;
|
||||
int col_block_pos = block_structure_->cols[col_block_id].position;
|
||||
MatrixRef m(values_.get() + cells[j].position,
|
||||
row_block_size, col_block_size);
|
||||
MatrixRef m(
|
||||
values_.get() + cells[j].position, row_block_size, col_block_size);
|
||||
m *= ConstVectorRef(scale + col_block_pos, col_block_size).asDiagonal();
|
||||
}
|
||||
}
|
||||
@@ -178,8 +183,8 @@ void BlockSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
|
||||
int col_block_size = block_structure_->cols[col_block_id].size;
|
||||
int col_block_pos = block_structure_->cols[col_block_id].position;
|
||||
int jac_pos = cells[j].position;
|
||||
m.block(row_block_pos, col_block_pos, row_block_size, col_block_size)
|
||||
+= MatrixRef(values_.get() + jac_pos, row_block_size, col_block_size);
|
||||
m.block(row_block_pos, col_block_pos, row_block_size, col_block_size) +=
|
||||
MatrixRef(values_.get() + jac_pos, row_block_size, col_block_size);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -201,7 +206,7 @@ void BlockSparseMatrix::ToTripletSparseMatrix(
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
int jac_pos = cells[j].position;
for (int r = 0; r < row_block_size; ++r) {
for (int c = 0; c < col_block_size; ++c, ++jac_pos) {
matrix->mutable_rows()[jac_pos] = row_block_pos + r;
matrix->mutable_cols()[jac_pos] = col_block_pos + c;
@@ -215,8 +220,7 @@ void BlockSparseMatrix::ToTripletSparseMatrix(

// Return a pointer to the block structure. We continue to hold
// ownership of the object though.
const CompressedRowBlockStructure* BlockSparseMatrix::block_structure()
const {
const CompressedRowBlockStructure* BlockSparseMatrix::block_structure() const {
return block_structure_.get();
}

@@ -233,7 +237,8 @@ void BlockSparseMatrix::ToTextFile(FILE* file) const {
int jac_pos = cells[j].position;
for (int r = 0; r < row_block_size; ++r) {
for (int c = 0; c < col_block_size; ++c) {
fprintf(file, "% 10d % 10d %17f\n",
fprintf(file,
"% 10d % 10d %17f\n",
row_block_pos + r,
col_block_pos + c,
values_[jac_pos++]);
@@ -369,7 +374,6 @@ BlockSparseMatrix* BlockSparseMatrix::CreateRandomMatrix(
int row_block_position = 0;
int value_position = 0;
for (int r = 0; r < options.num_row_blocks; ++r) {

const int delta_block_size =
Uniform(options.max_row_block_size - options.min_row_block_size);
const int row_block_size = options.min_row_block_size + delta_block_size;

@@ -35,9 +35,11 @@
#define CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_

#include <memory>

#include "ceres/block_structure.h"
#include "ceres/sparse_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
#include "ceres/sparse_matrix.h"

namespace ceres {
namespace internal {
@@ -52,7 +54,7 @@ class TripletSparseMatrix;
//
// internal/ceres/block_structure.h
//
class BlockSparseMatrix : public SparseMatrix {
class CERES_EXPORT_INTERNAL BlockSparseMatrix : public SparseMatrix {
public:
// Construct a block sparse matrix with a fully initialized
// CompressedRowBlockStructure objected. The matrix takes over
@@ -77,11 +79,13 @@ class BlockSparseMatrix : public SparseMatrix {
void ToDenseMatrix(Matrix* dense_matrix) const final;
void ToTextFile(FILE* file) const final;

// clang-format off
int num_rows() const final { return num_rows_; }
int num_cols() const final { return num_cols_; }
int num_nonzeros() const final { return num_nonzeros_; }
const double* values() const final { return values_.get(); }
double* mutable_values() final { return values_.get(); }
// clang-format on

void ToTripletSparseMatrix(TripletSparseMatrix* matrix) const;
const CompressedRowBlockStructure* block_structure() const;
@@ -94,8 +98,7 @@ class BlockSparseMatrix : public SparseMatrix {
void DeleteRowBlocks(int delta_row_blocks);

static BlockSparseMatrix* CreateDiagonalMatrix(
const double* diagonal,
const std::vector<Block>& column_blocks);
const double* diagonal, const std::vector<Block>& column_blocks);

struct RandomMatrixOptions {
int num_row_blocks = 0;

@@ -35,7 +35,7 @@ namespace internal {

bool CellLessThan(const Cell& lhs, const Cell& rhs) {
if (lhs.block_id == rhs.block_id) {
return (lhs.position < rhs.position);
}
return (lhs.block_id < rhs.block_id);
}

@@ -40,6 +40,7 @@

#include <cstdint>
#include <vector>

#include "ceres/internal/port.h"

namespace ceres {

31 extern/ceres/internal/ceres/c_api.cc (vendored)
@@ -34,9 +34,10 @@
|
||||
|
||||
#include "ceres/c_api.h"
|
||||
|
||||
#include <vector>
|
||||
#include <iostream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/cost_function.h"
|
||||
#include "ceres/loss_function.h"
|
||||
#include "ceres/problem.h"
|
||||
@@ -70,8 +71,7 @@ class CallbackCostFunction : public ceres::CostFunction {
|
||||
int num_residuals,
|
||||
int num_parameter_blocks,
|
||||
int* parameter_block_sizes)
|
||||
: cost_function_(cost_function),
|
||||
user_data_(user_data) {
|
||||
: cost_function_(cost_function), user_data_(user_data) {
|
||||
set_num_residuals(num_residuals);
|
||||
for (int i = 0; i < num_parameter_blocks; ++i) {
|
||||
mutable_parameter_block_sizes()->push_back(parameter_block_sizes[i]);
|
||||
@@ -81,12 +81,10 @@ class CallbackCostFunction : public ceres::CostFunction {
|
||||
virtual ~CallbackCostFunction() {}
|
||||
|
||||
bool Evaluate(double const* const* parameters,
|
||||
double* residuals,
|
||||
double** jacobians) const final {
|
||||
return (*cost_function_)(user_data_,
|
||||
const_cast<double**>(parameters),
|
||||
residuals,
|
||||
jacobians);
|
||||
double* residuals,
|
||||
double** jacobians) const final {
|
||||
return (*cost_function_)(
|
||||
user_data_, const_cast<double**>(parameters), residuals, jacobians);
|
||||
}
|
||||
|
||||
private:
|
||||
@@ -100,7 +98,7 @@ class CallbackLossFunction : public ceres::LossFunction {
|
||||
public:
|
||||
explicit CallbackLossFunction(ceres_loss_function_t loss_function,
|
||||
void* user_data)
|
||||
: loss_function_(loss_function), user_data_(user_data) {}
|
||||
: loss_function_(loss_function), user_data_(user_data) {}
|
||||
void Evaluate(double sq_norm, double* rho) const final {
|
||||
(*loss_function_)(user_data_, sq_norm, rho);
|
||||
}
|
||||
@@ -134,8 +132,8 @@ void ceres_free_stock_loss_function_data(void* loss_function_data) {
|
||||
void ceres_stock_loss_function(void* user_data,
|
||||
double squared_norm,
|
||||
double out[3]) {
|
||||
reinterpret_cast<ceres::LossFunction*>(user_data)
|
||||
->Evaluate(squared_norm, out);
|
||||
reinterpret_cast<ceres::LossFunction*>(user_data)->Evaluate(squared_norm,
|
||||
out);
|
||||
}
|
||||
|
||||
ceres_residual_block_id_t* ceres_problem_add_residual_block(
|
||||
@@ -159,16 +157,15 @@ ceres_residual_block_id_t* ceres_problem_add_residual_block(
|
||||
|
||||
ceres::LossFunction* callback_loss_function = NULL;
|
||||
if (loss_function != NULL) {
|
||||
callback_loss_function = new CallbackLossFunction(loss_function,
|
||||
loss_function_data);
|
||||
callback_loss_function =
|
||||
new CallbackLossFunction(loss_function, loss_function_data);
|
||||
}
|
||||
|
||||
std::vector<double*> parameter_blocks(parameters,
|
||||
parameters + num_parameter_blocks);
|
||||
return reinterpret_cast<ceres_residual_block_id_t*>(
|
||||
ceres_problem->AddResidualBlock(callback_cost_function,
|
||||
callback_loss_function,
|
||||
parameter_blocks));
|
||||
ceres_problem->AddResidualBlock(
|
||||
callback_cost_function, callback_loss_function, parameter_blocks));
|
||||
}
|
||||
|
||||
void ceres_solve(ceres_problem_t* c_problem) {
|
||||
|
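The CallbackCostFunction shim above forwards Evaluate to a plain C function pointer, invoked as (*cost_function_)(user_data, parameters, residuals, jacobians). A minimal sketch of such a callback, for a single residual f(x) = 10 - x on one 1-dimensional parameter block (the function name and numbers are illustrative, not part of this diff; the int return value, treated as a success flag, is an assumption based on the wrapper above):

// Signature inferred from the wrapper above: writes residuals (and jacobians
// when requested) and returns non-zero on success.
extern "C" int MyResidual(void* user_data,
                          double** parameters,
                          double* residuals,
                          double** jacobians) {
  (void)user_data;
  const double x = parameters[0][0];
  residuals[0] = 10.0 - x;
  if (jacobians != nullptr && jacobians[0] != nullptr) {
    jacobians[0][0] = -1.0;  // d(residual)/dx
  }
  return 1;
}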
9 extern/ceres/internal/ceres/callbacks.cc (vendored)
@@ -28,8 +28,10 @@
|
||||
//
|
||||
// Author: sameeragarwal@google.com (Sameer Agarwal)
|
||||
|
||||
#include <iostream> // NO LINT
|
||||
#include "ceres/callbacks.h"
|
||||
|
||||
#include <iostream> // NO LINT
|
||||
|
||||
#include "ceres/program.h"
|
||||
#include "ceres/stringprintf.h"
|
||||
#include "glog/logging.h"
|
||||
@@ -76,8 +78,7 @@ CallbackReturnType GradientProblemSolverStateUpdatingCallback::operator()(
|
||||
|
||||
LoggingCallback::LoggingCallback(const MinimizerType minimizer_type,
|
||||
const bool log_to_stdout)
|
||||
: minimizer_type(minimizer_type),
|
||||
log_to_stdout_(log_to_stdout) {}
|
||||
: minimizer_type(minimizer_type), log_to_stdout_(log_to_stdout) {}
|
||||
|
||||
LoggingCallback::~LoggingCallback() {}
|
||||
|
||||
@@ -99,11 +100,13 @@ CallbackReturnType LoggingCallback::operator()(
|
||||
summary.iteration_time_in_seconds,
|
||||
summary.cumulative_time_in_seconds);
|
||||
} else if (minimizer_type == TRUST_REGION) {
|
||||
// clang-format off
|
||||
if (summary.iteration == 0) {
|
||||
output = "iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time\n"; // NOLINT
|
||||
}
|
||||
const char* kReportRowFormat =
|
||||
"% 4d % 8e % 3.2e % 3.2e % 3.2e % 3.2e % 3.2e % 4d % 3.2e % 3.2e"; // NOLINT
|
||||
// clang-format on
|
||||
output += StringPrintf(kReportRowFormat,
|
||||
summary.iteration,
|
||||
summary.cost,
|
||||
|
5 extern/ceres/internal/ceres/callbacks.h (vendored)
@@ -32,8 +32,9 @@
|
||||
#define CERES_INTERNAL_CALLBACKS_H_
|
||||
|
||||
#include <string>
|
||||
#include "ceres/iteration_callback.h"
|
||||
|
||||
#include "ceres/internal/port.h"
|
||||
#include "ceres/iteration_callback.h"
|
||||
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
@@ -47,6 +48,7 @@ class StateUpdatingCallback : public IterationCallback {
|
||||
StateUpdatingCallback(Program* program, double* parameters);
|
||||
virtual ~StateUpdatingCallback();
|
||||
CallbackReturnType operator()(const IterationSummary& summary) final;
|
||||
|
||||
private:
|
||||
Program* program_;
|
||||
double* parameters_;
|
||||
@@ -61,6 +63,7 @@ class GradientProblemSolverStateUpdatingCallback : public IterationCallback {
|
||||
double* user_parameters);
|
||||
virtual ~GradientProblemSolverStateUpdatingCallback();
|
||||
CallbackReturnType operator()(const IterationSummary& summary) final;
|
||||
|
||||
private:
|
||||
int num_parameters_;
|
||||
const double* internal_parameters_;
|
||||
|
@@ -31,8 +31,8 @@
|
||||
|
||||
#include "ceres/canonical_views_clustering.h"
|
||||
|
||||
#include <unordered_set>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
|
||||
#include "ceres/graph.h"
|
||||
#include "ceres/map_util.h"
|
||||
@@ -126,8 +126,7 @@ void CanonicalViewsClustering::ComputeClustering(
|
||||
|
||||
// Add canonical view if quality improves, or if minimum is not
|
||||
// yet met, otherwise break.
|
||||
if ((best_difference <= 0) &&
|
||||
(centers->size() >= options_.min_views)) {
|
||||
if ((best_difference <= 0) && (centers->size() >= options_.min_views)) {
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -141,8 +140,7 @@ void CanonicalViewsClustering::ComputeClustering(
|
||||
|
||||
// Return the set of vertices of the graph which have valid vertex
|
||||
// weights.
|
||||
void CanonicalViewsClustering::FindValidViews(
|
||||
IntSet* valid_views) const {
|
||||
void CanonicalViewsClustering::FindValidViews(IntSet* valid_views) const {
|
||||
const IntSet& views = graph_->vertices();
|
||||
for (const auto& view : views) {
|
||||
if (graph_->VertexWeight(view) != WeightedGraph<int>::InvalidWeight()) {
|
||||
@@ -154,8 +152,7 @@ void CanonicalViewsClustering::FindValidViews(
|
||||
// Computes the difference in the quality score if 'candidate' were
|
||||
// added to the set of canonical views.
|
||||
double CanonicalViewsClustering::ComputeClusteringQualityDifference(
|
||||
const int candidate,
|
||||
const vector<int>& centers) const {
|
||||
const int candidate, const vector<int>& centers) const {
|
||||
// View score.
|
||||
double difference =
|
||||
options_.view_score_weight * graph_->VertexWeight(candidate);
|
||||
@@ -179,7 +176,7 @@ double CanonicalViewsClustering::ComputeClusteringQualityDifference(
|
||||
// Orthogonality.
|
||||
for (int i = 0; i < centers.size(); ++i) {
|
||||
difference -= options_.similarity_penalty_weight *
|
||||
graph_->EdgeWeight(centers[i], candidate);
|
||||
graph_->EdgeWeight(centers[i], candidate);
|
||||
}
|
||||
|
||||
return difference;
|
||||
@@ -192,8 +189,7 @@ void CanonicalViewsClustering::UpdateCanonicalViewAssignments(
|
||||
for (const auto& neighbor : neighbors) {
|
||||
const double old_similarity =
|
||||
FindWithDefault(view_to_canonical_view_similarity_, neighbor, 0.0);
|
||||
const double new_similarity =
|
||||
graph_->EdgeWeight(neighbor, canonical_view);
|
||||
const double new_similarity = graph_->EdgeWeight(neighbor, canonical_view);
|
||||
if (new_similarity > old_similarity) {
|
||||
view_to_canonical_view_[neighbor] = canonical_view;
|
||||
view_to_canonical_view_similarity_[neighbor] = new_similarity;
|
||||
@@ -203,8 +199,7 @@ void CanonicalViewsClustering::UpdateCanonicalViewAssignments(
|
||||
|
||||
// Assign a cluster id to each view.
|
||||
void CanonicalViewsClustering::ComputeClusterMembership(
|
||||
const vector<int>& centers,
|
||||
IntMap* membership) const {
|
||||
const vector<int>& centers, IntMap* membership) const {
|
||||
CHECK(membership != nullptr);
|
||||
membership->clear();
|
||||
|
||||
|
@@ -45,6 +45,7 @@
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/graph.h"
|
||||
#include "ceres/internal/port.h"
|
||||
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
@@ -94,13 +95,13 @@ struct CanonicalViewsClusteringOptions;
|
||||
// It is possible depending on the configuration of the clustering
|
||||
// algorithm that some of the vertices may not be assigned to any
|
||||
// cluster. In this case they are assigned to a cluster with id = -1;
|
||||
void ComputeCanonicalViewsClustering(
|
||||
CERES_EXPORT_INTERNAL void ComputeCanonicalViewsClustering(
|
||||
const CanonicalViewsClusteringOptions& options,
|
||||
const WeightedGraph<int>& graph,
|
||||
std::vector<int>* centers,
|
||||
std::unordered_map<int, int>* membership);
|
||||
|
||||
struct CanonicalViewsClusteringOptions {
|
||||
struct CERES_EXPORT_INTERNAL CanonicalViewsClusteringOptions {
|
||||
// The minimum number of canonical views to compute.
|
||||
int min_views = 3;
|
||||
|
||||
|
10 extern/ceres/internal/ceres/casts.h (vendored)
@@ -56,15 +56,15 @@ struct identity_ {
|
||||
//
|
||||
// base::identity_ is used to make a non-deduced context, which
|
||||
// forces all callers to explicitly specify the template argument.
|
||||
template<typename To>
|
||||
template <typename To>
|
||||
inline To implicit_cast(typename identity_<To>::type to) {
|
||||
return to;
|
||||
}
|
||||
|
||||
// This version of implicit_cast is used when two template arguments
|
||||
// are specified. It's obsolete and should not be used.
|
||||
template<typename To, typename From>
|
||||
inline To implicit_cast(typename identity_<From>::type const &f) {
|
||||
template <typename To, typename From>
|
||||
inline To implicit_cast(typename identity_<From>::type const& f) {
|
||||
return f;
|
||||
}
|
||||
|
||||
@@ -86,8 +86,8 @@ inline To implicit_cast(typename identity_<From>::type const &f) {
|
||||
// if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
|
||||
// You should design the code some other way not to need this.
|
||||
|
||||
template<typename To, typename From> // use like this: down_cast<T*>(foo);
|
||||
inline To down_cast(From* f) { // so we only accept pointers
|
||||
template <typename To, typename From> // use like this: down_cast<T*>(foo);
|
||||
inline To down_cast(From* f) { // so we only accept pointers
|
||||
// Ensures that To is a sub-type of From *. This test is here only
|
||||
// for compile-time type checking, and has no overhead in an
|
||||
// optimized build at run-time, as it will be optimized away
|
||||
|
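As a usage note for the templates being reformatted here, following the "use like this: down_cast<T*>(foo)" comment above (Foo and Bar are hypothetical, and the sketch assumes the templates are visible unqualified after including the header; adjust the namespace as needed):

struct Foo { virtual ~Foo() {} };
struct Bar : public Foo { void Twiddle() {} };

void Example(Bar* bar) {
  // Derived* -> Base* is an implicit conversion, so implicit_cast accepts it.
  Foo* base = implicit_cast<Foo*>(bar);

  // Base* -> Derived*: down_cast documents (and statically checks) the
  // assumption that base really points at a Bar.
  down_cast<Bar*>(base)->Twiddle();
}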
@@ -33,8 +33,9 @@
|
||||
|
||||
#include <algorithm>
|
||||
#include <memory>
|
||||
#include "ceres/linear_operator.h"
|
||||
|
||||
#include "ceres/internal/eigen.h"
|
||||
#include "ceres/linear_operator.h"
|
||||
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
@@ -79,9 +80,8 @@ class SparseMatrix;
|
||||
// Note: This class is not thread safe, since it uses some temporary storage.
|
||||
class CgnrLinearOperator : public LinearOperator {
|
||||
public:
|
||||
CgnrLinearOperator(const LinearOperator& A, const double *D)
|
||||
: A_(A), D_(D), z_(new double[A.num_rows()]) {
|
||||
}
|
||||
CgnrLinearOperator(const LinearOperator& A, const double* D)
|
||||
: A_(A), D_(D), z_(new double[A.num_rows()]) {}
|
||||
virtual ~CgnrLinearOperator() {}
|
||||
|
||||
void RightMultiply(const double* x, double* y) const final {
|
||||
@@ -96,8 +96,8 @@ class CgnrLinearOperator : public LinearOperator {
|
||||
// y = y + DtDx
|
||||
if (D_ != NULL) {
|
||||
int n = A_.num_cols();
|
||||
VectorRef(y, n).array() += ConstVectorRef(D_, n).array().square() *
|
||||
ConstVectorRef(x, n).array();
|
||||
VectorRef(y, n).array() +=
|
||||
ConstVectorRef(D_, n).array().square() * ConstVectorRef(x, n).array();
|
||||
}
|
||||
}
|
||||
|
||||
|
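The RightMultiply hunk above computes y = A'Ax + D'Dx, i.e. CgnrLinearOperator applies the normal-equations operator of the regularized least-squares problem min |Ax - b|^2 + |Dx|^2. A dense Eigen sketch of the same operator (illustrative only, not the Ceres class):

#include "Eigen/Core"

// y = (A'A + D'D) x for a dense A and a diagonal D given by its entries.
Eigen::VectorXd ApplyCgnrOperator(const Eigen::MatrixXd& A,
                                  const Eigen::VectorXd& D,
                                  const Eigen::VectorXd& x) {
  Eigen::VectorXd z = A * x;                    // z = Ax
  Eigen::VectorXd y = A.transpose() * z;        // y = A'Ax
  y.array() += D.array().square() * x.array();  // y = y + DtDx
  return y;
}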
10 extern/ceres/internal/ceres/cgnr_solver.h (vendored)
@@ -32,6 +32,7 @@
|
||||
#define CERES_INTERNAL_CGNR_SOLVER_H_
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "ceres/linear_solver.h"
|
||||
|
||||
namespace ceres {
|
||||
@@ -55,11 +56,10 @@ class CgnrSolver : public BlockSparseMatrixSolver {
|
||||
void operator=(const CgnrSolver&) = delete;
|
||||
virtual ~CgnrSolver();
|
||||
|
||||
Summary SolveImpl(
|
||||
BlockSparseMatrix* A,
|
||||
const double* b,
|
||||
const LinearSolver::PerSolveOptions& per_solve_options,
|
||||
double* x) final;
|
||||
Summary SolveImpl(BlockSparseMatrix* A,
|
||||
const double* b,
|
||||
const LinearSolver::PerSolveOptions& per_solve_options,
|
||||
double* x) final;
|
||||
|
||||
private:
|
||||
const LinearSolver::Options options_;
|
||||
|
@@ -30,8 +30,9 @@
|
||||
|
||||
#include "ceres/compressed_col_sparse_matrix_utils.h"
|
||||
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/internal/port.h"
|
||||
#include "glog/logging.h"
|
||||
|
||||
@@ -40,13 +41,12 @@ namespace internal {
|
||||
|
||||
using std::vector;
|
||||
|
||||
void CompressedColumnScalarMatrixToBlockMatrix(
|
||||
const int* scalar_rows,
|
||||
const int* scalar_cols,
|
||||
const vector<int>& row_blocks,
|
||||
const vector<int>& col_blocks,
|
||||
vector<int>* block_rows,
|
||||
vector<int>* block_cols) {
|
||||
void CompressedColumnScalarMatrixToBlockMatrix(const int* scalar_rows,
|
||||
const int* scalar_cols,
|
||||
const vector<int>& row_blocks,
|
||||
const vector<int>& col_blocks,
|
||||
vector<int>* block_rows,
|
||||
vector<int>* block_cols) {
|
||||
CHECK(block_rows != nullptr);
|
||||
CHECK(block_cols != nullptr);
|
||||
block_rows->clear();
|
||||
@@ -71,10 +71,8 @@ void CompressedColumnScalarMatrixToBlockMatrix(
|
||||
for (int col_block = 0; col_block < num_col_blocks; ++col_block) {
|
||||
int column_size = 0;
|
||||
for (int idx = scalar_cols[c]; idx < scalar_cols[c + 1]; ++idx) {
|
||||
vector<int>::const_iterator it =
|
||||
std::lower_bound(row_block_starts.begin(),
|
||||
row_block_starts.end(),
|
||||
scalar_rows[idx]);
|
||||
vector<int>::const_iterator it = std::lower_bound(
|
||||
row_block_starts.begin(), row_block_starts.end(), scalar_rows[idx]);
|
||||
// Since we are using lower_bound, it will return the row id
|
||||
// where the row block starts. For everything but the first row
|
||||
// of the block, where these values will be the same, we can
|
||||
@@ -104,7 +102,7 @@ void BlockOrderingToScalarOrdering(const vector<int>& blocks,
|
||||
|
||||
// block_starts = [0, block1, block1 + block2 ..]
|
||||
vector<int> block_starts(num_blocks);
|
||||
for (int i = 0, cursor = 0; i < num_blocks ; ++i) {
|
||||
for (int i = 0, cursor = 0; i < num_blocks; ++i) {
|
||||
block_starts[i] = cursor;
|
||||
cursor += blocks[i];
|
||||
}
|
||||
|
@@ -32,6 +32,7 @@
|
||||
#define CERES_INTERNAL_COMPRESSED_COL_SPARSE_MATRIX_UTILS_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/internal/port.h"
|
||||
|
||||
namespace ceres {
|
||||
@@ -47,7 +48,7 @@ namespace internal {
|
||||
// and column block j, then it is expected that A contains at least
|
||||
// one non-zero entry corresponding to the top left entry of c_ij,
|
||||
// as that entry is used to detect the presence of a non-zero c_ij.
|
||||
void CompressedColumnScalarMatrixToBlockMatrix(
|
||||
CERES_EXPORT_INTERNAL void CompressedColumnScalarMatrixToBlockMatrix(
|
||||
const int* scalar_rows,
|
||||
const int* scalar_cols,
|
||||
const std::vector<int>& row_blocks,
|
||||
@@ -58,7 +59,7 @@ void CompressedColumnScalarMatrixToBlockMatrix(
|
||||
// Given a set of blocks and a permutation of these blocks, compute
|
||||
// the corresponding "scalar" ordering, where the scalar ordering of
|
||||
// size sum(blocks).
|
||||
void BlockOrderingToScalarOrdering(
|
||||
CERES_EXPORT_INTERNAL void BlockOrderingToScalarOrdering(
|
||||
const std::vector<int>& blocks,
|
||||
const std::vector<int>& block_ordering,
|
||||
std::vector<int>* scalar_ordering);
|
||||
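The declaration above maps a permutation of blocks to the corresponding permutation of scalar indices. A self-contained sketch of that expansion (illustrative, not the library implementation):

#include <vector>

// Given block sizes and a permutation of the blocks, produce the matching
// permutation of the scalar (per-column) indices.
void BlockOrderingToScalarOrderingSketch(const std::vector<int>& blocks,
                                         const std::vector<int>& block_ordering,
                                         std::vector<int>* scalar_ordering) {
  // block_starts = [0, block1, block1 + block2, ...]
  std::vector<int> block_starts(blocks.size());
  for (int i = 0, cursor = 0; i < static_cast<int>(blocks.size()); ++i) {
    block_starts[i] = cursor;
    cursor += blocks[i];
  }

  scalar_ordering->clear();
  for (int block : block_ordering) {
    for (int j = 0; j < blocks[block]; ++j) {
      scalar_ordering->push_back(block_starts[block] + j);
    }
  }
}

For blocks = {2, 3} and block_ordering = {1, 0} this yields {2, 3, 4, 0, 1}.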
@@ -101,7 +102,7 @@ void SolveUpperTriangularTransposeInPlace(IntegerType num_cols,
|
||||
const double v = values[idx];
|
||||
rhs_and_solution[c] -= v * rhs_and_solution[r];
|
||||
}
|
||||
rhs_and_solution[c] = rhs_and_solution[c] / values[cols[c + 1] - 1];
|
||||
rhs_and_solution[c] = rhs_and_solution[c] / values[cols[c + 1] - 1];
|
||||
}
|
||||
}
|
||||
|
||||
@@ -132,7 +133,7 @@ void SolveRTRWithSparseRHS(IntegerType num_cols,
|
||||
const double v = values[idx];
|
||||
solution[c] -= v * solution[r];
|
||||
}
|
||||
solution[c] = solution[c] / values[cols[c + 1] - 1];
|
||||
solution[c] = solution[c] / values[cols[c + 1] - 1];
|
||||
}
|
||||
|
||||
SolveUpperTriangularInPlace(num_cols, rows, cols, values, solution);
|
||||
|
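The SolveUpperTriangularInPlace call above is a standard compressed-column back substitution. A minimal sketch under the same storage convention (column pointers in cols, row indices in rows, diagonal entry stored last in each column; a signed IntegerType is assumed), not the library code itself:

// Solves R * x = rhs in place, where R is upper triangular in compressed
// column storage and rhs_and_solution holds rhs on entry and x on exit.
template <typename IntegerType>
void SolveUpperTriangularSketch(IntegerType num_cols,
                                const IntegerType* rows,
                                const IntegerType* cols,
                                const double* values,
                                double* rhs_and_solution) {
  for (IntegerType c = num_cols - 1; c >= 0; --c) {
    // The diagonal entry is the last one stored in column c.
    rhs_and_solution[c] /= values[cols[c + 1] - 1];
    // Propagate x[c] into the rows above it.
    for (IntegerType idx = cols[c]; idx < cols[c + 1] - 1; ++idx) {
      rhs_and_solution[rows[idx]] -= values[idx] * rhs_and_solution[c];
    }
  }
}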
@@ -44,23 +44,21 @@
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
|
||||
using std::adjacent_find;
|
||||
using std::make_pair;
|
||||
using std::pair;
|
||||
using std::vector;
|
||||
using std::adjacent_find;
|
||||
|
||||
void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
|
||||
const Program* program, CompressedRowSparseMatrix* jacobian) {
|
||||
const vector<ParameterBlock*>& parameter_blocks =
|
||||
program->parameter_blocks();
|
||||
const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
|
||||
vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
|
||||
col_blocks.resize(parameter_blocks.size());
|
||||
for (int i = 0; i < parameter_blocks.size(); ++i) {
|
||||
col_blocks[i] = parameter_blocks[i]->LocalSize();
|
||||
}
|
||||
|
||||
const vector<ResidualBlock*>& residual_blocks =
|
||||
program->residual_blocks();
|
||||
const vector<ResidualBlock*>& residual_blocks = program->residual_blocks();
|
||||
vector<int>& row_blocks = *(jacobian->mutable_row_blocks());
|
||||
row_blocks.resize(residual_blocks.size());
|
||||
for (int i = 0; i < residual_blocks.size(); ++i) {
|
||||
@@ -69,11 +67,10 @@ void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
|
||||
}
|
||||
|
||||
void CompressedRowJacobianWriter::GetOrderedParameterBlocks(
|
||||
const Program* program,
|
||||
int residual_id,
|
||||
vector<pair<int, int>>* evaluated_jacobian_blocks) {
|
||||
const ResidualBlock* residual_block =
|
||||
program->residual_blocks()[residual_id];
|
||||
const Program* program,
|
||||
int residual_id,
|
||||
vector<pair<int, int>>* evaluated_jacobian_blocks) {
|
||||
const ResidualBlock* residual_block = program->residual_blocks()[residual_id];
|
||||
const int num_parameter_blocks = residual_block->NumParameterBlocks();
|
||||
|
||||
for (int j = 0; j < num_parameter_blocks; ++j) {
|
||||
@@ -88,8 +85,7 @@ void CompressedRowJacobianWriter::GetOrderedParameterBlocks(
|
||||
}
|
||||
|
||||
SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
|
||||
const vector<ResidualBlock*>& residual_blocks =
|
||||
program_->residual_blocks();
|
||||
const vector<ResidualBlock*>& residual_blocks = program_->residual_blocks();
|
||||
|
||||
int total_num_residuals = program_->NumResiduals();
|
||||
int total_num_effective_parameters = program_->NumEffectiveParameters();
|
||||
@@ -112,11 +108,10 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
|
||||
// Allocate more space than needed to store the jacobian so that when the LM
|
||||
// algorithm adds the diagonal, no reallocation is necessary. This reduces
|
||||
// peak memory usage significantly.
|
||||
CompressedRowSparseMatrix* jacobian =
|
||||
new CompressedRowSparseMatrix(
|
||||
total_num_residuals,
|
||||
total_num_effective_parameters,
|
||||
num_jacobian_nonzeros + total_num_effective_parameters);
|
||||
CompressedRowSparseMatrix* jacobian = new CompressedRowSparseMatrix(
|
||||
total_num_residuals,
|
||||
total_num_effective_parameters,
|
||||
num_jacobian_nonzeros + total_num_effective_parameters);
|
||||
|
||||
// At this stage, the CompressedRowSparseMatrix is an invalid state. But this
|
||||
// seems to be the only way to construct it without doing a memory copy.
|
||||
@@ -148,8 +143,7 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
|
||||
std::string parameter_block_description;
|
||||
for (int j = 0; j < num_parameter_blocks; ++j) {
|
||||
ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
|
||||
parameter_block_description +=
|
||||
parameter_block->ToString() + "\n";
|
||||
parameter_block_description += parameter_block->ToString() + "\n";
|
||||
}
|
||||
LOG(FATAL) << "Ceres internal error: "
|
||||
<< "Duplicate parameter blocks detected in a cost function. "
|
||||
@@ -196,7 +190,7 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
|
||||
|
||||
void CompressedRowJacobianWriter::Write(int residual_id,
|
||||
int residual_offset,
|
||||
double **jacobians,
|
||||
double** jacobians,
|
||||
SparseMatrix* base_jacobian) {
|
||||
CompressedRowSparseMatrix* jacobian =
|
||||
down_cast<CompressedRowSparseMatrix*>(base_jacobian);
|
||||
|
@@ -50,8 +50,7 @@ class CompressedRowJacobianWriter {
|
||||
public:
|
||||
CompressedRowJacobianWriter(Evaluator::Options /* ignored */,
|
||||
Program* program)
|
||||
: program_(program) {
|
||||
}
|
||||
: program_(program) {}
|
||||
|
||||
// PopulateJacobianRowAndColumnBlockVectors sets col_blocks and
|
||||
// row_blocks for a CompressedRowSparseMatrix, based on the
|
||||
@@ -64,8 +63,7 @@ class CompressedRowJacobianWriter {
|
||||
// (Jacobian writers do not fall under any type hierarchy; they only
|
||||
// have to provide an interface as specified in program_evaluator.h).
|
||||
static void PopulateJacobianRowAndColumnBlockVectors(
|
||||
const Program* program,
|
||||
CompressedRowSparseMatrix* jacobian);
|
||||
const Program* program, CompressedRowSparseMatrix* jacobian);
|
||||
|
||||
// It is necessary to determine the order of the jacobian blocks
|
||||
// before copying them into a CompressedRowSparseMatrix (or derived
|
||||
@@ -99,7 +97,7 @@ class CompressedRowJacobianWriter {
|
||||
|
||||
void Write(int residual_id,
|
||||
int residual_offset,
|
||||
double **jacobians,
|
||||
double** jacobians,
|
||||
SparseMatrix* base_jacobian);
|
||||
|
||||
private:
|
||||
|
@@ -33,6 +33,7 @@
|
||||
#include <algorithm>
|
||||
#include <numeric>
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/crs_matrix.h"
|
||||
#include "ceres/internal/port.h"
|
||||
#include "ceres/random.h"
|
||||
|
@@ -32,6 +32,7 @@
|
||||
#define CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/internal/port.h"
|
||||
#include "ceres/sparse_matrix.h"
|
||||
#include "ceres/types.h"
|
||||
@@ -45,7 +46,7 @@ namespace internal {
|
||||
|
||||
class TripletSparseMatrix;
|
||||
|
||||
class CompressedRowSparseMatrix : public SparseMatrix {
|
||||
class CERES_EXPORT_INTERNAL CompressedRowSparseMatrix : public SparseMatrix {
|
||||
public:
|
||||
enum StorageType {
|
||||
UNSYMMETRIC,
|
||||
|
@@ -152,7 +152,6 @@ class ConcurrentQueue {
|
||||
bool wait_;
|
||||
};
|
||||
|
||||
|
||||
} // namespace internal
|
||||
} // namespace ceres
|
||||
|
||||
|
@@ -68,7 +68,8 @@ ConditionedCostFunction::ConditionedCostFunction(
|
||||
|
||||
ConditionedCostFunction::~ConditionedCostFunction() {
|
||||
if (ownership_ == TAKE_OWNERSHIP) {
|
||||
STLDeleteUniqueContainerPointers(conditioners_.begin(), conditioners_.end());
|
||||
STLDeleteUniqueContainerPointers(conditioners_.begin(),
|
||||
conditioners_.end());
|
||||
} else {
|
||||
wrapped_cost_function_.release();
|
||||
}
|
||||
@@ -77,8 +78,8 @@ ConditionedCostFunction::~ConditionedCostFunction() {
|
||||
bool ConditionedCostFunction::Evaluate(double const* const* parameters,
|
||||
double* residuals,
|
||||
double** jacobians) const {
|
||||
bool success = wrapped_cost_function_->Evaluate(parameters, residuals,
|
||||
jacobians);
|
||||
bool success =
|
||||
wrapped_cost_function_->Evaluate(parameters, residuals, jacobians);
|
||||
if (!success) {
|
||||
return false;
|
||||
}
|
||||
@@ -102,9 +103,8 @@ bool ConditionedCostFunction::Evaluate(double const* const* parameters,
|
||||
|
||||
double unconditioned_residual = residuals[r];
|
||||
double* parameter_pointer = &unconditioned_residual;
|
||||
success = conditioners_[r]->Evaluate(¶meter_pointer,
|
||||
&residuals[r],
|
||||
conditioner_derivative_pointer2);
|
||||
success = conditioners_[r]->Evaluate(
|
||||
¶meter_pointer, &residuals[r], conditioner_derivative_pointer2);
|
||||
if (!success) {
|
||||
return false;
|
||||
}
|
||||
@@ -117,7 +117,8 @@ bool ConditionedCostFunction::Evaluate(double const* const* parameters,
|
||||
int parameter_block_size =
|
||||
wrapped_cost_function_->parameter_block_sizes()[i];
|
||||
VectorRef jacobian_row(jacobians[i] + r * parameter_block_size,
|
||||
parameter_block_size, 1);
|
||||
parameter_block_size,
|
||||
1);
|
||||
jacobian_row *= conditioner_derivative;
|
||||
}
|
||||
}
|
||||
|
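The Evaluate hunks above route the wrapped cost function's residuals through one conditioner each. A hedged usage sketch (the ScaleResidual functor is illustrative, and the constructor argument order of wrapped cost function, conditioners, ownership is an assumption based on the ownership_ and conditioners_ members used above):

#include <vector>

#include "ceres/autodiff_cost_function.h"
#include "ceres/conditioned_cost_function.h"
#include "ceres/cost_function.h"

// Illustrative conditioner: maps one residual r to 2 * r.
struct ScaleResidual {
  template <typename T>
  bool operator()(const T* r, T* out) const {
    out[0] = T(2.0) * r[0];
    return true;
  }
};

// Wraps my_cost (assumed to produce num_residuals residuals) so that every
// residual is doubled before being squared by the solver.
ceres::CostFunction* ConditionExample(ceres::CostFunction* my_cost,
                                      int num_residuals) {
  std::vector<ceres::CostFunction*> conditioners;
  for (int i = 0; i < num_residuals; ++i) {
    conditioners.push_back(
        new ceres::AutoDiffCostFunction<ScaleResidual, 1, 1>(new ScaleResidual));
  }
  return new ceres::ConditionedCostFunction(
      my_cost, conditioners, ceres::TAKE_OWNERSHIP);
}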
@@ -41,6 +41,7 @@
|
||||
|
||||
#include <cmath>
|
||||
#include <cstddef>
|
||||
|
||||
#include "ceres/internal/eigen.h"
|
||||
#include "ceres/linear_operator.h"
|
||||
#include "ceres/stringprintf.h"
|
||||
@@ -51,16 +52,13 @@ namespace ceres {
|
||||
namespace internal {
|
||||
namespace {
|
||||
|
||||
bool IsZeroOrInfinity(double x) {
|
||||
return ((x == 0.0) || std::isinf(x));
|
||||
}
|
||||
bool IsZeroOrInfinity(double x) { return ((x == 0.0) || std::isinf(x)); }
|
||||
|
||||
} // namespace
|
||||
|
||||
ConjugateGradientsSolver::ConjugateGradientsSolver(
|
||||
const LinearSolver::Options& options)
|
||||
: options_(options) {
|
||||
}
|
||||
: options_(options) {}
|
||||
|
||||
LinearSolver::Summary ConjugateGradientsSolver::Solve(
|
||||
LinearOperator* A,
|
||||
@@ -137,7 +135,10 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
|
||||
summary.termination_type = LINEAR_SOLVER_FAILURE;
|
||||
summary.message = StringPrintf(
|
||||
"Numerical failure. beta = rho_n / rho_{n-1} = %e, "
|
||||
"rho_n = %e, rho_{n-1} = %e", beta, rho, last_rho);
|
||||
"rho_n = %e, rho_{n-1} = %e",
|
||||
beta,
|
||||
rho,
|
||||
last_rho);
|
||||
break;
|
||||
}
|
||||
p = z + beta * p;
|
||||
@@ -152,16 +153,20 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
|
||||
summary.message = StringPrintf(
|
||||
"Matrix is indefinite, no more progress can be made. "
|
||||
"p'q = %e. |p| = %e, |q| = %e",
|
||||
pq, p.norm(), q.norm());
|
||||
pq,
|
||||
p.norm(),
|
||||
q.norm());
|
||||
break;
|
||||
}
|
||||
|
||||
const double alpha = rho / pq;
|
||||
if (std::isinf(alpha)) {
|
||||
summary.termination_type = LINEAR_SOLVER_FAILURE;
|
||||
summary.message =
|
||||
StringPrintf("Numerical failure. alpha = rho / pq = %e, "
|
||||
"rho = %e, pq = %e.", alpha, rho, pq);
|
||||
summary.message = StringPrintf(
|
||||
"Numerical failure. alpha = rho / pq = %e, rho = %e, pq = %e.",
|
||||
alpha,
|
||||
rho,
|
||||
pq);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -223,7 +228,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
|
||||
Q0 = Q1;
|
||||
|
||||
// Residual based termination.
|
||||
norm_r = r. norm();
|
||||
norm_r = r.norm();
|
||||
if (norm_r <= tol_r &&
|
||||
summary.num_iterations >= options_.min_num_iterations) {
|
||||
summary.termination_type = LINEAR_SOLVER_SUCCESS;
|
||||
|
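The failure checks above guard the usual conjugate-gradients recurrences (beta = rho_n / rho_{n-1}, alpha = rho / pq). A bare-bones dense Eigen version of the same iteration, without preconditioning or the Q-based and residual-based termination logic shown above, purely for orientation:

#include <cmath>

#include "Eigen/Core"

// Solves A x = b for symmetric positive definite A by conjugate gradients.
Eigen::VectorXd ConjugateGradients(const Eigen::MatrixXd& A,
                                   const Eigen::VectorXd& b,
                                   int max_iterations,
                                   double tolerance) {
  Eigen::VectorXd x = Eigen::VectorXd::Zero(b.size());
  Eigen::VectorXd r = b;  // residual r = b - A x for x = 0
  Eigen::VectorXd p = r;  // search direction
  double rho = r.squaredNorm();
  for (int i = 0; i < max_iterations && std::sqrt(rho) > tolerance; ++i) {
    const Eigen::VectorXd q = A * p;
    const double alpha = rho / p.dot(q);  // pq > 0 for an SPD matrix
    x += alpha * p;
    r -= alpha * q;
    const double rho_new = r.squaredNorm();
    const double beta = rho_new / rho;    // beta = rho_n / rho_{n-1}
    p = r + beta * p;
    rho = rho_new;
  }
  return x;
}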
@@ -34,6 +34,7 @@
|
||||
#ifndef CERES_INTERNAL_CONJUGATE_GRADIENTS_SOLVER_H_
|
||||
#define CERES_INTERNAL_CONJUGATE_GRADIENTS_SOLVER_H_
|
||||
|
||||
#include "ceres/internal/port.h"
|
||||
#include "ceres/linear_solver.h"
|
||||
|
||||
namespace ceres {
|
||||
@@ -54,7 +55,7 @@ class LinearOperator;
|
||||
// For more details see the documentation for
|
||||
// LinearSolver::PerSolveOptions::r_tolerance and
|
||||
// LinearSolver::PerSolveOptions::q_tolerance in linear_solver.h.
|
||||
class ConjugateGradientsSolver : public LinearSolver {
|
||||
class CERES_EXPORT_INTERNAL ConjugateGradientsSolver : public LinearSolver {
|
||||
public:
|
||||
explicit ConjugateGradientsSolver(const LinearSolver::Options& options);
|
||||
Summary Solve(LinearOperator* A,
|
||||
|
4 extern/ceres/internal/ceres/context.cc (vendored)
@@ -34,8 +34,6 @@
|
||||
|
||||
namespace ceres {
|
||||
|
||||
Context* Context::Create() {
|
||||
return new internal::ContextImpl();
|
||||
}
|
||||
Context* Context::Create() { return new internal::ContextImpl(); }
|
||||
|
||||
} // namespace ceres
|
||||
|
1 extern/ceres/internal/ceres/context_impl.cc (vendored)
@@ -37,7 +37,6 @@ void ContextImpl::EnsureMinimumThreads(int num_threads) {
|
||||
#ifdef CERES_USE_CXX_THREADS
|
||||
thread_pool.Resize(num_threads);
|
||||
#endif // CERES_USE_CXX_THREADS
|
||||
|
||||
}
|
||||
} // namespace internal
|
||||
} // namespace ceres
|
||||
|
4 extern/ceres/internal/ceres/context_impl.h (vendored)
@@ -32,7 +32,9 @@
|
||||
#define CERES_INTERNAL_CONTEXT_IMPL_H_
|
||||
|
||||
// This include must come before any #ifndef check on Ceres compile options.
|
||||
// clang-format off
|
||||
#include "ceres/internal/port.h"
|
||||
// clang-format on
|
||||
|
||||
#include "ceres/context.h"
|
||||
|
||||
@@ -43,7 +45,7 @@
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
|
||||
class ContextImpl : public Context {
|
||||
class CERES_EXPORT_INTERNAL ContextImpl : public Context {
|
||||
public:
|
||||
ContextImpl() {}
|
||||
ContextImpl(const ContextImpl&) = delete;
|
||||
|
@@ -64,8 +64,7 @@ CoordinateDescentMinimizer::CoordinateDescentMinimizer(ContextImpl* context)
|
||||
CHECK(context_ != nullptr);
|
||||
}
|
||||
|
||||
CoordinateDescentMinimizer::~CoordinateDescentMinimizer() {
|
||||
}
|
||||
CoordinateDescentMinimizer::~CoordinateDescentMinimizer() {}
|
||||
|
||||
bool CoordinateDescentMinimizer::Init(
|
||||
const Program& program,
|
||||
@@ -82,13 +81,13 @@ bool CoordinateDescentMinimizer::Init(
|
||||
map<int, set<double*>> group_to_elements = ordering.group_to_elements();
|
||||
for (const auto& g_t_e : group_to_elements) {
|
||||
const auto& elements = g_t_e.second;
|
||||
for (double* parameter_block: elements) {
|
||||
for (double* parameter_block : elements) {
|
||||
parameter_blocks_.push_back(parameter_map.find(parameter_block)->second);
|
||||
parameter_block_index[parameter_blocks_.back()] =
|
||||
parameter_blocks_.size() - 1;
|
||||
}
|
||||
independent_set_offsets_.push_back(
|
||||
independent_set_offsets_.back() + elements.size());
|
||||
independent_set_offsets_.push_back(independent_set_offsets_.back() +
|
||||
elements.size());
|
||||
}
|
||||
|
||||
// The ordering does not have to contain all parameter blocks, so
|
||||
@@ -126,10 +125,9 @@ bool CoordinateDescentMinimizer::Init(
|
||||
return true;
|
||||
}
|
||||
|
||||
void CoordinateDescentMinimizer::Minimize(
|
||||
const Minimizer::Options& options,
|
||||
double* parameters,
|
||||
Solver::Summary* summary) {
|
||||
void CoordinateDescentMinimizer::Minimize(const Minimizer::Options& options,
|
||||
double* parameters,
|
||||
Solver::Summary* summary) {
|
||||
// Set the state and mark all parameter blocks constant.
|
||||
for (int i = 0; i < parameter_blocks_.size(); ++i) {
|
||||
ParameterBlock* parameter_block = parameter_blocks_[i];
|
||||
@@ -202,7 +200,7 @@ void CoordinateDescentMinimizer::Minimize(
|
||||
});
|
||||
}
|
||||
|
||||
for (int i = 0; i < parameter_blocks_.size(); ++i) {
|
||||
for (int i = 0; i < parameter_blocks_.size(); ++i) {
|
||||
parameter_blocks_[i]->SetVarying();
|
||||
}
|
||||
|
||||
@@ -251,10 +249,10 @@ bool CoordinateDescentMinimizer::IsOrderingValid(
|
||||
// Verify that each group is an independent set
|
||||
for (const auto& g_t_e : group_to_elements) {
|
||||
if (!program.IsParameterBlockSetIndependent(g_t_e.second)) {
|
||||
*message =
|
||||
StringPrintf("The user-provided "
|
||||
"parameter_blocks_for_inner_iterations does not "
|
||||
"form an independent set. Group Id: %d", g_t_e.first);
|
||||
*message = StringPrintf(
|
||||
"The user-provided parameter_blocks_for_inner_iterations does not "
|
||||
"form an independent set. Group Id: %d",
|
||||
g_t_e.first);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
9 extern/ceres/internal/ceres/corrector.cc (vendored)
@@ -30,8 +30,9 @@
|
||||
|
||||
#include "ceres/corrector.h"
|
||||
|
||||
#include <cstddef>
|
||||
#include <cmath>
|
||||
#include <cstddef>
|
||||
|
||||
#include "ceres/internal/eigen.h"
|
||||
#include "glog/logging.h"
|
||||
|
||||
@@ -147,9 +148,9 @@ void Corrector::CorrectJacobian(const int num_rows,
|
||||
}
|
||||
|
||||
for (int r = 0; r < num_rows; ++r) {
|
||||
jacobian[r * num_cols + c] = sqrt_rho1_ *
|
||||
(jacobian[r * num_cols + c] -
|
||||
alpha_sq_norm_ * residuals[r] * r_transpose_j);
|
||||
jacobian[r * num_cols + c] =
|
||||
sqrt_rho1_ * (jacobian[r * num_cols + c] -
|
||||
alpha_sq_norm_ * residuals[r] * r_transpose_j);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
4 extern/ceres/internal/ceres/corrector.h (vendored)
@@ -35,6 +35,8 @@
|
||||
#ifndef CERES_INTERNAL_CORRECTOR_H_
|
||||
#define CERES_INTERNAL_CORRECTOR_H_
|
||||
|
||||
#include "ceres/internal/port.h"
|
||||
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
|
||||
@@ -46,7 +48,7 @@ namespace internal {
|
||||
// gauss newton approximation and then take its square root to get the
|
||||
// corresponding corrections to the residual and jacobian. For the
|
||||
// full expressions see Eq. 10 and 11 in BANS by Triggs et al.
|
||||
class Corrector {
|
||||
class CERES_EXPORT_INTERNAL Corrector {
|
||||
public:
|
||||
// The constructor takes the squared norm, the value, the first and
|
||||
// second derivatives of the LossFunction. It precalculates some of
|
||||
|
13 extern/ceres/internal/ceres/covariance.cc (vendored)
@@ -32,6 +32,7 @@
|
||||
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "ceres/covariance_impl.h"
|
||||
#include "ceres/problem.h"
|
||||
#include "ceres/problem_impl.h"
|
||||
@@ -46,8 +47,7 @@ Covariance::Covariance(const Covariance::Options& options) {
|
||||
impl_.reset(new internal::CovarianceImpl(options));
|
||||
}
|
||||
|
||||
Covariance::~Covariance() {
|
||||
}
|
||||
Covariance::~Covariance() {}
|
||||
|
||||
bool Covariance::Compute(
|
||||
const vector<pair<const double*, const double*>>& covariance_blocks,
|
||||
@@ -55,9 +55,8 @@ bool Covariance::Compute(
|
||||
return impl_->Compute(covariance_blocks, problem->impl_.get());
|
||||
}
|
||||
|
||||
bool Covariance::Compute(
|
||||
const vector<const double*>& parameter_blocks,
|
||||
Problem* problem) {
|
||||
bool Covariance::Compute(const vector<const double*>& parameter_blocks,
|
||||
Problem* problem) {
|
||||
return impl_->Compute(parameter_blocks, problem->impl_.get());
|
||||
}
|
||||
|
||||
@@ -89,8 +88,8 @@ bool Covariance::GetCovarianceMatrix(
|
||||
}
|
||||
|
||||
bool Covariance::GetCovarianceMatrixInTangentSpace(
|
||||
const std::vector<const double *>& parameter_blocks,
|
||||
double *covariance_matrix) const {
|
||||
const std::vector<const double*>& parameter_blocks,
|
||||
double* covariance_matrix) const {
|
||||
return impl_->GetCovarianceMatrixInTangentOrAmbientSpace(parameter_blocks,
|
||||
false, // tangent
|
||||
covariance_matrix);
|
||||
|
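The public entry points being reformatted here are typically driven as follows (a sketch based on the Covariance interface visible above; problem, x and y are assumed to be an existing ceres::Problem and two of its 3-dimensional parameter blocks):

#include <utility>
#include <vector>

#include "ceres/covariance.h"
#include "ceres/problem.h"

void EstimateCovariance(ceres::Problem* problem, double* x, double* y) {
  ceres::Covariance::Options options;
  ceres::Covariance covariance(options);

  std::vector<std::pair<const double*, const double*>> covariance_blocks;
  covariance_blocks.push_back(std::make_pair(x, x));
  covariance_blocks.push_back(std::make_pair(x, y));

  if (!covariance.Compute(covariance_blocks, problem)) {
    return;  // e.g. rank-deficient Jacobian
  }

  double covariance_xx[3 * 3];  // x assumed to have 3 parameters
  covariance.GetCovarianceBlock(x, x, covariance_xx);
}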
235 extern/ceres/internal/ceres/covariance_impl.cc (vendored)
@@ -39,10 +39,9 @@
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "Eigen/SVD"
|
||||
#include "Eigen/SparseCore"
|
||||
#include "Eigen/SparseQR"
|
||||
#include "Eigen/SVD"
|
||||
|
||||
#include "ceres/compressed_col_sparse_matrix_utils.h"
|
||||
#include "ceres/compressed_row_sparse_matrix.h"
|
||||
#include "ceres/covariance.h"
|
||||
@@ -61,25 +60,17 @@
|
||||
namespace ceres {
|
||||
namespace internal {
|
||||
|
||||
using std::make_pair;
|
||||
using std::map;
|
||||
using std::pair;
|
||||
using std::sort;
|
||||
using std::swap;
|
||||
using std::vector;
|
||||
|
||||
typedef vector<pair<const double*, const double*>> CovarianceBlocks;
|
||||
using CovarianceBlocks = std::vector<std::pair<const double*, const double*>>;
|
||||
|
||||
CovarianceImpl::CovarianceImpl(const Covariance::Options& options)
|
||||
: options_(options),
|
||||
is_computed_(false),
|
||||
is_valid_(false) {
|
||||
: options_(options), is_computed_(false), is_valid_(false) {
|
||||
#ifdef CERES_NO_THREADS
|
||||
if (options_.num_threads > 1) {
|
||||
LOG(WARNING)
|
||||
<< "No threading support is compiled into this binary; "
|
||||
<< "only options.num_threads = 1 is supported. Switching "
|
||||
<< "to single threaded mode.";
|
||||
LOG(WARNING) << "No threading support is compiled into this binary; "
|
||||
<< "only options.num_threads = 1 is supported. Switching "
|
||||
<< "to single threaded mode.";
|
||||
options_.num_threads = 1;
|
||||
}
|
||||
#endif
|
||||
@@ -88,16 +79,16 @@ CovarianceImpl::CovarianceImpl(const Covariance::Options& options)
|
||||
evaluate_options_.apply_loss_function = options_.apply_loss_function;
|
||||
}
|
||||
|
||||
CovarianceImpl::~CovarianceImpl() {
|
||||
}
|
||||
CovarianceImpl::~CovarianceImpl() {}
|
||||
|
||||
template <typename T> void CheckForDuplicates(vector<T> blocks) {
|
||||
template <typename T>
|
||||
void CheckForDuplicates(std::vector<T> blocks) {
|
||||
sort(blocks.begin(), blocks.end());
|
||||
typename vector<T>::iterator it =
|
||||
typename std::vector<T>::iterator it =
|
||||
std::adjacent_find(blocks.begin(), blocks.end());
|
||||
if (it != blocks.end()) {
|
||||
// In case there are duplicates, we search for their location.
|
||||
map<T, vector<int>> blocks_map;
|
||||
std::map<T, std::vector<int>> blocks_map;
|
||||
for (int i = 0; i < blocks.size(); ++i) {
|
||||
blocks_map[blocks[i]].push_back(i);
|
||||
}
|
||||
@@ -122,7 +113,8 @@ template <typename T> void CheckForDuplicates(vector<T> blocks) {
|
||||
|
||||
bool CovarianceImpl::Compute(const CovarianceBlocks& covariance_blocks,
|
||||
ProblemImpl* problem) {
|
||||
CheckForDuplicates<pair<const double*, const double*>>(covariance_blocks);
|
||||
CheckForDuplicates<std::pair<const double*, const double*>>(
|
||||
covariance_blocks);
|
||||
problem_ = problem;
|
||||
parameter_block_to_row_index_.clear();
|
||||
covariance_matrix_.reset(NULL);
|
||||
@@ -132,14 +124,14 @@ bool CovarianceImpl::Compute(const CovarianceBlocks& covariance_blocks,
|
||||
return is_valid_;
|
||||
}
|
||||
|
||||
bool CovarianceImpl::Compute(const vector<const double*>& parameter_blocks,
|
||||
bool CovarianceImpl::Compute(const std::vector<const double*>& parameter_blocks,
|
||||
ProblemImpl* problem) {
|
||||
CheckForDuplicates<const double*>(parameter_blocks);
|
||||
CovarianceBlocks covariance_blocks;
|
||||
for (int i = 0; i < parameter_blocks.size(); ++i) {
|
||||
for (int j = i; j < parameter_blocks.size(); ++j) {
|
||||
covariance_blocks.push_back(make_pair(parameter_blocks[i],
|
||||
parameter_blocks[j]));
|
||||
covariance_blocks.push_back(
|
||||
std::make_pair(parameter_blocks[i], parameter_blocks[j]));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -162,13 +154,11 @@ bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
|
||||
if (constant_parameter_blocks_.count(original_parameter_block1) > 0 ||
|
||||
constant_parameter_blocks_.count(original_parameter_block2) > 0) {
|
||||
const ProblemImpl::ParameterMap& parameter_map = problem_->parameter_map();
|
||||
ParameterBlock* block1 =
|
||||
FindOrDie(parameter_map,
|
||||
const_cast<double*>(original_parameter_block1));
|
||||
ParameterBlock* block1 = FindOrDie(
|
||||
parameter_map, const_cast<double*>(original_parameter_block1));
|
||||
|
||||
ParameterBlock* block2 =
|
||||
FindOrDie(parameter_map,
|
||||
const_cast<double*>(original_parameter_block2));
|
||||
ParameterBlock* block2 = FindOrDie(
|
||||
parameter_map, const_cast<double*>(original_parameter_block2));
|
||||
|
||||
const int block1_size = block1->Size();
|
||||
const int block2_size = block2->Size();
|
||||
@@ -210,8 +200,7 @@ bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
|
||||
|
||||
if (offset == row_size) {
|
||||
LOG(ERROR) << "Unable to find covariance block for "
|
||||
<< original_parameter_block1 << " "
|
||||
<< original_parameter_block2;
|
||||
<< original_parameter_block1 << " " << original_parameter_block2;
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -227,9 +216,8 @@ bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
|
||||
const int block2_size = block2->Size();
|
||||
const int block2_local_size = block2->LocalSize();
|
||||
|
||||
ConstMatrixRef cov(covariance_matrix_->values() + rows[row_begin],
|
||||
block1_size,
|
||||
row_size);
|
||||
ConstMatrixRef cov(
|
||||
covariance_matrix_->values() + rows[row_begin], block1_size, row_size);
|
||||
|
||||
// Fast path when there are no local parameterizations or if the
|
||||
// user does not want it lifted to the ambient space.
|
||||
@@ -237,8 +225,8 @@ bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
|
||||
!lift_covariance_to_ambient_space) {
|
||||
if (transpose) {
|
||||
MatrixRef(covariance_block, block2_local_size, block1_local_size) =
|
||||
cov.block(0, offset, block1_local_size,
|
||||
block2_local_size).transpose();
|
||||
cov.block(0, offset, block1_local_size, block2_local_size)
|
||||
.transpose();
|
||||
} else {
|
||||
MatrixRef(covariance_block, block1_local_size, block2_local_size) =
|
||||
cov.block(0, offset, block1_local_size, block2_local_size);
|
||||
@@ -298,7 +286,7 @@ bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
|
||||
}
|
||||
|
||||
bool CovarianceImpl::GetCovarianceMatrixInTangentOrAmbientSpace(
|
||||
const vector<const double*>& parameters,
|
||||
const std::vector<const double*>& parameters,
|
||||
bool lift_covariance_to_ambient_space,
|
||||
double* covariance_matrix) const {
|
||||
CHECK(is_computed_)
|
||||
@@ -310,8 +298,8 @@ bool CovarianceImpl::GetCovarianceMatrixInTangentOrAmbientSpace(
|
||||
const ProblemImpl::ParameterMap& parameter_map = problem_->parameter_map();
|
||||
// For OpenMP compatibility we need to define these vectors in advance
|
||||
const int num_parameters = parameters.size();
|
||||
vector<int> parameter_sizes;
|
||||
vector<int> cum_parameter_size;
|
||||
std::vector<int> parameter_sizes;
|
||||
std::vector<int> cum_parameter_size;
|
||||
parameter_sizes.reserve(num_parameters);
|
||||
cum_parameter_size.resize(num_parameters + 1);
|
||||
cum_parameter_size[0] = 0;
|
||||
@@ -324,7 +312,8 @@ bool CovarianceImpl::GetCovarianceMatrixInTangentOrAmbientSpace(
|
||||
parameter_sizes.push_back(block->LocalSize());
|
||||
}
|
||||
}
|
||||
std::partial_sum(parameter_sizes.begin(), parameter_sizes.end(),
|
||||
std::partial_sum(parameter_sizes.begin(),
|
||||
parameter_sizes.end(),
|
||||
cum_parameter_size.begin() + 1);
|
||||
const int max_covariance_block_size =
|
||||
*std::max_element(parameter_sizes.begin(), parameter_sizes.end());
|
||||
@@ -343,65 +332,66 @@ bool CovarianceImpl::GetCovarianceMatrixInTangentOrAmbientSpace(
|
||||
// i = 1:n, j = i:n.
|
||||
int iteration_count = (num_parameters * (num_parameters + 1)) / 2;
|
||||
problem_->context()->EnsureMinimumThreads(num_threads);
|
||||
ParallelFor(
|
||||
problem_->context(),
|
||||
0,
|
||||
iteration_count,
|
||||
num_threads,
|
||||
[&](int thread_id, int k) {
|
||||
int i, j;
|
||||
LinearIndexToUpperTriangularIndex(k, num_parameters, &i, &j);
|
||||
ParallelFor(problem_->context(),
|
||||
0,
|
||||
iteration_count,
|
||||
num_threads,
|
||||
[&](int thread_id, int k) {
|
||||
int i, j;
|
||||
LinearIndexToUpperTriangularIndex(k, num_parameters, &i, &j);
|
||||
|
||||
int covariance_row_idx = cum_parameter_size[i];
|
||||
int covariance_col_idx = cum_parameter_size[j];
|
||||
int size_i = parameter_sizes[i];
|
||||
int size_j = parameter_sizes[j];
|
||||
double* covariance_block =
|
||||
workspace.get() + thread_id * max_covariance_block_size *
|
||||
max_covariance_block_size;
|
||||
if (!GetCovarianceBlockInTangentOrAmbientSpace(
|
||||
parameters[i], parameters[j],
|
||||
lift_covariance_to_ambient_space, covariance_block)) {
|
||||
success = false;
|
||||
}
|
||||
int covariance_row_idx = cum_parameter_size[i];
|
||||
int covariance_col_idx = cum_parameter_size[j];
|
||||
int size_i = parameter_sizes[i];
|
||||
int size_j = parameter_sizes[j];
|
||||
double* covariance_block =
|
||||
workspace.get() + thread_id * max_covariance_block_size *
|
||||
max_covariance_block_size;
|
||||
if (!GetCovarianceBlockInTangentOrAmbientSpace(
|
||||
parameters[i],
|
||||
parameters[j],
|
||||
lift_covariance_to_ambient_space,
|
||||
covariance_block)) {
|
||||
success = false;
|
||||
}
|
||||
|
||||
covariance.block(covariance_row_idx, covariance_col_idx, size_i,
|
||||
size_j) = MatrixRef(covariance_block, size_i, size_j);
|
||||
covariance.block(
|
||||
covariance_row_idx, covariance_col_idx, size_i, size_j) =
|
||||
MatrixRef(covariance_block, size_i, size_j);
|
||||
|
||||
if (i != j) {
|
||||
covariance.block(covariance_col_idx, covariance_row_idx,
|
||||
size_j, size_i) =
|
||||
MatrixRef(covariance_block, size_i, size_j).transpose();
|
||||
}
|
||||
});
|
||||
if (i != j) {
|
||||
covariance.block(
|
||||
covariance_col_idx, covariance_row_idx, size_j, size_i) =
|
||||
MatrixRef(covariance_block, size_i, size_j).transpose();
|
||||
}
|
||||
});
|
||||
return success;
|
||||
}
|
||||
|
||||
// Determine the sparsity pattern of the covariance matrix based on
|
||||
// the block pairs requested by the user.
|
||||
bool CovarianceImpl::ComputeCovarianceSparsity(
|
||||
const CovarianceBlocks& original_covariance_blocks,
|
||||
ProblemImpl* problem) {
|
||||
const CovarianceBlocks& original_covariance_blocks, ProblemImpl* problem) {
|
||||
EventLogger event_logger("CovarianceImpl::ComputeCovarianceSparsity");
|
||||
|
||||
// Determine an ordering for the parameter block, by sorting the
|
||||
// parameter blocks by their pointers.
|
||||
vector<double*> all_parameter_blocks;
|
||||
std::vector<double*> all_parameter_blocks;
|
||||
problem->GetParameterBlocks(&all_parameter_blocks);
|
||||
const ProblemImpl::ParameterMap& parameter_map = problem->parameter_map();
|
||||
std::unordered_set<ParameterBlock*> parameter_blocks_in_use;
|
||||
vector<ResidualBlock*> residual_blocks;
|
||||
std::vector<ResidualBlock*> residual_blocks;
|
||||
problem->GetResidualBlocks(&residual_blocks);
|
||||
|
||||
for (int i = 0; i < residual_blocks.size(); ++i) {
|
||||
ResidualBlock* residual_block = residual_blocks[i];
|
||||
parameter_blocks_in_use.insert(residual_block->parameter_blocks(),
|
||||
residual_block->parameter_blocks() +
|
||||
residual_block->NumParameterBlocks());
|
||||
residual_block->NumParameterBlocks());
|
||||
}
|
||||
|
||||
constant_parameter_blocks_.clear();
|
||||
vector<double*>& active_parameter_blocks =
|
||||
std::vector<double*>& active_parameter_blocks =
|
||||
evaluate_options_.parameter_blocks;
|
||||
active_parameter_blocks.clear();
|
||||
for (int i = 0; i < all_parameter_blocks.size(); ++i) {
|
||||
@@ -434,8 +424,8 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
|
||||
// triangular part of the matrix.
|
||||
int num_nonzeros = 0;
|
||||
CovarianceBlocks covariance_blocks;
|
||||
for (int i = 0; i < original_covariance_blocks.size(); ++i) {
|
||||
const pair<const double*, const double*>& block_pair =
|
||||
for (int i = 0; i < original_covariance_blocks.size(); ++i) {
|
||||
const std::pair<const double*, const double*>& block_pair =
|
||||
original_covariance_blocks[i];
|
||||
if (constant_parameter_blocks_.count(block_pair.first) > 0 ||
|
||||
constant_parameter_blocks_.count(block_pair.second) > 0) {
|
||||
@@ -450,8 +440,8 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
|
||||
|
||||
// Make sure we are constructing a block upper triangular matrix.
|
||||
if (index1 > index2) {
|
||||
covariance_blocks.push_back(make_pair(block_pair.second,
|
||||
block_pair.first));
|
||||
covariance_blocks.push_back(
|
||||
std::make_pair(block_pair.second, block_pair.first));
|
||||
} else {
|
||||
covariance_blocks.push_back(block_pair);
|
||||
}
|
||||
@@ -466,7 +456,7 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
|
||||
// Sort the block pairs. As a consequence we get the covariance
|
||||
// blocks as they will occur in the CompressedRowSparseMatrix that
|
||||
// will store the covariance.
|
||||
sort(covariance_blocks.begin(), covariance_blocks.end());
|
||||
std::sort(covariance_blocks.begin(), covariance_blocks.end());
|
||||
|
||||
// Fill the sparsity pattern of the covariance matrix.
|
||||
covariance_matrix_.reset(
|
||||
@@ -486,10 +476,10 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
|
||||
// values of the parameter blocks. Thus iterating over the keys of
|
||||
// parameter_block_to_row_index_ corresponds to iterating over the
|
||||
// rows of the covariance matrix in order.
|
||||
int i = 0; // index into covariance_blocks.
|
||||
int i = 0; // index into covariance_blocks.
|
||||
int cursor = 0; // index into the covariance matrix.
|
||||
for (const auto& entry : parameter_block_to_row_index_) {
|
||||
const double* row_block = entry.first;
|
||||
const double* row_block = entry.first;
|
||||
const int row_block_size = problem->ParameterBlockLocalSize(row_block);
|
||||
int row_begin = entry.second;
|
||||
|
||||
@@ -498,7 +488,7 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
|
||||
int num_col_blocks = 0;
|
||||
int num_columns = 0;
|
||||
for (int j = i; j < covariance_blocks.size(); ++j, ++num_col_blocks) {
|
||||
const pair<const double*, const double*>& block_pair =
|
||||
const std::pair<const double*, const double*>& block_pair =
|
||||
covariance_blocks[j];
|
||||
if (block_pair.first != row_block) {
|
||||
break;
|
||||
@@ -519,7 +509,7 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
|
||||
}
|
||||
}
|
||||
|
||||
i+= num_col_blocks;
|
||||
i += num_col_blocks;
|
||||
}
|
||||
|
||||
rows[num_rows] = cursor;
|
||||
@@ -580,9 +570,9 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
|
||||
const int num_cols = jacobian.num_cols;
|
||||
const int num_nonzeros = jacobian.values.size();
|
||||
|
||||
vector<SuiteSparse_long> transpose_rows(num_cols + 1, 0);
|
||||
vector<SuiteSparse_long> transpose_cols(num_nonzeros, 0);
|
||||
vector<double> transpose_values(num_nonzeros, 0);
|
||||
std::vector<SuiteSparse_long> transpose_rows(num_cols + 1, 0);
|
||||
std::vector<SuiteSparse_long> transpose_cols(num_nonzeros, 0);
|
||||
std::vector<double> transpose_values(num_nonzeros, 0);
|
||||
|
||||
for (int idx = 0; idx < num_nonzeros; ++idx) {
|
||||
transpose_rows[jacobian.cols[idx] + 1] += 1;
|
||||
@@ -602,7 +592,7 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
|
||||
}
|
||||
}
|
||||
|
||||
for (int i = transpose_rows.size() - 1; i > 0 ; --i) {
|
||||
for (int i = transpose_rows.size() - 1; i > 0; --i) {
|
||||
transpose_rows[i] = transpose_rows[i - 1];
|
||||
}
|
||||
transpose_rows[0] = 0;
|
||||
@@ -642,14 +632,13 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
// more efficient, both in runtime as well as the quality of
// ordering computed. So, it maybe worth doing that analysis
// separately.
const SuiteSparse_long rank =
SuiteSparseQR<double>(SPQR_ORDERING_BESTAMD,
SPQR_DEFAULT_TOL,
cholmod_jacobian.ncol,
&cholmod_jacobian,
&R,
&permutation,
&cc);
const SuiteSparse_long rank = SuiteSparseQR<double>(SPQR_ORDERING_BESTAMD,
SPQR_DEFAULT_TOL,
cholmod_jacobian.ncol,
&cholmod_jacobian,
&R,
&permutation,
&cc);
event_logger.AddEvent("Numeric Factorization");
if (R == nullptr) {
LOG(ERROR) << "Something is wrong. SuiteSparseQR returned R = nullptr.";
@@ -668,7 +657,7 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
return false;
}

vector<int> inverse_permutation(num_cols);
std::vector<int> inverse_permutation(num_cols);
if (permutation) {
for (SuiteSparse_long i = 0; i < num_cols; ++i) {
inverse_permutation[permutation[i]] = i;
@@ -697,19 +686,18 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {

problem_->context()->EnsureMinimumThreads(num_threads);
ParallelFor(
problem_->context(),
0,
num_cols,
num_threads,
[&](int thread_id, int r) {
problem_->context(), 0, num_cols, num_threads, [&](int thread_id, int r) {
const int row_begin = rows[r];
const int row_end = rows[r + 1];
if (row_end != row_begin) {
double* solution = workspace.get() + thread_id * num_cols;
SolveRTRWithSparseRHS<SuiteSparse_long>(
num_cols, static_cast<SuiteSparse_long*>(R->i),
static_cast<SuiteSparse_long*>(R->p), static_cast<double*>(R->x),
inverse_permutation[r], solution);
num_cols,
static_cast<SuiteSparse_long*>(R->i),
static_cast<SuiteSparse_long*>(R->p),
static_cast<double*>(R->x),
inverse_permutation[r],
solution);
for (int idx = row_begin; idx < row_end; ++idx) {
const int c = cols[idx];
values[idx] = solution[inverse_permutation[c]];
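SolveRTRWithSparseRHS recovers one row of the covariance at a time by solving R^T R x = e_i with two triangular solves against the sparse R factor. A dense Eigen sketch of that step, ignoring the fill-reducing permutation (the values below are made up for illustration):

#include <iostream>
#include "Eigen/Dense"

int main() {
  // Stand-in for the R factor of a QR (or Cholesky) factorization of the
  // Jacobian: any invertible upper-triangular matrix works for the sketch.
  Eigen::Matrix3d R;
  R << 2, 1, 0,
       0, 3, 1,
       0, 0, 4;

  // Column i of (R^T R)^{-1} comes from two triangular solves:
  // R^T y = e_i, then R x = y.
  const int i = 1;
  Eigen::Vector3d e = Eigen::Vector3d::Zero();
  e(i) = 1.0;
  Eigen::Vector3d y = R.transpose().triangularView<Eigen::Lower>().solve(e);
  Eigen::Vector3d x = R.triangularView<Eigen::Upper>().solve(y);

  std::cout << "column " << i << " of (R^T R)^{-1}:\n" << x << "\n";
  return 0;
}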
@@ -801,10 +789,9 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD() {
1.0 / (singular_values[i] * singular_values[i]);
}

Matrix dense_covariance =
svd.matrixV() *
inverse_squared_singular_values.asDiagonal() *
svd.matrixV().transpose();
Matrix dense_covariance = svd.matrixV() *
inverse_squared_singular_values.asDiagonal() *
svd.matrixV().transpose();
event_logger.AddEvent("PseudoInverse");

const int num_rows = covariance_matrix_->num_rows();
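The dense path computes the covariance as the Moore-Penrose pseudo-inverse of J^T J, i.e. V diag(1/sigma_i^2) V^T from the SVD of J. A small Eigen sketch of that formula with a toy Jacobian and a crude tolerance (both are assumptions for illustration; the real code's tolerance handling is more involved):

#include <iostream>
#include "Eigen/Dense"

int main() {
  // A toy Jacobian with more rows than columns.
  Eigen::MatrixXd J(4, 2);
  J << 1, 0,
       0, 1,
       1, 1,
       2, 0;

  Eigen::JacobiSVD<Eigen::MatrixXd> svd(
      J, Eigen::ComputeThinU | Eigen::ComputeThinV);
  const Eigen::VectorXd& sigma = svd.singularValues();

  // pinv(J^T J) = V * diag(1 / sigma_i^2) * V^T, zeroing the contribution
  // of singular values below a crude threshold.
  Eigen::VectorXd inv_sq = Eigen::VectorXd::Zero(sigma.size());
  const double tol = 1e-12 * sigma(0);
  for (int i = 0; i < sigma.size(); ++i) {
    if (sigma(i) > tol) {
      inv_sq(i) = 1.0 / (sigma(i) * sigma(i));
    }
  }
  const Eigen::MatrixXd covariance =
      svd.matrixV() * inv_sq.asDiagonal() * svd.matrixV().transpose();
  std::cout << covariance << "\n";
  return 0;
}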
@@ -839,13 +826,16 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR() {
// Convert the matrix to column major order as required by SparseQR.
EigenSparseMatrix sparse_jacobian =
Eigen::MappedSparseMatrix<double, Eigen::RowMajor>(
jacobian.num_rows, jacobian.num_cols,
jacobian.num_rows,
jacobian.num_cols,
static_cast<int>(jacobian.values.size()),
jacobian.rows.data(), jacobian.cols.data(), jacobian.values.data());
jacobian.rows.data(),
jacobian.cols.data(),
jacobian.values.data());
event_logger.AddEvent("ConvertToSparseMatrix");

Eigen::SparseQR<EigenSparseMatrix, Eigen::COLAMDOrdering<int>>
qr_solver(sparse_jacobian);
Eigen::SparseQR<EigenSparseMatrix, Eigen::COLAMDOrdering<int>> qr_solver(
sparse_jacobian);
event_logger.AddEvent("QRDecomposition");

if (qr_solver.info() != Eigen::Success) {
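For orientation, this is roughly how Eigen's SparseQR is driven outside of Ceres: assemble a compressed column-major matrix, factor it with a COLAMD ordering, then check info(). A standalone sketch (not the Ceres wrapper types):

#include <iostream>
#include <vector>
#include "Eigen/Sparse"

int main() {
  // Assemble a small column-major sparse matrix from triplets.
  std::vector<Eigen::Triplet<double>> triplets = {
      {0, 0, 2.0}, {1, 1, 3.0}, {2, 0, 1.0}, {2, 1, 1.0}};
  Eigen::SparseMatrix<double> A(3, 2);
  A.setFromTriplets(triplets.begin(), triplets.end());
  A.makeCompressed();  // SparseQR requires compressed storage.

  Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int>> qr(A);
  if (qr.info() != Eigen::Success) {
    std::cerr << "QR factorization failed\n";
    return 1;
  }
  std::cout << "rank = " << qr.rank() << "\n";
  return 0;
}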
@@ -883,22 +873,17 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR() {

problem_->context()->EnsureMinimumThreads(num_threads);
ParallelFor(
problem_->context(),
0,
num_cols,
num_threads,
[&](int thread_id, int r) {
problem_->context(), 0, num_cols, num_threads, [&](int thread_id, int r) {
const int row_begin = rows[r];
const int row_end = rows[r + 1];
if (row_end != row_begin) {
double* solution = workspace.get() + thread_id * num_cols;
SolveRTRWithSparseRHS<int>(
num_cols,
qr_solver.matrixR().innerIndexPtr(),
qr_solver.matrixR().outerIndexPtr(),
&qr_solver.matrixR().data().value(0),
inverse_permutation.indices().coeff(r),
solution);
SolveRTRWithSparseRHS<int>(num_cols,
qr_solver.matrixR().innerIndexPtr(),
qr_solver.matrixR().outerIndexPtr(),
&qr_solver.matrixR().data().value(0),
inverse_permutation.indices().coeff(r),
solution);

// Assign the values of the computed covariance using the
// inverse permutation used in the QR factorization.
extern/ceres/internal/ceres/covariance_impl.h (22 changes, vendored)
@@ -36,7 +36,9 @@
#include <set>
#include <utility>
#include <vector>

#include "ceres/covariance.h"
#include "ceres/internal/port.h"
#include "ceres/problem_impl.h"
#include "ceres/suitesparse.h"

@@ -45,19 +47,17 @@ namespace internal {

class CompressedRowSparseMatrix;

class CovarianceImpl {
class CERES_EXPORT_INTERNAL CovarianceImpl {
public:
explicit CovarianceImpl(const Covariance::Options& options);
~CovarianceImpl();

bool Compute(
const std::vector<std::pair<const double*,
const double*>>& covariance_blocks,
ProblemImpl* problem);
bool Compute(const std::vector<std::pair<const double*, const double*>>&
covariance_blocks,
ProblemImpl* problem);

bool Compute(
const std::vector<const double*>& parameter_blocks,
ProblemImpl* problem);
bool Compute(const std::vector<const double*>& parameter_blocks,
ProblemImpl* problem);

bool GetCovarianceBlockInTangentOrAmbientSpace(
const double* parameter_block1,
@@ -68,11 +68,11 @@ class CovarianceImpl {
bool GetCovarianceMatrixInTangentOrAmbientSpace(
const std::vector<const double*>& parameters,
bool lift_covariance_to_ambient_space,
double *covariance_matrix) const;
double* covariance_matrix) const;

bool ComputeCovarianceSparsity(
const std::vector<std::pair<const double*,
const double*>>& covariance_blocks,
const std::vector<std::pair<const double*, const double*>>&
covariance_blocks,
ProblemImpl* problem);

bool ComputeCovarianceValues();
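This internal class sits behind the public ceres::Covariance API. For orientation, typical user-side code looks roughly like the following sketch, assuming a ceres::Problem built elsewhere with parameter blocks x (size 3) and y (size 2):

#include <utility>
#include <vector>

#include "ceres/covariance.h"
#include "ceres/problem.h"

// Sketch only: the problem setup and the block sizes are assumptions.
void EstimateCovariance(ceres::Problem* problem, const double* x, const double* y) {
  ceres::Covariance::Options options;
  ceres::Covariance covariance(options);

  std::vector<std::pair<const double*, const double*>> covariance_blocks;
  covariance_blocks.push_back(std::make_pair(x, x));
  covariance_blocks.push_back(std::make_pair(x, y));

  if (!covariance.Compute(covariance_blocks, problem)) {
    return;  // e.g. the Jacobian was rank deficient.
  }

  double covariance_xx[3 * 3];
  double covariance_xy[3 * 2];
  covariance.GetCovarianceBlock(x, x, covariance_xx);
  covariance.GetCovarianceBlock(x, y, covariance_xy);
}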
extern/ceres/internal/ceres/cxsparse.cc (3 changes, vendored)
@@ -33,13 +33,12 @@

#ifndef CERES_NO_CXSPARSE

#include "ceres/cxsparse.h"

#include <string>
#include <vector>

#include "ceres/compressed_col_sparse_matrix_utils.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/cxsparse.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
extern/ceres/internal/ceres/cxsparse.h (2 changes, vendored)
@@ -166,7 +166,7 @@ class CXSparseCholesky : public SparseCholesky {
} // namespace internal
} // namespace ceres

#else // CERES_NO_CXSPARSE
#else

typedef void cs_dis;
@@ -35,21 +35,19 @@

#include "ceres/casts.h"
#include "ceres/dense_sparse_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/scratch_evaluate_preparer.h"
#include "ceres/internal/eigen.h"

namespace ceres {
namespace internal {

class DenseJacobianWriter {
public:
DenseJacobianWriter(Evaluator::Options /* ignored */,
Program* program)
: program_(program) {
}
DenseJacobianWriter(Evaluator::Options /* ignored */, Program* program)
: program_(program) {}

// JacobianWriter interface.

@@ -61,14 +59,13 @@ class DenseJacobianWriter {
}

SparseMatrix* CreateJacobian() const {
return new DenseSparseMatrix(program_->NumResiduals(),
program_->NumEffectiveParameters(),
true);
return new DenseSparseMatrix(
program_->NumResiduals(), program_->NumEffectiveParameters(), true);
}

void Write(int residual_id,
int residual_offset,
double **jacobians,
double** jacobians,
SparseMatrix* jacobian) {
DenseSparseMatrix* dense_jacobian = down_cast<DenseSparseMatrix*>(jacobian);
const ResidualBlock* residual_block =
@@ -86,15 +83,14 @@ class DenseJacobianWriter {
}

const int parameter_block_size = parameter_block->LocalSize();
ConstMatrixRef parameter_jacobian(jacobians[j],
num_residuals,
parameter_block_size);
ConstMatrixRef parameter_jacobian(
jacobians[j], num_residuals, parameter_block_size);

dense_jacobian->mutable_matrix().block(
residual_offset,
parameter_block->delta_offset(),
num_residuals,
parameter_block_size) = parameter_jacobian;
dense_jacobian->mutable_matrix().block(residual_offset,
parameter_block->delta_offset(),
num_residuals,
parameter_block_size) =
parameter_jacobian;
}
}
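The writer's job reduces to copying each residual block's per-parameter Jacobian into the matching block of one dense matrix; the core operation is an Eigen block assignment. A toy sketch with made-up offsets and sizes:

#include <iostream>
#include "Eigen/Dense"

int main() {
  // Dense Jacobian for a toy problem with 4 residuals and 5 parameters.
  Eigen::MatrixXd jacobian = Eigen::MatrixXd::Zero(4, 5);

  // One residual block: 2 residuals starting at row 1, written against a
  // parameter block of size 3 whose columns start at offset 2.
  const int residual_offset = 1;
  const int num_residuals = 2;
  const int delta_offset = 2;
  const int parameter_block_size = 3;
  Eigen::MatrixXd parameter_jacobian(num_residuals, parameter_block_size);
  parameter_jacobian << 1, 2, 3,
                        4, 5, 6;

  // Copy the per-parameter Jacobian into the matching block.
  jacobian.block(residual_offset, delta_offset, num_residuals,
                 parameter_block_size) = parameter_jacobian;
  std::cout << jacobian << "\n";
  return 0;
}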
@@ -132,13 +132,8 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveUsingLAPACK(
//
// Note: This is a bit delicate, it assumes that the stride on this
// matrix is the same as the number of rows.
BLAS::SymmetricRankKUpdate(A->num_rows(),
num_cols,
A->values(),
true,
1.0,
0.0,
lhs.data());
BLAS::SymmetricRankKUpdate(
A->num_rows(), num_cols, A->values(), true, 1.0, 0.0, lhs.data());

if (per_solve_options.D != NULL) {
// Undo the modifications to the matrix A.
@@ -153,13 +148,10 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveUsingLAPACK(

LinearSolver::Summary summary;
summary.num_iterations = 1;
summary.termination_type =
LAPACK::SolveInPlaceUsingCholesky(num_cols,
lhs.data(),
x,
&summary.message);
summary.termination_type = LAPACK::SolveInPlaceUsingCholesky(
num_cols, lhs.data(), x, &summary.message);
event_logger.AddEvent("Solve");
return summary;
}
} // namespace internal
} // namespace ceres
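The LAPACK path above forms the normal equations with a symmetric rank-k update and then solves them with a Cholesky factorization. An Eigen sketch of the same two steps, without the regularization and error handling (toy data, illustrative only):

#include <iostream>
#include "Eigen/Dense"

int main() {
  // Toy least-squares system J x ~= b.
  Eigen::MatrixXd J(4, 2);
  J << 1, 0,
       0, 1,
       1, 1,
       2, 0;
  Eigen::VectorXd b(4);
  b << 1, 2, 3, 4;

  // Normal equations: (J^T J) x = J^T b; the lhs is symmetric positive
  // definite when J has full column rank, so Cholesky applies.
  const Eigen::MatrixXd lhs = J.transpose() * J;
  const Eigen::VectorXd rhs = J.transpose() * b;
  const Eigen::VectorXd x = lhs.llt().solve(rhs);
  std::cout << "x = " << x.transpose() << "\n";
  return 0;
}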
@@ -73,7 +73,7 @@ class DenseSparseMatrix;
// library. This solver always returns a solution, it is the user's
// responsibility to judge if the solution is good enough for their
// purposes.
class DenseNormalCholeskySolver: public DenseSparseMatrixSolver {
class DenseNormalCholeskySolver : public DenseSparseMatrixSolver {
public:
explicit DenseNormalCholeskySolver(const LinearSolver::Options& options);
@@ -31,6 +31,7 @@
#include "ceres/dense_qr_solver.h"

#include <cstddef>

#include "Eigen/Dense"
#include "ceres/dense_sparse_matrix.h"
#include "ceres/internal/eigen.h"
@@ -77,7 +78,7 @@ LinearSolver::Summary DenseQRSolver::SolveUsingLAPACK(

// TODO(sameeragarwal): Since we are copying anyways, the diagonal
// can be appended to the matrix instead of doing it on A.
lhs_ = A->matrix();

if (per_solve_options.D != NULL) {
// Undo the modifications to the matrix A.
@@ -164,5 +165,5 @@ LinearSolver::Summary DenseQRSolver::SolveUsingEigen(
return summary;
}

} // namespace internal
} // namespace ceres
@@ -32,8 +32,9 @@
#ifndef CERES_INTERNAL_DENSE_QR_SOLVER_H_
#define CERES_INTERNAL_DENSE_QR_SOLVER_H_

#include "ceres/linear_solver.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
#include "ceres/linear_solver.h"

namespace ceres {
namespace internal {
@@ -78,7 +79,7 @@ class DenseSparseMatrix;
// library. This solver always returns a solution, it is the user's
// responsibility to judge if the solution is good enough for their
// purposes.
class DenseQRSolver: public DenseSparseMatrixSolver {
class CERES_EXPORT_INTERNAL DenseQRSolver : public DenseSparseMatrixSolver {
public:
explicit DenseQRSolver(const LinearSolver::Options& options);
@@ -31,17 +31,17 @@
#include "ceres/dense_sparse_matrix.h"

#include <algorithm>
#include "ceres/triplet_sparse_matrix.h"

#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {

DenseSparseMatrix::DenseSparseMatrix(int num_rows, int num_cols)
: has_diagonal_appended_(false),
has_diagonal_reserved_(false) {
: has_diagonal_appended_(false), has_diagonal_reserved_(false) {
m_.resize(num_rows, num_cols);
m_.setZero();
}
@@ -49,11 +49,10 @@ DenseSparseMatrix::DenseSparseMatrix(int num_rows, int num_cols)
DenseSparseMatrix::DenseSparseMatrix(int num_rows,
int num_cols,
bool reserve_diagonal)
: has_diagonal_appended_(false),
has_diagonal_reserved_(reserve_diagonal) {
: has_diagonal_appended_(false), has_diagonal_reserved_(reserve_diagonal) {
if (reserve_diagonal) {
// Allocate enough space for the diagonal.
m_.resize(num_rows + num_cols, num_cols);
} else {
m_.resize(num_rows, num_cols);
}
@@ -64,9 +63,9 @@ DenseSparseMatrix::DenseSparseMatrix(const TripletSparseMatrix& m)
: m_(Eigen::MatrixXd::Zero(m.num_rows(), m.num_cols())),
has_diagonal_appended_(false),
has_diagonal_reserved_(false) {
const double *values = m.values();
const int *rows = m.rows();
const int *cols = m.cols();
const double* values = m.values();
const int* rows = m.rows();
const int* cols = m.cols();
int num_nonzeros = m.num_nonzeros();

for (int i = 0; i < num_nonzeros; ++i) {
@@ -75,14 +74,9 @@ DenseSparseMatrix::DenseSparseMatrix(const TripletSparseMatrix& m)
}

DenseSparseMatrix::DenseSparseMatrix(const ColMajorMatrix& m)
: m_(m),
has_diagonal_appended_(false),
has_diagonal_reserved_(false) {
}
: m_(m), has_diagonal_appended_(false), has_diagonal_reserved_(false) {}

void DenseSparseMatrix::SetZero() {
m_.setZero();
}
void DenseSparseMatrix::SetZero() { m_.setZero(); }

void DenseSparseMatrix::RightMultiply(const double* x, double* y) const {
VectorRef(y, num_rows()) += matrix() * ConstVectorRef(x, num_cols());
@@ -105,7 +99,7 @@ void DenseSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
*dense_matrix = m_.block(0, 0, num_rows(), num_cols());
}

void DenseSparseMatrix::AppendDiagonal(double *d) {
void DenseSparseMatrix::AppendDiagonal(double* d) {
CHECK(!has_diagonal_appended_);
if (!has_diagonal_reserved_) {
ColMajorMatrix tmp = m_;
@@ -133,9 +127,7 @@ int DenseSparseMatrix::num_rows() const {
return m_.rows();
}

int DenseSparseMatrix::num_cols() const {
return m_.cols();
}
int DenseSparseMatrix::num_cols() const { return m_.cols(); }

int DenseSparseMatrix::num_nonzeros() const {
if (has_diagonal_reserved_ && !has_diagonal_appended_) {
@@ -148,33 +140,30 @@ ConstColMajorMatrixRef DenseSparseMatrix::matrix() const {
return ConstColMajorMatrixRef(
m_.data(),
((has_diagonal_reserved_ && !has_diagonal_appended_)
? m_.rows() - m_.cols()
: m_.rows()),
m_.cols(),
Eigen::Stride<Eigen::Dynamic, 1>(m_.rows(), 1));
}

ColMajorMatrixRef DenseSparseMatrix::mutable_matrix() {
return ColMajorMatrixRef(
m_.data(),
((has_diagonal_reserved_ && !has_diagonal_appended_)
? m_.rows() - m_.cols()
: m_.rows()),
m_.cols(),
Eigen::Stride<Eigen::Dynamic, 1>(m_.rows(), 1));
return ColMajorMatrixRef(m_.data(),
((has_diagonal_reserved_ && !has_diagonal_appended_)
? m_.rows() - m_.cols()
: m_.rows()),
m_.cols(),
Eigen::Stride<Eigen::Dynamic, 1>(m_.rows(), 1));
}

void DenseSparseMatrix::ToTextFile(FILE* file) const {
CHECK(file != nullptr);
const int active_rows =
(has_diagonal_reserved_ && !has_diagonal_appended_)
? (m_.rows() - m_.cols())
: m_.rows();
const int active_rows = (has_diagonal_reserved_ && !has_diagonal_appended_)
? (m_.rows() - m_.cols())
: m_.rows();

for (int r = 0; r < active_rows; ++r) {
for (int c = 0; c < m_.cols(); ++c) {
fprintf(file, "% 10d % 10d %17f\n", r, c, m_(r, c));
}
}
}
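The accessors above expose only the top num_rows rows while space for a diagonal is reserved but not yet appended; appending later fills the reserved bottom num_cols rows with diag(d). A toy Eigen sketch of that storage layout (values and sizes are made up):

#include <iostream>
#include "Eigen/Dense"

int main() {
  const int num_rows = 3;
  const int num_cols = 2;

  // Storage with num_cols extra rows reserved for an optional diagonal.
  Eigen::MatrixXd storage = Eigen::MatrixXd::Zero(num_rows + num_cols, num_cols);
  storage.topRows(num_rows) << 1, 2,
                               3, 4,
                               5, 6;

  // While no diagonal has been appended, the "active" matrix is only the
  // top num_rows rows of the storage.
  std::cout << "active matrix:\n" << storage.topRows(num_rows) << "\n\n";

  // Appending a diagonal d fills the reserved bottom block with diag(d),
  // after which the active matrix spans all num_rows + num_cols rows.
  Eigen::Vector2d d(10, 20);
  storage.bottomRows(num_cols).diagonal() = d;
  std::cout << "after appending the diagonal:\n" << storage << "\n";
  return 0;
}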
@@ -34,6 +34,7 @@
#define CERES_INTERNAL_DENSE_SPARSE_MATRIX_H_

#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
#include "ceres/sparse_matrix.h"
#include "ceres/types.h"

@@ -42,7 +43,7 @@ namespace internal {

class TripletSparseMatrix;

class DenseSparseMatrix : public SparseMatrix {
class CERES_EXPORT_INTERNAL DenseSparseMatrix : public SparseMatrix {
public:
// Build a matrix with the same content as the TripletSparseMatrix
// m. This assumes that m does not have any repeated entries.
@@ -92,7 +93,7 @@ class DenseSparseMatrix : public SparseMatrix {
// Calling RemoveDiagonal removes the block. It is a fatal error to append a
// diagonal to a matrix that already has an appended diagonal, and it is also
// a fatal error to remove a diagonal from a matrix that has none.
void AppendDiagonal(double *d);
void AppendDiagonal(double* d);
void RemoveDiagonal();

private:
extern/ceres/internal/ceres/detect_structure.cc (11 changes, vendored)
@@ -29,6 +29,7 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)

#include "ceres/detect_structure.h"

#include "ceres/internal/eigen.h"
#include "glog/logging.h"

@@ -61,8 +62,7 @@ void DetectStructure(const CompressedRowBlockStructure& bs,
} else if (*row_block_size != Eigen::Dynamic &&
*row_block_size != row.block.size) {
VLOG(2) << "Dynamic row block size because the block size changed from "
<< *row_block_size << " to "
<< row.block.size;
<< *row_block_size << " to " << row.block.size;
*row_block_size = Eigen::Dynamic;
}

@@ -73,8 +73,7 @@ void DetectStructure(const CompressedRowBlockStructure& bs,
} else if (*e_block_size != Eigen::Dynamic &&
*e_block_size != bs.cols[e_block_id].size) {
VLOG(2) << "Dynamic e block size because the block size changed from "
<< *e_block_size << " to "
<< bs.cols[e_block_id].size;
<< *e_block_size << " to " << bs.cols[e_block_id].size;
*e_block_size = Eigen::Dynamic;
}

@@ -100,9 +99,11 @@ void DetectStructure(const CompressedRowBlockStructure& bs,
}
}

// clang-format off
const bool is_everything_dynamic = (*row_block_size == Eigen::Dynamic &&
*e_block_size == Eigen::Dynamic &&
*f_block_size == Eigen::Dynamic);
// clang-format on
if (is_everything_dynamic) {
break;
}
@@ -110,10 +111,12 @@ void DetectStructure(const CompressedRowBlockStructure& bs,

CHECK_NE(*row_block_size, 0) << "No rows found";
CHECK_NE(*e_block_size, 0) << "No e type blocks found";
// clang-format off
VLOG(1) << "Schur complement static structure <"
<< *row_block_size << ","
<< *e_block_size << ","
<< *f_block_size << ">.";
// clang-format on
}

} // namespace internal
extern/ceres/internal/ceres/detect_structure.h (11 changes, vendored)
@@ -32,6 +32,7 @@
#define CERES_INTERNAL_DETECT_STRUCTURE_H_

#include "ceres/block_structure.h"
#include "ceres/internal/port.h"

namespace ceres {
namespace internal {
@@ -55,11 +56,11 @@ namespace internal {
// Note: The structure of rows without any e-blocks has no effect on
// the values returned by this function. It is entirely possible that
// the f_block_size and row_blocks_size is not constant in such rows.
void DetectStructure(const CompressedRowBlockStructure& bs,
const int num_eliminate_blocks,
int* row_block_size,
int* e_block_size,
int* f_block_size);
void CERES_EXPORT DetectStructure(const CompressedRowBlockStructure& bs,
const int num_eliminate_blocks,
int* row_block_size,
int* e_block_size,
int* f_block_size);

} // namespace internal
} // namespace ceres
extern/ceres/internal/ceres/dogleg_strategy.cc (58 changes, vendored)
@@ -49,7 +49,7 @@ namespace internal {
namespace {
const double kMaxMu = 1.0;
const double kMinMu = 1e-8;
}
} // namespace

DoglegStrategy::DoglegStrategy(const TrustRegionStrategy::Options& options)
: linear_solver_(options.linear_solver),
@@ -122,8 +122,8 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
//
jacobian->SquaredColumnNorm(diagonal_.data());
for (int i = 0; i < n; ++i) {
diagonal_[i] = std::min(std::max(diagonal_[i], min_diagonal_),
max_diagonal_);
diagonal_[i] =
std::min(std::max(diagonal_[i], min_diagonal_), max_diagonal_);
}
diagonal_ = diagonal_.array().sqrt();

@@ -171,9 +171,8 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
// The gradient, the Gauss-Newton step, the Cauchy point,
// and all calculations involving the Jacobian have to
// be adjusted accordingly.
void DoglegStrategy::ComputeGradient(
SparseMatrix* jacobian,
const double* residuals) {
void DoglegStrategy::ComputeGradient(SparseMatrix* jacobian,
const double* residuals) {
gradient_.setZero();
jacobian->LeftMultiply(residuals, gradient_.data());
gradient_.array() /= diagonal_.array();
@@ -187,8 +186,7 @@ void DoglegStrategy::ComputeCauchyPoint(SparseMatrix* jacobian) {
Jg.setZero();
// The Jacobian is scaled implicitly by computing J * (D^-1 * (D^-1 * g))
// instead of (J * D^-1) * (D^-1 * g).
Vector scaled_gradient =
(gradient_.array() / diagonal_.array()).matrix();
Vector scaled_gradient = (gradient_.array() / diagonal_.array()).matrix();
jacobian->RightMultiply(scaled_gradient.data(), Jg.data());
alpha_ = gradient_.squaredNorm() / Jg.squaredNorm();
}
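The alpha_ computed at the end of this hunk is the classic Cauchy step length: the minimizer of the quadratic model along the steepest-descent direction, alpha = ||g||^2 / ||J g||^2. A tiny Eigen check of that formula on made-up data:

#include <iostream>
#include "Eigen/Dense"

int main() {
  Eigen::MatrixXd J(3, 2);
  J << 1, 0,
       0, 2,
       1, 1;
  Eigen::VectorXd residuals(3);
  residuals << 1, -1, 2;

  // Gradient of 0.5 * ||J x + r||^2 at x = 0 and the Cauchy step length.
  const Eigen::VectorXd g = J.transpose() * residuals;
  const Eigen::VectorXd Jg = J * g;
  const double alpha = g.squaredNorm() / Jg.squaredNorm();
  std::cout << "cauchy point = " << (-alpha * g).transpose() << "\n";
  return 0;
}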
@@ -217,7 +215,7 @@ void DoglegStrategy::ComputeTraditionalDoglegStep(double* dogleg) {
// Case 2. The Cauchy point and the Gauss-Newton steps lie outside
// the trust region. Rescale the Cauchy point to the trust region
// and return.
if (gradient_norm * alpha_ >= radius_) {
dogleg_step = -(radius_ / gradient_norm) * gradient_;
dogleg_step_norm_ = radius_;
dogleg_step.array() /= diagonal_.array();
@@ -242,14 +240,12 @@ void DoglegStrategy::ComputeTraditionalDoglegStep(double* dogleg) {
// = alpha * -gradient' gauss_newton_step - alpha^2 |gradient|^2
const double c = b_dot_a - a_squared_norm;
const double d = sqrt(c * c + b_minus_a_squared_norm *
(pow(radius_, 2.0) - a_squared_norm));

double beta =
(c <= 0)
? (d - c) / b_minus_a_squared_norm
: (radius_ * radius_ - a_squared_norm) / (d + c);
dogleg_step = (-alpha_ * (1.0 - beta)) * gradient_
+ beta * gauss_newton_step_;
double beta = (c <= 0) ? (d - c) / b_minus_a_squared_norm
: (radius_ * radius_ - a_squared_norm) / (d + c);
dogleg_step =
(-alpha_ * (1.0 - beta)) * gradient_ + beta * gauss_newton_step_;
dogleg_step_norm_ = dogleg_step.norm();
dogleg_step.array() /= diagonal_.array();
VLOG(3) << "Dogleg step size: " << dogleg_step_norm_
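The c/d expressions above are the numerically stable quadratic formula for the interpolation parameter beta: the step a + beta * (b - a) from the Cauchy point a toward the Gauss-Newton point b is chosen to land exactly on the trust-region boundary. A small standalone check with made-up points:

#include <cmath>
#include <iostream>
#include "Eigen/Dense"

int main() {
  // Cauchy point a (inside the trust region), Gauss-Newton point b
  // (outside it), and the trust-region radius.
  Eigen::Vector2d a(0.5, 0.0);
  Eigen::Vector2d b(2.0, 2.0);
  const double radius = 1.5;

  // Solve ||a + beta * (b - a)||^2 = radius^2 for beta in [0, 1] using the
  // numerically stable variant of the quadratic formula.
  const double a_squared_norm = a.squaredNorm();
  const double b_minus_a_squared_norm = (b - a).squaredNorm();
  const double b_dot_a = b.dot(a);
  const double c = b_dot_a - a_squared_norm;  // (b - a) . a
  const double d = std::sqrt(c * c + b_minus_a_squared_norm *
                                         (radius * radius - a_squared_norm));
  const double beta = (c <= 0) ? (d - c) / b_minus_a_squared_norm
                               : (radius * radius - a_squared_norm) / (d + c);

  const Eigen::Vector2d step = a + beta * (b - a);
  std::cout << "beta = " << beta << ", ||step|| = " << step.norm() << "\n";
  return 0;
}

Running this prints a step whose norm equals the radius, which is the defining property of the traditional dogleg interpolation.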
@@ -345,13 +341,13 @@ void DoglegStrategy::ComputeSubspaceDoglegStep(double* dogleg) {
// correctly determined.
const double kCosineThreshold = 0.99;
const Vector2d grad_minimum = subspace_B_ * minimum + subspace_g_;
const double cosine_angle = -minimum.dot(grad_minimum) /
(minimum.norm() * grad_minimum.norm());
const double cosine_angle =
-minimum.dot(grad_minimum) / (minimum.norm() * grad_minimum.norm());
if (cosine_angle < kCosineThreshold) {
LOG(WARNING) << "First order optimality seems to be violated "
<< "in the subspace method!\n"
<< "Cosine of angle between x and B x + g is "
<< cosine_angle << ".\n"
<< "Cosine of angle between x and B x + g is " << cosine_angle
<< ".\n"
<< "Taking a regular dogleg step instead.\n"
<< "Please consider filing a bug report if this "
<< "happens frequently or consistently.\n";
@@ -423,15 +419,17 @@ Vector DoglegStrategy::MakePolynomialForBoundaryConstrainedProblem() const {
const double trB = subspace_B_.trace();
const double r2 = radius_ * radius_;
Matrix2d B_adj;
// clang-format off
B_adj << subspace_B_(1, 1) , -subspace_B_(0, 1),
-subspace_B_(1, 0) , subspace_B_(0, 0);
// clang-format on

Vector polynomial(5);
polynomial(0) = r2;
polynomial(1) = 2.0 * r2 * trB;
polynomial(2) = r2 * (trB * trB + 2.0 * detB) - subspace_g_.squaredNorm();
polynomial(3) = -2.0 * (subspace_g_.transpose() * B_adj * subspace_g_
- r2 * detB * trB);
polynomial(3) =
-2.0 * (subspace_g_.transpose() * B_adj * subspace_g_ - r2 * detB * trB);
polynomial(4) = r2 * detB * detB - (B_adj * subspace_g_).squaredNorm();

return polynomial;
@@ -565,10 +563,8 @@ LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep(
// of Jx = -r and later set x = -y to avoid having to modify
// either jacobian or residuals.
InvalidateArray(n, gauss_newton_step_.data());
linear_solver_summary = linear_solver_->Solve(jacobian,
residuals,
solve_options,
gauss_newton_step_.data());
linear_solver_summary = linear_solver_->Solve(
jacobian, residuals, solve_options, gauss_newton_step_.data());

if (per_solve_options.dump_format_type == CONSOLE ||
(per_solve_options.dump_format_type != CONSOLE &&
@@ -641,9 +637,7 @@ void DoglegStrategy::StepIsInvalid() {
reuse_ = false;
}

double DoglegStrategy::Radius() const {
return radius_;
}
double DoglegStrategy::Radius() const { return radius_; }

bool DoglegStrategy::ComputeSubspaceModel(SparseMatrix* jacobian) {
// Compute an orthogonal basis for the subspace using QR decomposition.
@@ -701,8 +695,8 @@ bool DoglegStrategy::ComputeSubspaceModel(SparseMatrix* jacobian) {

subspace_g_ = subspace_basis_.transpose() * gradient_;

Eigen::Matrix<double, 2, Eigen::Dynamic, Eigen::RowMajor>
Jb(2, jacobian->num_rows());
Eigen::Matrix<double, 2, Eigen::Dynamic, Eigen::RowMajor> Jb(
2, jacobian->num_rows());
Jb.setZero();

Vector tmp;
@@ -31,6 +31,7 @@
#ifndef CERES_INTERNAL_DOGLEG_STRATEGY_H_
#define CERES_INTERNAL_DOGLEG_STRATEGY_H_

#include "ceres/internal/port.h"
#include "ceres/linear_solver.h"
#include "ceres/trust_region_strategy.h"

@@ -52,16 +53,16 @@ namespace internal {
// DoglegStrategy follows the approach by Shultz, Schnabel, Byrd.
// This finds the exact optimum over the two-dimensional subspace
// spanned by the two Dogleg vectors.
class DoglegStrategy : public TrustRegionStrategy {
class CERES_EXPORT_INTERNAL DoglegStrategy : public TrustRegionStrategy {
public:
explicit DoglegStrategy(const TrustRegionStrategy::Options& options);
virtual ~DoglegStrategy() {}

// TrustRegionStrategy interface
Summary ComputeStep(const PerSolveOptions& per_solve_options,
SparseMatrix* jacobian,
const double* residuals,
double* step) final;
void StepAccepted(double step_quality) final;
void StepRejected(double step_quality) final;
void StepIsInvalid();
Some files were not shown because too many files have changed in this diff.