🎉 initial rewrite and documentation for the poses module

otthorn 2021-03-16 18:13:14 +01:00
parent b454ef2c2f
commit 2b686d4fc5
5 changed files with 523 additions and 0 deletions

poses/README.md (new file, +2)
@@ -0,0 +1,2 @@
Contains the COLMAP wrappers and other utilities to extract, read, and write
camera poses from and to different formats.
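
A minimal usage sketch (the dataset path below is a placeholder, and the import
assumes the scripts are run from this directory, as the local imports in
`pose_utils.py` suggest):

```python
from pose_utils import gen_poses

# Runs COLMAP if no reconstruction is found in <dataset>/sparse/0,
# then converts the recovered poses to a .npy file.
gen_poses("/path/to/dataset", match_type="exhaustive_matcher")
```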

poses/__init__.py (new, empty file)
poses/colmap_read_model.py (new file, +312)
@@ -0,0 +1,312 @@
# Copyright (c) 2018, ETH Zurich and UNC Chapel Hill.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of ETH Zurich and UNC Chapel Hill nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Johannes L. Schoenberger (jsch at inf.ethz.ch)
import os
import sys
import collections
import numpy as np
import struct
CameraModel = collections.namedtuple(
"CameraModel", ["model_id", "model_name", "num_params"])
Camera = collections.namedtuple(
"Camera", ["id", "model", "width", "height", "params"])
BaseImage = collections.namedtuple(
"Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
Point3D = collections.namedtuple(
"Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])
class Image(BaseImage):
def qvec2rotmat(self):
return qvec2rotmat(self.qvec)
CAMERA_MODELS = {
CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3),
CameraModel(model_id=1, model_name="PINHOLE", num_params=4),
CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4),
CameraModel(model_id=3, model_name="RADIAL", num_params=5),
CameraModel(model_id=4, model_name="OPENCV", num_params=8),
CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8),
CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12),
CameraModel(model_id=7, model_name="FOV", num_params=5),
CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4),
CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5),
CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12)
}
CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model)
                         for camera_model in CAMERA_MODELS])
def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
"""Read and unpack the next bytes from a binary file.
    :param fid: Open file object in binary read mode.
:param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
:param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
:param endian_character: Any of {@, =, <, >, !}
:return: Tuple of read and unpacked values.
"""
data = fid.read(num_bytes)
return struct.unpack(endian_character + format_char_sequence, data)
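# Illustrative example (mirroring the readers below): read_next_bytes(fid, 24, "ddq")
# unpacks one (x, y, point3D_id) triplet from an images.bin record.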
def read_cameras_text(path):
"""
see: src/base/reconstruction.cc
void Reconstruction::WriteCamerasText(const std::string& path)
void Reconstruction::ReadCamerasText(const std::string& path)
"""
cameras = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
camera_id = int(elems[0])
model = elems[1]
width = int(elems[2])
height = int(elems[3])
params = np.array(tuple(map(float, elems[4:])))
cameras[camera_id] = Camera(id=camera_id, model=model,
width=width, height=height,
params=params)
return cameras
def read_cameras_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::WriteCamerasBinary(const std::string& path)
void Reconstruction::ReadCamerasBinary(const std::string& path)
"""
cameras = {}
with open(path_to_model_file, "rb") as fid:
num_cameras = read_next_bytes(fid, 8, "Q")[0]
for camera_line_index in range(num_cameras):
camera_properties = read_next_bytes(
fid, num_bytes=24, format_char_sequence="iiQQ")
camera_id = camera_properties[0]
model_id = camera_properties[1]
model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
width = camera_properties[2]
height = camera_properties[3]
num_params = CAMERA_MODEL_IDS[model_id].num_params
params = read_next_bytes(fid, num_bytes=8*num_params,
format_char_sequence="d"*num_params)
cameras[camera_id] = Camera(id=camera_id,
model=model_name,
width=width,
height=height,
params=np.array(params))
assert len(cameras) == num_cameras
return cameras
def read_images_text(path):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesText(const std::string& path)
void Reconstruction::WriteImagesText(const std::string& path)
"""
images = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
image_id = int(elems[0])
qvec = np.array(tuple(map(float, elems[1:5])))
tvec = np.array(tuple(map(float, elems[5:8])))
camera_id = int(elems[8])
image_name = elems[9]
elems = fid.readline().split()
xys = np.column_stack([tuple(map(float, elems[0::3])),
tuple(map(float, elems[1::3]))])
point3D_ids = np.array(tuple(map(int, elems[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
def read_images_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadImagesBinary(const std::string& path)
void Reconstruction::WriteImagesBinary(const std::string& path)
"""
images = {}
with open(path_to_model_file, "rb") as fid:
num_reg_images = read_next_bytes(fid, 8, "Q")[0]
for image_index in range(num_reg_images):
binary_image_properties = read_next_bytes(
fid, num_bytes=64, format_char_sequence="idddddddi")
image_id = binary_image_properties[0]
qvec = np.array(binary_image_properties[1:5])
tvec = np.array(binary_image_properties[5:8])
camera_id = binary_image_properties[8]
image_name = ""
current_char = read_next_bytes(fid, 1, "c")[0]
while current_char != b"\x00": # look for the ASCII 0 entry
image_name += current_char.decode("utf-8")
current_char = read_next_bytes(fid, 1, "c")[0]
num_points2D = read_next_bytes(fid, num_bytes=8,
format_char_sequence="Q")[0]
x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
format_char_sequence="ddq"*num_points2D)
xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
tuple(map(float, x_y_id_s[1::3]))])
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
images[image_id] = Image(
id=image_id, qvec=qvec, tvec=tvec,
camera_id=camera_id, name=image_name,
xys=xys, point3D_ids=point3D_ids)
return images
def read_points3D_text(path):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DText(const std::string& path)
void Reconstruction::WritePoints3DText(const std::string& path)
"""
points3D = {}
with open(path, "r") as fid:
while True:
line = fid.readline()
if not line:
break
line = line.strip()
if len(line) > 0 and line[0] != "#":
elems = line.split()
point3D_id = int(elems[0])
xyz = np.array(tuple(map(float, elems[1:4])))
rgb = np.array(tuple(map(int, elems[4:7])))
error = float(elems[7])
image_ids = np.array(tuple(map(int, elems[8::2])))
point2D_idxs = np.array(tuple(map(int, elems[9::2])))
points3D[point3D_id] = Point3D(id=point3D_id, xyz=xyz, rgb=rgb,
error=error, image_ids=image_ids,
point2D_idxs=point2D_idxs)
return points3D
def read_points3d_binary(path_to_model_file):
"""
see: src/base/reconstruction.cc
void Reconstruction::ReadPoints3DBinary(const std::string& path)
void Reconstruction::WritePoints3DBinary(const std::string& path)
"""
points3D = {}
with open(path_to_model_file, "rb") as fid:
num_points = read_next_bytes(fid, 8, "Q")[0]
for point_line_index in range(num_points):
binary_point_line_properties = read_next_bytes(
fid, num_bytes=43, format_char_sequence="QdddBBBd")
point3D_id = binary_point_line_properties[0]
xyz = np.array(binary_point_line_properties[1:4])
rgb = np.array(binary_point_line_properties[4:7])
error = np.array(binary_point_line_properties[7])
track_length = read_next_bytes(
fid, num_bytes=8, format_char_sequence="Q")[0]
track_elems = read_next_bytes(
fid, num_bytes=8*track_length,
format_char_sequence="ii"*track_length)
image_ids = np.array(tuple(map(int, track_elems[0::2])))
point2D_idxs = np.array(tuple(map(int, track_elems[1::2])))
points3D[point3D_id] = Point3D(
id=point3D_id, xyz=xyz, rgb=rgb,
error=error, image_ids=image_ids,
point2D_idxs=point2D_idxs)
return points3D
def read_model(path, ext):
if ext == ".txt":
cameras = read_cameras_text(os.path.join(path, "cameras" + ext))
images = read_images_text(os.path.join(path, "images" + ext))
        points3D = read_points3D_text(os.path.join(path, "points3D" + ext))
else:
cameras = read_cameras_binary(os.path.join(path, "cameras" + ext))
images = read_images_binary(os.path.join(path, "images" + ext))
        points3D = read_points3d_binary(os.path.join(path, "points3D" + ext))
return cameras, images, points3D
def qvec2rotmat(qvec):
return np.array([
[1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
[2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
[2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
def rotmat2qvec(R):
Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
K = np.array([
[Rxx - Ryy - Rzz, 0, 0, 0],
[Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
[Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
[Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
eigvals, eigvecs = np.linalg.eigh(K)
qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
if qvec[0] < 0:
qvec *= -1
return qvec
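# Illustrative sanity check (not part of the original COLMAP script): qvec uses
# COLMAP's [w, x, y, z] convention, so the identity rotation should round-trip:
#
#     q = np.array([1.0, 0.0, 0.0, 0.0])
#     assert np.allclose(rotmat2qvec(qvec2rotmat(q)), q)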
def main():
if len(sys.argv) != 3:
print("Usage: python read_model.py path/to/model/folder [.txt,.bin]")
return
cameras, images, points3D = read_model(path=sys.argv[1], ext=sys.argv[2])
print("num_cameras:", len(cameras))
print("num_images:", len(images))
print("num_points3D:", len(points3D))
if __name__ == "__main__":
main()

poses/colmap_wrapper.py (new file, +71)
@@ -0,0 +1,71 @@
import os
import subprocess
import logging
# $ DATASET_PATH=/path/to/dataset
# $ colmap feature_extractor \
# --database_path $DATASET_PATH/database.db \
# --image_path $DATASET_PATH/images
# $ colmap exhaustive_matcher \
# --database_path $DATASET_PATH/database.db
# $ mkdir $DATASET_PATH/sparse
# $ colmap mapper \
# --database_path $DATASET_PATH/database.db \
# --image_path $DATASET_PATH/images \
# --output_path $DATASET_PATH/sparse
# $ mkdir $DATASET_PATH/dense
def run_colmap(basedir, match_type):
"""Simple wrapper for COLMAP"""
logfile_name = os.path.join(basedir, 'colmap_output.txt')
logfile = open(logfile_name, 'w')
feature_extractor_args = [
'colmap', 'feature_extractor',
'--database_path', os.path.join(basedir, 'database.db'),
'--image_path', os.path.join(basedir, 'images'),
'--ImageReader.single_camera', '1',
# '--SiftExtraction.use_gpu', '0',
]
logging.debug("Extracting features.")
feat_output = subprocess.check_output(feature_extractor_args, universal_newlines=True)
logfile.write(feat_output)
    matcher_args = [
        'colmap', match_type,
        '--database_path', os.path.join(basedir, 'database.db'),
    ]
    logging.debug("Matching features.")
    match_output = subprocess.check_output(matcher_args, universal_newlines=True)
logfile.write(match_output)
p = os.path.join(basedir, 'sparse')
if not os.path.exists(p):
os.makedirs(p)
mapper_args = [
'colmap', 'mapper',
'--database_path', os.path.join(basedir, 'database.db'),
'--image_path', os.path.join(basedir, 'images'),
'--output_path', os.path.join(basedir, 'sparse'), # --export_path changed to --output_path in colmap 3.6
'--Mapper.num_threads', '16',
'--Mapper.init_min_tri_angle', '4',
'--Mapper.multiple_models', '0',
'--Mapper.extract_colors', '0',
]
logging.debug("Creating sparse map")
map_output = subprocess.check_output(mapper_args, universal_newlines=True)
logfile.write(map_output)
logfile.close()
logging.info("Finished running COLMAP. Logfile written in %s",
logfile_name)
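# Illustrative call (assumes the `colmap` binary is on the PATH and that
# `basedir` contains an `images/` folder with the input pictures):
#
#     logging.basicConfig(level=logging.INFO)
#     run_colmap("/path/to/dataset", "exhaustive_matcher")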

poses/pose_utils.py (new file, +138)
@@ -0,0 +1,138 @@
import os
import logging
import typing
import numpy as np
from colmap_wrapper import run_colmap
import colmap_read_model as read_model
#from nerf_homemade.poses.colmap_wrapper import run_colmap
#import nerf_homemade.poses.colmap_read_model as read_model
def gen_poses(basedir: str, match_type: str='exhaustive_matcher') -> None:
"""
    Generate or retrieve camera poses.
    Either run COLMAP to generate the camera poses, or read them back if they
    have already been computed.
    Parameters
    ----------
    basedir: str
        Path of a directory containing either the raw images or the directory
        tree of an existing COLMAP reconstruction, if the camera poses have
        already been computed.
    match_type: {'exhaustive_matcher', 'sequential_matcher'}
        The COLMAP matcher to run if matching is needed.
        Defaults to 'exhaustive_matcher'.
"""
files_needed = ["cameras.bin", "images.bin", "points3D.bin"]
path_to_sparse = os.path.join(basedir, "sparse/0")
if os.path.exists(path_to_sparse):
existing_files = os.listdir(path_to_sparse)
else:
existing_files = []
if not all([f in existing_files for f in files_needed]):
logging.info("Running COLMAP")
run_colmap(basedir, match_type)
else:
logging.info("Files genreated by COLMAP found. Skipping running COLMAP.")
logging.debug("Loading COLMAP data")
poses, points_3d, perm = load_colmap_data(basedir)
logging.debug("Saving COLMAP data to npy")
save_poses(basedir, poses, points_3d, perm)
def load_colmap_data(basedir: str) -> typing.Tuple[np.ndarray, dict, np.ndarray]:
"""
    Load data from a COLMAP reconstruction directory.
    Parameters
    ----------
    basedir: str
        Path of the directory that contains the COLMAP reconstruction.
    Returns
    -------
    poses: numpy.ndarray
        Array of shape [3, 5, N] holding, for each image, a camera-to-world
        pose (3x4) plus a [height, width, focal] column.
    pts3d: dict
        Dictionary of the reconstructed 3D points, keyed by point id.
    perm: numpy.ndarray
        Indices that sort the images by name.
        Should be [0, 1, ..., N-1] if the images are already well ordered.
"""
# append prefix
basedir = os.path.join(basedir, "sparse/0/")
# read cameras data
cameras_file = os.path.join(basedir, "cameras.bin")
cameras_data = read_model.read_cameras_binary(cameras_file)
logging.debug(f"Loading camera model from {cameras_file}")
    # extract the intrinsic parameters of the camera
    # (the assumption is made that a single camera is shared by all images)
camera_h = cameras_data[1].height
camera_w = cameras_data[1].width
camera_f = cameras_data[1].params[0]
hwf = np.array([camera_h, camera_w, camera_f]).reshape([3,1])
logging.debug(f"Number of cameras: {len(cameras_data)}")
# read images data
images_file = os.path.join(basedir, "images.bin")
images_data = read_model.read_images_binary(images_file)
w2c_mats = []
bottom = np.array([0., 0., 0., 1.]).reshape([1, 4])
# sort by name
names = [images_data[k].name for k in images_data]
perm = np.argsort(names)
    # equivalent to a range since the dict is indexed by integers from 1 to N
# create camera matrix
for k in images_data:
im = images_data[k]
R = im.qvec2rotmat()
t = im.tvec.reshape([3,1])
m = np.concatenate([np.concatenate([R, t], 1), bottom], 0)
w2c_mats.append(m)
w2c_mats = np.stack(w2c_mats, 0)
c2w_mats = np.linalg.inv(w2c_mats)
poses = c2w_mats[:, :3, :4].transpose([1,2,0])
poses = np.concatenate([poses, np.tile(hwf[..., np.newaxis], [1,1,poses.shape[-1]])], 1)
# read 3d points data
pts3d_file = os.path.join(basedir, "points3D.bin")
pts3d = read_model.read_points3d_binary(pts3d_file)
# must switch to [-u, r, -t] from [r, -u, t], NOT [r, u, -t]
poses = np.concatenate([poses[:, 1:2, :], poses[:, 0:1, :], -poses[:, 2:3, :], poses[:, 3:4, :], poses[:, 4:5, :]], 1)
return poses, pts3d, perm
def save_poses(basedir, poses, pts3d, perm) -> None:
"""
Save the COLMAP data in a `.npy` format.
Parameters
----------
basedir: str
The path of the directory in which to save the data.
Data will be saved in `basedir/poses_bounds.npy`.
poses:
The list of poses
pts3d:
The list of 3d points
perm:
The sorted index of the array
"""
# TODO
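    # Sketch of a possible implementation (not the author's code): follows the
    # LLFF-style `poses_bounds.npy` layout, i.e. an [N, 17] array where each
    # row is a flattened 3x5 pose followed by near/far depth bounds.
    # Assumes image ids form the contiguous range 1..N and that every image
    # observes at least one 3D point.
    pts = np.stack([pts3d[k].xyz for k in pts3d])          # [M, 3]
    vis = np.zeros((pts.shape[0], poses.shape[-1]))        # [M, N] visibility
    for j, k in enumerate(pts3d):
        vis[j, pts3d[k].image_ids - 1] = 1
    # depth of every 3D point along each camera's viewing axis
    zvals = np.sum(-(pts.T[:, :, np.newaxis] - poses[:3, 3:4, :]) * poses[:3, 2:3, :], 0)
    rows = []
    for i in perm:
        zs = zvals[:, i][vis[:, i] == 1]
        bounds = np.array([np.percentile(zs, 0.1), np.percentile(zs, 99.9)])
        rows.append(np.concatenate([poses[..., i].ravel(), bounds]))
    np.save(os.path.join(basedir, "poses_bounds.npy"), np.array(rows))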