clean code

This commit is contained in:
Kai-46 2020-10-15 19:29:30 -04:00
parent a51403c562
commit a581e48614
3 changed files with 1 addition and 220 deletions


@@ -1,131 +0,0 @@
import cv2
import numpy as np

## pip install opencv-python==3.4.2.17 opencv-contrib-python==3.4.2.17


def skew(x):
    return np.array([[0, -x[2], x[1]],
                     [x[2], 0, -x[0]],
                     [-x[1], x[0], 0]])


def two_view_geometry(intrinsics1, extrinsics1, intrinsics2, extrinsics2):
    '''
    :param intrinsics1: 4 by 4 matrix
    :param extrinsics1: 4 by 4 W2C matrix
    :param intrinsics2: 4 by 4 matrix
    :param extrinsics2: 4 by 4 W2C matrix
    :return: essential matrix E, fundamental matrix F, relative pose
    '''
    relative_pose = extrinsics2.dot(np.linalg.inv(extrinsics1))
    R = relative_pose[:3, :3]
    T = relative_pose[:3, 3]
    tx = skew(T)
    E = np.dot(tx, R)
    F = np.linalg.inv(intrinsics2[:3, :3]).T.dot(E).dot(np.linalg.inv(intrinsics1[:3, :3]))

    return E, F, relative_pose


def drawpointslines(img1, img2, lines1, pts2, color):
    '''
    draw the epilines lines1 on img1 and the corresponding points pts2 on img2
    '''
    r, c = img1.shape
    img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
    img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)
    for r, pt2, cl in zip(lines1, pts2, color):
        x0, y0 = map(int, [0, -r[2]/r[1]])
        x1, y1 = map(int, [c, -(r[2]+r[0]*c)/r[1]])
        cl = tuple(cl.tolist())
        img1 = cv2.line(img1, (x0, y0), (x1, y1), cl, 1)
        img2 = cv2.circle(img2, tuple(pt2), 5, cl, -1)
    return img1, img2


def epipolar(coord1, F, img1, img2):
    # compute epilines in img2 for the keypoints detected in img1
    pts1 = coord1.astype(int).T
    color = np.random.randint(0, high=255, size=(len(pts1), 3))
    # Find epilines corresponding to points in the left image (first image)
    # and draw them on the right image
    lines2 = cv2.computeCorrespondEpilines(pts1.reshape(-1, 1, 2), 1, F)
    lines2 = lines2.reshape(-1, 3)
    img3, img4 = drawpointslines(img2, img1, lines2, pts1, color)
    ## print(img3.shape)
    ## print(np.concatenate((img4, img3)).shape)
    ## cv2.imwrite('vis.png', np.concatenate((img4, img3), axis=1))
    return np.concatenate((img4, img3), axis=1)


def verify_data(img1, img2, intrinsics1, extrinsics1, intrinsics2, extrinsics2):
    img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    E, F, relative_pose = two_view_geometry(intrinsics1, extrinsics1,
                                            intrinsics2, extrinsics2)

    # sift = cv2.xfeatures2d.SIFT_create(nfeatures=20)
    # kp1 = sift.detect(img1, mask=None)
    # coord1 = np.array([[kp.pt[0], kp.pt[1]] for kp in kp1]).T

    # Initiate ORB detector
    orb = cv2.ORB_create()
    # find the keypoints with ORB
    kp1 = orb.detect(img1, None)
    coord1 = np.array([[kp.pt[0], kp.pt[1]] for kp in kp1[:20]]).T

    return epipolar(coord1, F, img1, img2)


if __name__ == '__main__':
    from data_loader import load_data
    from run_nerf import config_parser
    from nerf_sample_ray import parse_camera
    import os

    parser = config_parser()
    args = parser.parse_args()
    print(args)

    data = load_data(args.datadir, args.scene, testskip=1)
    all_imgs = data['images']
    all_cameras = data['cameras']
    all_intrinsics = []
    all_extrinsics = []  # W2C
    for i in range(all_cameras.shape[0]):
        W, H, intrinsics, extrinsics = parse_camera(all_cameras[i])
        all_intrinsics.append(intrinsics)
        all_extrinsics.append(np.linalg.inv(extrinsics))

    #### arbitrarily select 10 pairs of images to verify pose
    out_dir = os.path.join(args.basedir, args.expname, 'data_verify')
    print(out_dir)
    os.makedirs(out_dir, exist_ok=True)

    def calc_angles(c2w_1, c2w_2):
        c1 = c2w_1[:3, 3:4]
        c2 = c2w_2[:3, 3:4]
        c1 = c1 / np.linalg.norm(c1)
        c2 = c2 / np.linalg.norm(c2)
        return np.rad2deg(np.arccos(np.dot(c1.T, c2)))

    images_verify = []
    for i in range(10):
        while True:
            idx1, idx2 = np.random.choice(len(all_imgs), (2,), replace=False)
            angle = calc_angles(np.linalg.inv(all_extrinsics[idx1]),
                                np.linalg.inv(all_extrinsics[idx2]))
            if angle > 5. and angle < 10.:
                break
        im = verify_data(np.uint8(all_imgs[idx1]*255.), np.uint8(all_imgs[idx2]*255.),
                         all_intrinsics[idx1], all_extrinsics[idx1],
                         all_intrinsics[idx2], all_extrinsics[idx2])
        cv2.imwrite(os.path.join(out_dir, '{:03d}.png'.format(i)), im)
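As a quick sanity check on two_view_geometry (not part of the repository), the fundamental matrix should satisfy the epipolar constraint x2^T F x1 ≈ 0 for corresponding points. A minimal sketch, assuming matched pixel coordinates pts1 and pts2 are available as (N, 2) arrays:

import numpy as np

def epipolar_residuals(F, pts1, pts2):
    # |x2^T F x1| per correspondence; values near zero indicate consistent intrinsics/poses.
    x1 = np.concatenate([pts1, np.ones((len(pts1), 1))], axis=1)  # homogeneous points in image 1
    x2 = np.concatenate([pts2, np.ones((len(pts2), 1))], axis=1)  # homogeneous points in image 2
    return np.abs(np.einsum('ni,ij,nj->n', x2, F, x1))

Large residuals on known-good matches usually point at a swapped W2C/C2W convention or mismatched intrinsics.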


@@ -7,15 +7,6 @@ import imageio
########################################################################################################################
# ray batch sampling
########################################################################################################################

def parse_camera(params):
    H, W = params[:2]
    intrinsics = params[2:18].reshape((4, 4))
    c2w = params[18:34].reshape((4, 4))
    return int(W), int(H), intrinsics.astype(np.float32), c2w.astype(np.float32)


def get_rays_single_image(H, W, intrinsics, c2w):
    '''
    :param H: image height
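The removed parse_camera above assumes each camera is stored as a flat 34-vector laid out as [H, W, 16 intrinsics entries, 16 camera-to-world entries]. A small round-trip sketch with placeholder values (not from the dataset), mirroring the same slicing:

import numpy as np

H, W = 600, 800
K = np.eye(4, dtype=np.float32)        # placeholder 4x4 intrinsics
K[0, 0] = K[1, 1] = 500.               # focal lengths
K[0, 2], K[1, 2] = W / 2., H / 2.      # principal point
c2w = np.eye(4, dtype=np.float32)      # placeholder camera-to-world pose

params = np.concatenate([[H, W], K.flatten(), c2w.flatten()]).astype(np.float32)
assert params.shape == (34,)
assert np.allclose(params[2:18].reshape((4, 4)), K)
assert np.allclose(params[18:34].reshape((4, 4)), c2w)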


@@ -8,9 +8,7 @@ HUGE_NUMBER = 1e10
TINY_NUMBER = 1e-6  # float32 only has 7 decimal digits precision

# misc utils
# work on tensors
# img2mse = lambda x, y: torch.mean((x - y) * (x - y))
def img2mse(x, y, mask=None):
    if mask is None:
        return torch.mean((x - y) * (x - y))
@@ -32,13 +30,6 @@ to8b = lambda x: (255 * np.clip(x, 0, 1)).astype(np.uint8)
# gray2rgb = lambda x: np.tile(x[:,:,np.newaxis], (1, 1, 3))
mse2psnr = lambda x: -10. * np.log(x+TINY_NUMBER) / np.log(10.)

#
# def normalize(x):
#     x_min, x_max = np.percentile(x, (0.5, 99.5))
#     x = np.clip(x, x_min, x_max)
#     x = (x - x_min) / (x_max - x_min)
#     return x

########################################################################################################################
#
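The mse2psnr lambda kept in this hunk is the standard PSNR = -10 * log10(MSE) conversion for images scaled to [0, 1]; a quick numeric check with illustrative values:

import numpy as np

TINY_NUMBER = 1e-6
mse2psnr = lambda x: -10. * np.log(x + TINY_NUMBER) / np.log(10.)

print(mse2psnr(0.01))   # ~20 dB
print(mse2psnr(0.001))  # ~30 dB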
@@ -51,15 +42,6 @@ import cv2
def get_vertical_colorbar(h, vmin, vmax, cmap_name='jet', label=None):
    '''
    :param w: pixels
    :param h: pixels
    :param vmin: min value
    :param vmax: max value
    :param cmap_name:
    :param label
    :return:
    '''
    fig = Figure(figsize=(1.2, 8), dpi=100)
    fig.subplots_adjust(right=1.5)
    canvas = FigureCanvasAgg(fig)
@@ -86,9 +68,6 @@ def get_vertical_colorbar(h, vmin, vmax, cmap_name='jet', label=None):
    fig.tight_layout()

    # # debug
    # fig.savefig("debug3.png")

    canvas.draw()
    s, (width, height) = canvas.print_to_buffer()
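canvas.print_to_buffer() from matplotlib's Agg backend returns raw RGBA bytes plus the canvas size; a minimal sketch (illustrative, not the retained code) of turning that buffer into an image array:

import numpy as np
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg

fig = Figure(figsize=(1.2, 8), dpi=100)
canvas = FigureCanvasAgg(fig)
canvas.draw()
s, (width, height) = canvas.print_to_buffer()
im = np.frombuffer(s, dtype=np.uint8).reshape((height, width, 4))  # RGBA, uint8
rgb = im[:, :, :3].astype(np.float32) / 255.                       # drop alpha, scale to [0, 1]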
@@ -102,35 +81,6 @@ def get_vertical_colorbar(h, vmin, vmax, cmap_name='jet', label=None):
    return im

# def colorize_np(x, cmap_name='jet', append_cbar=False):
#     vmin = x.min()
#     vmax = x.max() + TINY_NUMBER
#     x = (x - vmin) / (vmax - vmin)
#     # x = np.clip(x, 0., 1.)
#     cmap = cm.get_cmap(cmap_name)
#     x_new = cmap(x)[:, :, :3]
#     cbar = get_vertical_colorbar(h=x.shape[0], vmin=vmin, vmax=vmax, cmap_name=cmap_name)
#     if append_cbar:
#         x_new = np.concatenate((x_new, np.zeros_like(x_new[:, :5, :]), cbar), axis=1)
#         return x_new
#     else:
#         return x_new, cbar
# # tensor
# def colorize(x, cmap_name='jet', append_cbar=False):
#     x = x.numpy()
#     x, cbar = colorize_np(x, cmap_name)
#     if append_cbar:
#         x = np.concatenate((x, np.zeros_like(x[:, :5, :]), cbar), axis=1)
#     x = torch.from_numpy(x)
#     return x

def colorize_np(x, cmap_name='jet', mask=None, append_cbar=False):
    if mask is not None:
        # vmin, vmax = np.percentile(x[mask], (1, 99))
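Independent of the deleted commented-out version above, the core of a colorize_np-style helper (normalize to [0, 1], apply a matplotlib colormap, drop the alpha channel) can be sketched as follows; names and values are illustrative, not the retained implementation:

import numpy as np
from matplotlib import cm

TINY_NUMBER = 1e-6

def apply_colormap(x, cmap_name='jet'):
    # Map a 2-D array to RGB in [0, 1] with a matplotlib colormap.
    vmin, vmax = x.min(), x.max() + TINY_NUMBER
    x = (x - vmin) / (vmax - vmin)
    return cm.get_cmap(cmap_name)(x)[:, :, :3]  # drop the alpha channel

depth = np.random.rand(64, 64).astype(np.float32)
rgb = apply_colormap(depth)  # (64, 64, 3) float RGB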
@@ -175,32 +125,3 @@ def colorize(x, cmap_name='jet', append_cbar=False, mask=None):
    x = torch.from_numpy(x)
    return x

if __name__ == '__main__':
    # # cbar = get_vertical_colorbar(h=512, vmin=0.1, vmax=5, cmap_name='jet')
    # # cbar = cbar[:, :, :3]
    # import imageio
    #
    # # imageio.imwrite('./debug.png', cbar)
    #
    # x = torch.rand(512, 512)
    # x = colorize(x, append_cbar=True)
    #
    # x = np.uint8(x.numpy() * 255.)
    #
    # import imageio
    # imageio.imwrite('./debug.png', x)

    import os
    import imageio

    img_dir = '/phoenix/S7/kz298/latest_work/nerf/logs/dtu_scan9_3_nearfar/renderonly_train_200001'
    all_imgs = []
    for item in sorted(os.listdir(img_dir)):
        if item[-4:] == '.png':
            fpath = os.path.join(img_dir, item)
            all_imgs.append(imageio.imread(fpath))

    imageio.mimwrite(os.path.join(img_dir, 'video.mp4'), all_imgs, fps=3, quality=8)