forked from yenchenlin/nerf-pytorch
-
Notifications
You must be signed in to change notification settings - Fork 0
/
load_deepvoxels.py
113 lines (88 loc) · 3.8 KB
/
load_deepvoxels.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
import os
import imageio
import numpy as np
def load_dv_data(scene="cube", basedir="/data/deepvoxels", testskip=8):
    """Load a DeepVoxels scene: images, camera poses, and intrinsics.

    Parameters
    ----------
    scene : str
        Scene subdirectory name under ``basedir`` (e.g. ``"cube"``).
    basedir : str
        Root directory of the DeepVoxels dataset; expects the layout
        ``{basedir}/{train,test,validation}/{scene}/{pose,rgb,intrinsics.txt}``.
    testskip : int
        Keep only every ``testskip``-th frame of the test/validation splits.

    Returns
    -------
    imgs : np.ndarray
        All images concatenated in train/val/test order, float32 in [0, 1].
    poses : np.ndarray
        Matching (N, 3, 4) float32 camera-to-world matrices.
    render_poses : np.ndarray
        The (subsampled) test-split poses, reused as render path.
    hwf : list
        ``[H, W, focal]`` — image height, width, focal length in pixels.
    i_split : list[np.ndarray]
        Index arrays selecting the train/val/test rows of ``imgs``/``poses``.
    """

    def parse_intrinsics(filepath, trgt_sidelength, invert_y=False):
        """Read a DeepVoxels intrinsics.txt and rescale it to trgt_sidelength."""
        with open(filepath, "r") as file:
            f, cx, cy = list(map(float, file.readline().split()))[:3]
            grid_barycenter = np.array(list(map(float, file.readline().split())))
            near_plane = float(file.readline())
            scale = float(file.readline())
            height, width = map(float, file.readline().split())
            # Last line is an optional 0/1 flag; a missing/blank line means False.
            try:
                world2cam_poses = int(file.readline())
            except ValueError:
                world2cam_poses = None
        world2cam_poses = bool(world2cam_poses) if world2cam_poses is not None else False
        print(cx, cy, f, height, width)

        # Rescale intrinsics from the stored resolution to the target side length.
        cx = cx / width * trgt_sidelength
        cy = cy / height * trgt_sidelength
        f = trgt_sidelength / height * f

        fx = f
        fy = -f if invert_y else f

        # Build the 4x4 intrinsic matrix.
        full_intrinsic = np.array(
            [[fx, 0.0, cx, 0.0], [0.0, fy, cy, 0], [0.0, 0, 1, 0], [0, 0, 0, 1]]
        )
        return full_intrinsic, grid_barycenter, scale, near_plane, world2cam_poses

    def load_pose(filename):
        """Read a whitespace-separated 4x4 pose matrix from a text file."""
        assert os.path.isfile(filename)
        # Context manager closes the handle promptly; the original version
        # left an unclosed file object to the garbage collector.
        with open(filename) as fp:
            nums = fp.read().split()
        return np.array([float(x) for x in nums]).reshape([4, 4]).astype(np.float32)

    def dir2poses(posedir):
        """Load every *.txt pose in a directory as an (N, 3, 4) float32 stack."""
        poses = np.stack(
            [
                load_pose(os.path.join(posedir, fname))
                for fname in sorted(os.listdir(posedir))
                if fname.endswith("txt")
            ],
            0,
        )
        # Flip the y and z axes of each pose — presumably converting the
        # dataset's camera convention to the one NeRF expects (TODO confirm).
        transf = np.array(
            [
                [1, 0, 0, 0],
                [0, -1, 0, 0],
                [0, 0, -1, 0],
                [0, 0, 0, 1.0],
            ]
        )
        poses = poses @ transf
        return poses[:, :3, :4].astype(np.float32)

    def load_imgs(imgdir, skip=1):
        """Load every ``skip``-th *.png in ``imgdir`` as float32 in [0, 1]."""
        imgfiles = [f for f in sorted(os.listdir(imgdir)) if f.endswith("png")]
        return np.stack(
            [imageio.imread(os.path.join(imgdir, f)) / 255.0 for f in imgfiles[::skip]],
            0,
        ).astype(np.float32)

    H = 512
    W = 512
    deepvoxels_base = os.path.join(basedir, "train", scene)

    full_intrinsic, grid_barycenter, scale, near_plane, world2cam_poses = parse_intrinsics(
        os.path.join(deepvoxels_base, "intrinsics.txt"), H
    )
    print(full_intrinsic, grid_barycenter, scale, near_plane, world2cam_poses)
    # The rescaled fx entry serves as the single focal length.
    focal = full_intrinsic[0, 0]
    print(H, W, focal)

    poses = dir2poses(os.path.join(deepvoxels_base, "pose"))
    testposes = dir2poses(os.path.join(basedir, "test", scene, "pose"))[::testskip]
    valposes = dir2poses(os.path.join(basedir, "validation", scene, "pose"))[::testskip]

    imgs = load_imgs(os.path.join(deepvoxels_base, "rgb"))
    testimgs = load_imgs(os.path.join(basedir, "test", scene, "rgb"), testskip)
    valimgs = load_imgs(os.path.join(basedir, "validation", scene, "rgb"), testskip)

    # Concatenation order is train, val, test; i_split holds the index
    # ranges of each split within the concatenated arrays.
    all_imgs = [imgs, valimgs, testimgs]
    counts = np.cumsum([0] + [x.shape[0] for x in all_imgs])
    i_split = [np.arange(counts[i], counts[i + 1]) for i in range(3)]

    imgs = np.concatenate(all_imgs, 0)
    poses = np.concatenate([poses, valposes, testposes], 0)

    render_poses = testposes
    print(poses.shape, imgs.shape)

    return imgs, poses, render_poses, [H, W, focal], i_split