-
Notifications
You must be signed in to change notification settings - Fork 41
/
Copy patheval.py
103 lines (84 loc) · 4.12 KB
/
eval.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
import torch
import os
from tqdm import tqdm
from os import makedirs
import torch.nn as nn
import torchvision
import numpy as np
from utils.general_utils import safe_state, to_cuda
from argparse import ArgumentParser
from arguments import ModelParams, get_combined_args, NetworkParams, OptimizationParams
from model.avatar_model import AvatarModel
from torchmetrics import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure
from torchmetrics.image.lpip import LearnedPerceptualImagePatchSimilarity
from torch.cuda.amp import custom_fwd
class Evaluator(nn.Module):
    """Bundle PSNR, SSIM and LPIPS image-quality metrics behind one forward().

    adapted from https://github.com/JanaldoChen/Anim-NeRF/blob/main/models/evaluator.py
    """

    def __init__(self):
        super().__init__()
        # data_range=1: inputs are expected to be normalized to [0, 1].
        self.lpips = LearnedPerceptualImagePatchSimilarity(net_type="alex")
        self.psnr = PeakSignalNoiseRatio(data_range=1)
        self.ssim = StructuralSimilarityIndexMeasure(data_range=1)

    # custom_fwd forces float32 here so evaluation is never run under
    # autocast mixed precision, which can be numerically unstable for metrics.
    @custom_fwd(cast_inputs=torch.float32)
    def forward(self, rgb, rgb_gt):
        """Return a dict of metric-name -> scalar tensor for one NCHW batch.

        torchmetrics assumes NCHW format; callers must permute/clamp first
        (see the commented-out lines the original kept as a reminder):
        # rgb = rgb.permute(0, 3, 1, 2).clamp(max=1.0)
        # rgb_gt = rgb_gt.permute(0, 3, 1, 2)
        """
        scores = {}
        for metric_name in ("psnr", "ssim", "lpips"):
            scores[metric_name] = getattr(self, metric_name)(rgb, rgb_gt)
        return scores
def render_sets(model, net, opt, epoch: int, pose_offset: int = 59400):
    """Render the test set with a trained avatar and report PSNR/SSIM/LPIPS.

    Renders every test sample, saves predicted and ground-truth images under
    <model_path>/test_free/, and writes averaged metrics to ./results.txt.

    Args:
        model: ModelParams namespace (provides train_stage; paths come from
            the constructed AvatarModel).
        net:   NetworkParams namespace.
        opt:   OptimizationParams namespace.
        epoch: checkpoint epoch to load.
        pose_offset: index forwarded to render_free_stage1/2. Was a
            hard-coded magic number (59400); the default preserves the
            original behavior. # NOTE(review): semantics of this value live
            in AvatarModel — confirm before changing the default.
    """
    evaluator = Evaluator().cuda()
    evaluator.eval()

    results = []
    with torch.no_grad():
        avatarmodel = AvatarModel(model, net, opt, train=False)
        avatarmodel.training_setup()
        avatarmodel.load(epoch, test=False)

        test_dataset = avatarmodel.getTestDataset()
        test_loader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=1,
                                                  shuffle=False,
                                                  num_workers=4)

        render_path = os.path.join(avatarmodel.model_path, 'test_free', "ours_{}".format(epoch))
        gt_path = os.path.join(avatarmodel.model_path, 'test_free', 'gt_image')
        makedirs(render_path, exist_ok=True)
        makedirs(gt_path, exist_ok=True)

        for idx, batch_data in enumerate(tqdm(test_loader, desc="Rendering progress")):
            batch_data = to_cuda(batch_data, device=torch.device('cuda:0'))
            gt_image = batch_data['original_image']

            # Stage selects which rendering pipeline the checkpoint was trained for.
            if model.train_stage == 1:
                image, = avatarmodel.render_free_stage1(batch_data, pose_offset)
            else:
                image, = avatarmodel.render_free_stage2(batch_data, pose_offset)

            results.append(evaluator(image.unsqueeze(0), gt_image))
            torchvision.utils.save_image(gt_image, os.path.join(gt_path, '{0:05d}'.format(idx) + ".png"))
            torchvision.utils.save_image(image, os.path.join(render_path, '{0:05d}'.format(idx) + ".png"))

    if not results:
        # Guard: torch.stack([]) raises on an empty test set.
        print("No test samples were rendered; skipping metric report.")
        return

    # NOTE: results.txt is written to the current working directory, matching
    # the original behavior (it is NOT placed under model_path).
    with open("results.txt", "w") as f:
        for metric, fmt in (("psnr", "{:.2f}"), ("ssim", "{:.4f}"), ("lpips", "{:.4f}")):
            value = torch.stack([r[metric] for r in results]).mean().item()
            line = "{}: {}".format(metric.upper(), fmt.format(value))
            print(line)
            f.write(line + "\n")
if __name__ == "__main__":
    # CLI entry point: parse combined model/network/optimization arguments,
    # then render the test set for the requested checkpoint epoch.
    parser = ArgumentParser(description="Testing script parameters")
    model_args = ModelParams(parser, sentinel=True)
    net_args = NetworkParams(parser)
    opt_args = OptimizationParams(parser)
    parser.add_argument("--epoch", type=int, default=-1)
    parser.add_argument("--quiet", action="store_true")
    parser.add_argument("--checkpoint_epochs", type=int, nargs="+", default=[])

    args = get_combined_args(parser)
    print("Rendering " + args.model_path)

    # Initialize global state (RNG seeding etc.); --quiet suppresses output.
    safe_state(args.quiet)

    render_sets(
        model_args.extract(args),
        net_args.extract(args),
        opt_args.extract(args),
        args.epoch,
    )