Skip to content

Commit

Permalink
Merge pull request #2 from ArnavVarma/eval
Browse files Browse the repository at this point in the history
Eval
  • Loading branch information
ArnavVarma authored Apr 18, 2023
2 parents b5578a7 + f987c08 commit 16ddec4
Show file tree
Hide file tree
Showing 2 changed files with 31 additions and 12 deletions.
27 changes: 18 additions & 9 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ This is a reference implementation for using G2S loss described in the [ICRA 202
>
> by [Hemang Chawla](https://scholar.google.com/citations?user=_58RpMgAAAAJ&hl=en&oi=ao), [Arnav Varma](https://scholar.google.com/citations?user=3QSih2AAAAAJ&hl=en&oi=ao), [Elahe Arani](https://www.linkedin.com/in/elahe-arani-630870b2/) and [Bahram Zonooz](https://scholar.google.com/citations?hl=en&user=FZmIlY8AAAAJ).
in the Monodepth2 repository for KITTI Eigen Zhou split.
in the Monodepth2 repository for KITTI Eigen Zhou split. The corresponding checkpoint can be found [here](https://drive.google.com/drive/folders/1_BbMw83cBiIjsxRb1BI66aNMeo1fySoy?usp=sharing).

The official code is available [here](https://github.com/NeurAI-Lab/G2S).

Expand All @@ -15,14 +15,13 @@ This code is for non-commercial use following the original license from Monodept
If you find our work useful in your research please consider citing our paper:

```
@inproceedings{chawlavarma2021multimodal,
author={H. {Chawla} and A. {Varma} and E. {Arani} and B. {Zonooz}},
booktitle={2021 IEEE International Conference on Robotics and Automation (ICRA)},
title={Multimodal Scale Consistency and Awareness for Monocular Self-Supervised
Depth Estimation},
location={Xi’an, China},
publisher={IEEE (in press)},
year={2021}
@inproceedings{chawla2021multimodal,
title={Multimodal scale consistency and awareness for monocular self-supervised depth estimation},
author={Chawla, Hemang and Varma, Arnav and Arani, Elahe and Zonooz, Bahram},
booktitle={2021 IEEE International Conference on Robotics and Automation (ICRA)},
pages={5140--5146},
year={2021},
organization={IEEE}
}
```

Expand All @@ -35,5 +34,15 @@ If you find our work useful in your research please consider citing our paper:
python train.py --model_name g2s --data_path /path/to/KITTI/raw_data/sync --log_dir /path/to/log/dir/ --g2s --png (if images are in png)
```

**Ground truth generation (Needs to be run once before first evaluation):**
```shell
python export_gt_depth.py --data_path /path/to/KITTI/raw_data/sync --split eigen
```

**Monocular evaluation:**
```shell
python evaluate_depth.py --eval_mono --data_path /path/to/KITTI/raw_data/sync --eval_split eigen --load_weights_folder /path/to/ckpt/folder --png (if images are in png)
```

## 👩‍⚖️ License
Please see the [license file](LICENSE) for terms.
16 changes: 13 additions & 3 deletions evaluate_depth.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
from options import MonodepthOptions
import datasets
import networks
from copy import deepcopy

cv2.setNumThreads(0) # This speeds up evaluation 5x on our unix systems (OpenCV 3.3.1)

Expand Down Expand Up @@ -82,7 +83,7 @@ def evaluate(opt):

dataset = datasets.KITTIRAWDataset(opt.data_path, filenames,
encoder_dict['height'], encoder_dict['width'],
[0], 4, is_train=False)
[0], 4, is_train=False, img_ext=".png" if opt.png else ".jpg")
dataloader = DataLoader(dataset, 16, shuffle=False, num_workers=opt.num_workers,
pin_memory=True, drop_last=False)

Expand Down Expand Up @@ -163,7 +164,7 @@ def evaluate(opt):
quit()

gt_path = os.path.join(splits_dir, opt.eval_split, "gt_depths.npz")
gt_depths = np.load(gt_path, fix_imports=True, encoding='latin1')["data"]
gt_depths = np.load(gt_path, fix_imports=True, encoding='latin1', allow_pickle=True)["data"]

print("-> Evaluating")

Expand All @@ -176,6 +177,7 @@ def evaluate(opt):
print(" Mono evaluation - using median scaling")

errors = []
errors_metric = []
ratios = []

for i in range(pred_disps.shape[0]):
Expand All @@ -202,6 +204,8 @@ def evaluate(opt):
pred_depth = pred_depth[mask]
gt_depth = gt_depth[mask]

pred_depth_metric = deepcopy(pred_depth)

pred_depth *= opt.pred_depth_scale_factor
if not opt.disable_median_scaling:
ratio = np.median(gt_depth) / np.median(pred_depth)
Expand All @@ -212,16 +216,22 @@ def evaluate(opt):
pred_depth[pred_depth > MAX_DEPTH] = MAX_DEPTH

errors.append(compute_errors(gt_depth, pred_depth))
errors_metric.append(compute_errors(gt_depth, pred_depth_metric))

if not opt.disable_median_scaling:
ratios = np.array(ratios)
med = np.median(ratios)
print(" Scaling ratios | med: {:0.3f} | std: {:0.3f}".format(med, np.std(ratios / med)))
print(" Scaling ratios | med: {:0.3f} | std: {:0.3f}".format(med, np.std(ratios)))

mean_errors = np.array(errors).mean(0)
mean_errors_metric = np.array(errors_metric).mean(0)

print(".....SCALED WITH GROUND TRUTH......")
print("\n " + ("{:>8} | " * 7).format("abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"))
print(("&{: 8.3f} " * 7).format(*mean_errors.tolist()) + "\\\\")
print("\n\n.....UNSCALED......")
print("\n " + ("{:>8} | " * 7).format("abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"))
print(("&{: 8.3f} " * 7).format(*mean_errors_metric.tolist()) + "\\\\")
print("\n-> Done!")


Expand Down

0 comments on commit 16ddec4

Please sign in to comment.