forked from awesome-NeRF/awesome-NeRF
Merge pull request awesome-NeRF#99 from lhy0807/main
Add two robotics works
Showing 3 changed files with 31 additions and 3 deletions.
@@ -0,0 +1,15 @@
@InProceedings{pmlr-v164-li22a,
  title = {3D Neural Scene Representations for Visuomotor Control},
  author = {Li, Yunzhu and Li, Shuang and Sitzmann, Vincent and Agrawal, Pulkit and Torralba, Antonio},
  booktitle = {Proceedings of the 5th Conference on Robot Learning},
  pages = {112--123},
  year = {2022},
  editor = {Faust, Aleksandra and Hsu, David and Neumann, Gerhard},
  volume = {164},
  series = {Proceedings of Machine Learning Research},
  month = {08--11 Nov},
  publisher = {PMLR},
  pdf = {https://proceedings.mlr.press/v164/li22a/li22a.pdf},
  url = {https://proceedings.mlr.press/v164/li22a.html},
  abstract = {Humans have a strong intuitive understanding of the 3D environment around us. The mental model of the physics in our brain applies to objects of different materials and enables us to perform a wide range of manipulation tasks that are far beyond the reach of current robots. In this work, we desire to learn models for dynamic 3D scenes purely from 2D visual observations. Our model combines Neural Radiance Fields (NeRF) and time contrastive learning with an autoencoding framework, which learns viewpoint-invariant 3D-aware scene representations. We show that a dynamics model, constructed over the learned representation space, enables visuomotor control for challenging manipulation tasks involving both rigid bodies and fluids, where the target is specified in a viewpoint different from what the robot operates on. When coupled with an auto-decoding framework, it can even support goal specification from camera viewpoints that are outside the training distribution. We further demonstrate the richness of the learned 3D dynamics model by performing future prediction and novel view synthesis. Finally, we provide detailed ablation studies regarding different system designs and qualitative analysis of the learned representations.}
}
@@ -0,0 +1,9 @@
@ARTICLE{9712211,
  author={Adamkiewicz, Michal and Chen, Timothy and Caccavale, Adam and Gardner, Rachel and Culbertson, Preston and Bohg, Jeannette and Schwager, Mac},
  journal={IEEE Robotics and Automation Letters},
  title={Vision-Only Robot Navigation in a Neural Radiance World},
  year={2022},
  volume={7},
  number={2},
  pages={4606-4613},
  doi={10.1109/LRA.2022.3150497}
}
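The two entries above can be cited like any other BibTeX records. A minimal sketch, assuming both entries are collected into a single bibliography file named references.bib (the actual .bib file names are not shown in this diff):

\documentclass{article}
\begin{document}
% Cite the two newly added robotics works by their BibTeX keys
NeRF-based visuomotor control~\cite{pmlr-v164-li22a} and
vision-only navigation in a neural radiance world~\cite{9712211}.
\bibliographystyle{plain}
\bibliography{references} % hypothetical file holding the entries above
\end{document}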