BibTeX
@inproceedings{Havaldar:1996:10.20380/GI1996.08,
  author    = {Havaldar, Parag and Lee, Mi-Suen and Medioni, G{\'e}rard},
  title     = {View Synthesis from Unregistered {2-D} Images},
  booktitle = {Proceedings of Graphics Interface '96},
  series    = {GI 1996},
  year      = {1996},
  issn      = {0713-5424},
  isbn      = {0-9695338-5-3},
  location  = {Toronto, Ontario, Canada},
  pages     = {61--69},
  numpages  = {9},
  doi       = {10.20380/GI1996.08},
  publisher = {Canadian Human-Computer Communications Society},
  address   = {Toronto, Ontario, Canada},
}
Abstract
Synthesizing the image of a 3-D scene as it would be captured by a camera from an arbitrary viewpoint is a central problem in Computer Graphics. Given a complete 3-D model, it is possible to render the scene from any viewpoint. The construction of models is a tedious task. Here, we propose to bypass the model construction phase altogether, and to generate images of a 3-D scene from any novel viewpoint from prestored images. Unlike methods presented so far, we propose to completely avoid inferring and reasoning in 3-D by using projective invariants. These invariants are derived from corresponding points in the prestored images. The correspondences between features are established off-line in a semi-automated way. It is then possible to generate wireframe animation in real time on a standard computing platform. Well understood texture mapping methods can be applied to the wireframes to realistically render new images from the prestored ones. The method proposed here should allow the integration of computer generated and real imagery for applications such as walkthroughs in realistic virtual environments. We illustrate our approach on synthetic and real indoor and outdoor images.