BibTeX
@inproceedings{Li:2022:10.20380/GI2022.09,
author = {Li, Zhan and Marshall, Carl S. and Vembar, Deepak S. and Liu, Feng},
title = {Future Frame Synthesis for Fast Monte Carlo Rendering},
booktitle = {Proceedings of Graphics Interface 2022},
series = {GI 2022},
year = {2022},
issn = {0713-5424},
location = {Montr{\'e}al, Quebec},
pages = {74--83},
numpages = {10},
doi = {10.20380/GI2022.09},
publisher = {Canadian Information Processing Society},
}
Abstract
Monte Carlo rendering algorithms can generate high-quality images; however, they need to sample many rays per pixel and are thus computationally expensive. In this paper, we present a method to speed up Monte Carlo rendering by significantly reducing the number of pixels for which we need to sample rays. Specifically, we develop a neural future frame synthesis method that quickly predicts future frames from frames that have already been rendered. In each future frame, some pixels cannot be predicted correctly from previous frames in challenging scenarios such as fast camera motion, object motion, and large occlusion. Our method therefore estimates, together with each future frame, a mask that indicates the subset of pixels that need ray samples to correct the prediction. To train and evaluate our neural future frame synthesis method, we develop a large ray-tracing animation dataset. Our experiments show that our method significantly reduces the number of pixels that need to be rendered while maintaining high rendering quality.
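The pipeline the abstract describes (predict a future frame, flag unreliable pixels, ray trace only those, then composite) can be sketched in a few lines of PyTorch. The code below is a minimal illustration under assumptions, not the authors' implementation: `FrameSynthesisNet`, `render_pixels`, and the 0.5 mask threshold are hypothetical stand-ins, and the toy conv stack only marks where the paper's trained network would sit.

```python
import torch

# Hypothetical stand-in for the paper's network: maps two rendered frames
# (concatenated to B x 6 x H x W) to a predicted next frame plus a per-pixel
# mask in [0, 1], where 1 means "prediction unreliable, needs ray samples".
class FrameSynthesisNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.net = torch.nn.Sequential(
            torch.nn.Conv2d(6, 32, 3, padding=1), torch.nn.ReLU(),
            torch.nn.Conv2d(32, 4, 3, padding=1),  # 3 RGB channels + 1 mask logit
        )

    def forward(self, prev_frames):
        out = self.net(prev_frames)
        frame = torch.sigmoid(out[:, :3])   # predicted RGB for the next frame
        mask = torch.sigmoid(out[:, 3:4])   # per-pixel "needs rays" score
        return frame, mask


def synthesize_next_frame(model, frame_t0, frame_t1, render_pixels, thresh=0.5):
    """Predict frame t+1 from the two most recent rendered frames, then
    Monte Carlo render only the pixels the mask flags and composite.

    `render_pixels(mask)` is an assumed hook into the ray tracer: it returns
    a frame whose flagged pixels are rendered and whose others are zero.
    """
    with torch.no_grad():
        pred, mask = model(torch.cat([frame_t0, frame_t1], dim=1))
    needs_rays = (mask > thresh).float()
    rendered = render_pixels(needs_rays)    # expensive, but sparse
    # Trust the prediction where the mask is low; use ray samples elsewhere.
    return needs_rays * rendered + (1.0 - needs_rays) * pred
```

The speedup comes from `needs_rays` being sparse in typical animation: only pixels affected by fast motion or disocclusion fall back to Monte Carlo sampling, while the rest are synthesized from frames that were already rendered.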