BibTeX
@inproceedings{Prithul:2021:10.20380/GI2021.24,
author = {Prithul, Aniruddha and Adhanom, Isayas Berhe and Folmer, Eelke},
title = {Embodied Third-Person Virtual Locomotion using a Single Depth Camera},
booktitle = {Proceedings of Graphics Interface 2021},
series = {GI 2021},
year = {2021},
issn = {0713-5424},
isbn = {978-0-9947868-6-9},
location = {Virtual Event},
pages = {210--219},
numpages = {10},
doi = {10.20380/GI2021.24},
publisher = {Canadian Information Processing Society},
}
Abstract
Third-person is a popular perspective for video games, but virtual reality (VR) seems to be primarily experienced from a first-person point of view (POV). While a first-person POV generally offers the highest presence, a third-person POV lets users see their avatar, which fosters a stronger bond with it, and the higher vantage point generally improves spatial awareness and navigation. Third-person locomotion is typically implemented using a controller or keyboard, with users often seated, an approach considered to offer low presence and embodiment. We present a novel third-person locomotion method that enables high avatar embodiment by integrating skeletal tracking with head-tilt-based input to enable omnidirectional navigation beyond the confines of the available tracking space. Because movement is interpreted relative to the avatar, the user always keeps facing the camera, which optimizes skeletal tracking and keeps the required instrumentation minimal (a single depth camera). A user study compares the performance, usability, VR sickness incidence, and avatar embodiment of our method to a controller for a navigation task that involves interacting with objects. Although the controller offers higher performance and usability, our locomotion method offered significantly higher avatar embodiment.
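The abstract does not spell out the exact tilt-to-motion mapping, so the sketch below is only one plausible interpretation: head tilt beyond a dead zone ramps linearly to avatar speed, and the command is expressed in the avatar's frame so the user can keep physically facing the depth camera. All names, thresholds, and the mapping itself are assumptions for illustration, not the paper's implementation.

import math

# Illustrative tilt-to-velocity mapping; thresholds and names are hypothetical.
DEAD_ZONE_DEG = 8.0   # ignore small, unintentional head tilts
MAX_TILT_DEG = 25.0   # tilt at which the avatar reaches full speed
MAX_SPEED = 2.0       # avatar speed in metres per second

def tilt_to_velocity(pitch_deg, roll_deg, avatar_yaw_rad):
    """Map head tilt (pitch = lean forward/back, roll = lean left/right)
    to a ground-plane velocity expressed relative to the avatar, so the
    user can stay facing the depth camera while steering the avatar."""
    def axis(tilt):
        # Dead zone, then a linear ramp clamped to [0, 1], keeping the sign.
        magnitude = max(abs(tilt) - DEAD_ZONE_DEG, 0.0)
        scale = min(magnitude / (MAX_TILT_DEG - DEAD_ZONE_DEG), 1.0)
        return math.copysign(scale, tilt)

    forward = axis(pitch_deg)  # lean forward -> move the avatar forward
    strafe = axis(roll_deg)    # lean sideways -> strafe

    # Rotate the avatar-relative command into world coordinates.
    vx = MAX_SPEED * (forward * math.cos(avatar_yaw_rad) - strafe * math.sin(avatar_yaw_rad))
    vz = MAX_SPEED * (forward * math.sin(avatar_yaw_rad) + strafe * math.cos(avatar_yaw_rad))
    return vx, vz

# Example: a 15-degree forward lean with the avatar facing the +x axis.
print(tilt_to_velocity(15.0, 0.0, 0.0))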