BibTeX
@inproceedings{Wang:2017:10.20380/GI2017.25,
  author    = {Wang, Shuo-Ping and Chen, Mei-Ling and Wang, Hao-Chuan and Lai, Chien-Tung and Huang, Ai-Ju},
  title     = {De-Identified Feature-based Visualization of Facial Expression for Enhanced Text Chat},
  booktitle = {Proceedings of Graphics Interface 2017},
  series    = {GI 2017},
  year      = {2017},
  issn      = {0713-5424},
  isbn      = {978-0-9947868-2-1},
  location  = {Edmonton, Alberta},
  pages     = {199--207},
  numpages  = {9},
  doi       = {10.20380/GI2017.25},
  publisher = {Canadian Human-Computer Communications Society / Soci{\'e}t{\'e} canadienne du dialogue humain-machine},
}
Abstract
The lack of visibility in text-based chat can hinder communication, especially when nonverbal cues are instrumental to the production and understanding of messages. However, communicating rich nonverbal cues such as facial expressions may be technologically more costly (e.g., demand of bandwidth for video streaming) and socially less desirable (e.g., disclosing other personal and context information through video). We consider how to balance the tension by supporting people to convey facial expressions without compromising the benefits of invisibility in text communication. We present KinChat, an enhanced text chat tool that integrates motion sensing and 2D graphical visualization as a technique to convey information of key facial features during text conversations. We conducted two studies to examine how KinChat influences the de-identification and awareness of facial cues in comparison to other techniques using raw and blurring-processed videos, as well as its impact on real-time text chat. We show that feature-based visualization of facial expression can preserve both awareness of facial cues and non-identifiability at the same time, leading to better understanding and reduced anxiety.