BibTeX
@inproceedings{Lewis:1987:10.1145/29933.30874,
  author    = {Lewis, John P. and Parke, Frederic I.},
  title     = {Automated Lip-Synch and Speech Synthesis for Character Animation},
  booktitle = {Proceedings of the SIGCHI/GI Conference on Human Factors in Computing Systems and Graphics Interface},
  series    = {GI + CHI 1987},
  year      = {1987},
  issn      = {0713-5425},
  isbn      = {0-89791-213-6},
  location  = {Toronto, Ontario, Canada},
  pages     = {143--147},
  numpages  = {5},
  doi       = {10.1145/29933.30874},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
}
Abstract
An automated method of synchronizing facial animation to recorded speech is described. In this method, a common speech synthesis method (linear prediction) is adapted to provide simple and accurate phoneme recognition. The recognized phonemes are then associated with mouth positions to provide keyframes for computer animation of speech using a parametric model of the human face. The linear prediction software, once implemented, can also be used for speech resynthesis. The synthesis retains intelligibility and natural speech rhythm while achieving a “synthetic realism” consistent with computer animation. Speech synthesis also enables certain useful manipulations for the purpose of computer character animation.