@inproceedings{703c679d5a8a48c0aa3a5bd4ad73c7c0,
  title     = {Estimation and Analysis of Facial Animation Parameter Patterns},
  abstract  = {We propose a framework for estimation and analysis of temporal facial expression patterns of a speaker. The proposed system aims to learn personalized elementary dynamic facial expression patterns for a particular speaker. We use head-and-shoulder stereo video sequences to track lip, eye, eyebrow, and eyelid motion of a speaker in 3D. MPEG-4 Facial Definition Parameters (FDPs) are used as the feature set, and temporal facial expression patterns are represented by the MPEG-4 Facial Animation Parameters (FAPs). We perform Hidden Markov Model (HMM) based unsupervised temporal segmentation of upper and lower facial expression features separately to determine recurrent elementary facial expression patterns for a particular speaker. These facial expression patterns coded by FAP sequences, which may not be tied with prespecified emotions, can be used for personalized emotion estimation and synthesis of a speaker. Experimental results are presented.},
  keywords  = {Dynamic facial expression analysis, Temporal patterns},
  author    = {Ofli, Ferda and Erzin, Engin and Yemez, Yucel and Tekalp, A. Murat},
  year      = {2007},
  doi       = {10.1109/ICIP.2007.4380012},
  language  = {English},
  isbn      = {1424414377},
  series    = {Proceedings - International Conference on Image Processing, {ICIP}},
  publisher = {IEEE Computer Society},
  pages     = {293--296},
  booktitle = {2007 {IEEE} International Conference on Image Processing, {ICIP} 2007 Proceedings},
  address   = {United States},
  note      = {14th IEEE International Conference on Image Processing, ICIP 2007 ; Conference date: 16-09-2007 Through 19-09-2007},
}