@inproceedings{ea620b982c524645aa0e52129d2171d7,
  author    = {Schuller, Bj{\"o}rn and Vlasenko, Bogdan and Arsic, Dejan and Rigoll, Gerhard and Wendemuth, Andreas},
  title     = {Combining Speech Recognition and Acoustic Word Emotion Models for Robust Text-Independent Emotion Recognition},
  booktitle = {2008 {IEEE} International Conference on Multimedia and Expo, {ICME} 2008 - Proceedings},
  publisher = {IEEE},
  year      = {2008},
  month     = jun,
  pages     = {1333--1336},
  doi       = {10.1109/ICME.2008.4607689},
  isbn      = {9781424425716},
  language  = {English},
  keywords  = {Acoustic modeling, Affective speech, Emotion recognition, Word models},
  abstract  = {Recognition of emotion in speech usually uses acoustic models that ignore the spoken content. Likewise one general model per emotion is trained independent of the phonetic structure. Given sufficient data, this approach seemingly works well enough. Yet, this paper tries to answer the question whether acoustic emotion recognition strongly depends on phonetic content, and if models tailored for the spoken unit can lead to higher accuracies. We therefore investigate phoneme-, and word-models by use of a large prosodic, spectral, and voice quality feature space and Support Vector Machines (SVM). Experiments also take the necessity of ASR into account to select appropriate unit models. Test-runs on the well-known EMO-DB database facing speaker-independence demonstrate superiority of word emotion models over today's common general models provided sufficient occurrences in the training corpus.},
  note      = {2008 IEEE International Conference on Multimedia and Expo, ICME 2008 ; Conference date: 23-06-2008 Through 26-06-2008},
}