@inproceedings{83c39f7fbc5243c4a3d8a87c580878c0,
title = "Augmenting affect from speech with generative music",
abstract = "In this work we propose a prototype to improve interpersonal communication of emotions. Therefore music is generated with the same affect as when humans talk on the y. Emotions in speech are detected and conveyed to music according to music psychological rules. Existing evaluated modules from affective generative music and speech emotion detection, use cases, emotional models and projected evaluations are discussed.",
keywords = "Affective computing, Circumplex model, Emotion recognition, Generative music, Speech analysis",
author = "Hagerer, {Gerhard Johann} and Michael Lux and Stefan Ehrlich and Gordon Cheng",
year = "2015",
month = apr,
day = "18",
doi = "10.1145/2702613.2732792",
language = "English",
series = "Conference on Human Factors in Computing Systems - Proceedings",
publisher = "Association for Computing Machinery",
pages = "977--982",
booktitle = "CHI 2015 - Extended Abstracts Publication of the 33rd Annual CHI Conference on Human Factors in Computing Systems",
note = "33rd Annual CHI Conference on Human Factors in Computing Systems, CHI EA 2015 ; Conference date: 18-04-2015 Through 23-04-2015",
}