@InProceedings{Sowa2018_419,
  author    = {Sowa, Timo and Richter, Alexander and Fey, Dietmar},
  title     = {A Toolkit for {3D}-Gesture and Speech Dialog in Automotive Environments},
  booktitle = {Studientexte zur Sprachkommunikation: Elektronische Sprachsignalverarbeitung 2018},
  editor    = {Berton, André and Haiber, Udo and Minker, Wolfgang},
  year      = {2018},
  month     = mar,
  pages     = {285--292},
  publisher = {TUDpress},
  address   = {Dresden},
  abstract  = {3D-gesture is a new input modality that complements touch-screens and
               speech dialog systems for automotive infotainment systems. Yet, tools for
               implementing multimodal interfaces including 3D-gestures are rare. We present a
               toolkit for in-car human-machine interfaces (HMIs), that allows modeling
               3D-gesture recognition and integration of gestures with speech. Our approach focuses
               on three interaction styles: Firstly, we demonstrate how dialogs containing isolated
               gestures, sequences of speech and gesture, and alternative uses of different
               modalities can be modeled based on the state graph paradigm. Secondly, we show how
               continuous gestural interaction, such as gradually modifying a value with
               continuous hand movements, can be designed by binding hand model parameters to HMI
               model values. Thirdly, we present an approach toward modeling and processing
               of multimodal utterances with semantics distributed over speech and 3D-gesture.
               The approach is based on a semantic representation of the individual modalities as
               typed feature structures which are collected in a short-term memory. An integrator
               module unifies partial meanings until a fully specified command is created which
               can be executed. The approach has been implemented in a demonstration system
               using a Leap Motion controller.},
  isbn      = {978-3-959081-28-3},
  issn      = {0940-6832},
  keywords  = {Demo Session},
  url       = {https://www.essv.de/pdf/2018_285_292.pdf},
}