@InProceedings{Tchorz2017_253,
  author = {Jürgen Tchorz and Simone Wollermann and Hendrik Husstedt},
  booktitle = {Studientexte zur Sprachkommunikation: Elektronische Sprachsignalverarbeitung 2017},
  title = {Classification of Environmental Sounds for Future Hearing Aid Applications},
  year = {2017},
  editor = {Jürgen Trouvain and Ingmar Steiner and Bernd Möbius},
  month = mar,
  pages = {294--299},
  publisher = {TUDpress},
  address = {Dresden},
  abstract = {Different acoustic environments require different hearing aid settings to
  achieve best speech understanding and sound quality. Manual adjustment of hearing
  aid settings can be annoying. Thus, many hearing aids automatically classify
  the acoustic environment and switch between different programs accordingly.
  The classification approach presented in this study utilizes so-called amplitude
  modulation spectrogram (AMS) as features, which replicate aspects of sound analysis
  in the auditory pathway. The AMS patterns represent time intervals of 500
  ms each. The classification of the acoustic environment based on these features is
  implemented with supervised machine learning using a deep neural network. The
  network is trained on features extracted from several hours of sound from different
  classes, namely speech, reverberant speech, speech in noise, music, and noise. For
  testing, a set of sounds taken from other recordings was processed and classified
  by the neural network. For comparison, these sounds were also automatically classified
  using hearing aids from five different brands. The results show comparable
  classification accuracy with amplitude modulation spectrograms and hearing aids,
  respectively. The time which is needed to classify a situation, however, is much
  shorter with the amplitude modulation spectrogram-based approach.},
  isbn = {978-3-959080-92-7},
  issn = {0940-6832},
  keywords = {Poster},
  url = {https://www.essv.de/pdf/2017_294_299.pdf},
}