
@article{ref1,
  author   = {Nelson, W. T. and Vidulich, Michael A. and Bolia, Robert S.},
  title    = {Designing Speech Interfaces for Command and Control Applications},
  journal  = {Human Factors and Aerospace Safety},
  year     = {2004},
  volume   = {4},
  number   = {3},
  pages    = {195--207},
  issn     = {1468-9456},
  abstract = {A user-centred approach was employed to guide the design and development of a speech-based interface to control a subset of workstation functions in a simulated Airborne Warning and Control System (AWACS) task environment. Speech interface concepts, and their associated vocabularies, were identified for a set of tasks common to Air Battle Managers (ABMs) and were integrated into a commercially-available situation display installed on a laptop PC. This system constituted the Laptop Speech Interface Demonstrator (LSID), which was used to demonstrate speech interface concepts with ABMs from the United States and Royal Australian Air Forces, and United States Navy. The LSID was also used in a series of user testing sessions in which manual and speech input modes were compared for a series of tasks required of ABMs, including modification of the situation display, customization of the Air Tasking Order (ATO), and management of radar tracks. Results from the user tests and demonstrator walkthroughs were then used to further refine speech interface concepts that were evaluated as part of a simulated AWACS mission. Toward this end, trained Air Weapons Officers (AWOs) from the USAF participated in a Close Air Support mission conducted in the Air Force Research Laboratory's Multisensory Overview Large-scale Tactical Knowledge Environment (MOLTKE) Lab. Implications of this work and challenges associated with the integration of speech recognition technology into the Air Battle Management domain are discussed.},
}