Annotation processes in the field of computational linguistics and digital humanities are usually carried out using two-dimensional tools, whether web-based or not. They allow users to add annotations on a desktop using the familiar keyboard and mouse interfaces. This imposes limitations on the way annotation objects are manipulated and interrelated. To overcome these limitations and to draw on gestures and body movements as triggering actions of the annotation process, we introduce VAnnotator, a virtual system for annotating linguistic and multimodal objects. Based on VR glasses and Unity3D, it allows for annotating a wide range of homogeneous and heterogeneous relations. We exemplify VAnnotator using the example of annotating propositional content and carry out a comparative study in which we evaluate VAnnotator in relation to WebAnno. Our evaluation shows that action-based annotations of textual and multimodal objects as an alternative to classic 2D tools are within reach.
@inproceedings{SPIEKERMANN18.2,
  author    = {Spiekermann, Christian and Abrami, Giuseppe and Mehler, Alexander},
  title     = {{VAnnotator}: A Gesture-Driven Annotation Framework for Linguistic and Multimodal Annotation},
  booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)},
  year      = {2018},
  month     = may,
  date      = {7-12},
  location  = {Miyazaki, Japan},
  editor    = {Pustejovsky, James and van der Sluis, Ielka},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Paris, France},
  isbn      = {979-10-95546-06-1},
  language  = {english},
}