Title
Crowdsourcing as a Preprocessing for Complex Semantic Annotation Tasks
Authors
Héctor Martínez Alonso and Lauren Romeo
Abstract
This article outlines a methodology that uses crowdsourcing to reduce the expert workload in complex semantic annotation tasks. We split turker-annotated datasets into a high-agreement block, which is left unmodified, and a low-agreement block, which is re-annotated by experts. The resulting annotations show higher observed agreement. We also identify distinct annotation biases for turkers and experts.
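As a rough illustration of the split described above, the following Python sketch partitions turker-annotated items by majority agreement; items above a threshold keep their majority label, the rest are routed to expert re-annotation. The function name, data layout, and the 0.8 threshold are assumptions for illustration, not values taken from the paper.

from collections import Counter

def split_by_agreement(annotations, threshold=0.8):
    """annotations: dict mapping item id -> list of turker labels.

    Returns (high_agreement, low_agreement); the exact threshold is a
    hypothetical choice, not the one used by the authors.
    """
    high_agreement, low_agreement = {}, {}
    for item, labels in annotations.items():
        top_label, top_count = Counter(labels).most_common(1)[0]
        agreement = top_count / len(labels)
        if agreement >= threshold:
            # High-agreement block: keep the majority turker label unmodified.
            high_agreement[item] = top_label
        else:
            # Low-agreement block: forward the raw labels to experts.
            low_agreement[item] = labels
    return high_agreement, low_agreement

if __name__ == "__main__":
    # Toy labels; the label set is invented for the example.
    turker_labels = {
        "item_01": ["literal", "literal", "literal", "metonymic", "literal"],
        "item_02": ["literal", "metonymic", "underspecified", "metonymic", "literal"],
    }
    keep, reannotate = split_by_agreement(turker_labels)
    print("kept as-is:", keep)
    print("to experts:", reannotate)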
Topics
Corpus (Creation, Annotation, etc.), Crowdsourcing
Full paper
Crowdsourcing as a Preprocessing for Complex Semantic Annotation Tasks
Bibtex
@InProceedings{MARTNEZALONSO14.471,
  author    = {Héctor Martínez Alonso and Lauren Romeo},
  title     = {Crowdsourcing as a Preprocessing for Complex Semantic Annotation Tasks},
  booktitle = {Proceedings of the Ninth International Conference on Language Resources and Evaluation (LREC'14)},
  year      = {2014},
  month     = {may},
  date      = {26-31},
  address   = {Reykjavik, Iceland},
  editor    = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Thierry Declerck and Hrafn Loftsson and Bente Maegaard and Joseph Mariani and Asuncion Moreno and Jan Odijk and Stelios Piperidis},
  publisher = {European Language Resources Association (ELRA)},
  isbn      = {978-2-9517408-8-4},
  language  = {english}
}