Title
Constructing Evaluation Corpora for Automated Clinical Named Entity Recognition
Authors
Philip Ogren, Guergana Savova and Christopher Chute
Abstract
We report on the construction of a gold-standard dataset consisting of annotated clinical notes suitable for evaluating our biomedical named entity recognition system. The dataset is the result of consensus between four human annotators and contains 1,556 annotations on 160 clinical notes using 658 unique concept codes from SNOMED-CT corresponding to human disorders. Inter-annotator agreement was calculated on annotations from 100 of the documents for span (90.9%), concept code (81.7%), context (84.8%), and status (86.0%). Complete agreement on all four of these attributes was 74.6%. We found that creating a consensus set based on annotations from two independently created annotation sets can reduce inter-annotator disagreement by 32.3%. We found little benefit to pre-annotating the corpus with a third-party named entity recognizer.
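The agreement figures above are pairwise percentages over four attributes: span, concept code, context, and status. As an illustration only, the following is a minimal Python sketch of how such figures could be computed. The Annotation fields and the F1-style exact-span formula are assumptions made for the sketch, not the matching criteria documented in the paper.

from typing import NamedTuple

class Annotation(NamedTuple):
    start: int    # character offset where the mention begins
    end: int      # character offset where the mention ends
    code: str     # SNOMED-CT concept code for the disorder
    context: str  # hypothetical label, e.g. patient vs. family member
    status: str   # hypothetical label, e.g. current vs. history-of

def span_agreement(a, b):
    # Exact-span agreement between two annotators, F1-style:
    # 2 * |matched spans| / (|A| + |B|). This formula is an assumption.
    spans_a = {(x.start, x.end) for x in a}
    spans_b = {(x.start, x.end) for x in b}
    total = len(spans_a) + len(spans_b)
    return 2 * len(spans_a & spans_b) / total if total else 1.0

def attribute_agreement(a, b, attr):
    # Of the span-matched annotations, the fraction that also agree
    # on the given attribute ("code", "context", or "status").
    by_a = {(x.start, x.end): x for x in a}
    by_b = {(x.start, x.end): x for x in b}
    shared = by_a.keys() & by_b.keys()
    if not shared:
        return 1.0
    hits = sum(getattr(by_a[s], attr) == getattr(by_b[s], attr) for s in shared)
    return hits / len(shared)

For example, two annotators who mark the same span with the same concept code but different status values would score 1.0 for span agreement and 0.0 for status agreement on that pair.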
Language
English
Topics
Corpus (creation, annotation, etc.), Named Entity recognition, Ontologies
Full paper
Constructing Evaluation Corpora for Automated Clinical Named Entity Recognition
Slides
-
Bibtex
@InProceedings{OGREN08.796,
  author = {Philip Ogren and Guergana Savova and Christopher Chute},
  title = {Constructing Evaluation Corpora for Automated Clinical Named Entity Recognition},
  booktitle = {Proceedings of the Sixth International Conference on Language Resources and Evaluation (LREC'08)},
  year = {2008},
  month = {may},
  date = {28-30},
  address = {Marrakech, Morocco},
  editor = {Nicoletta Calzolari (Conference Chair) and Khalid Choukri and Bente Maegaard and Joseph Mariani and Jan Odijk and Stelios Piperidis and Daniel Tapias},
  publisher = {European Language Resources Association (ELRA)},
  isbn = {2-9517408-4-0},
  note = {http://www.lrec-conf.org/proceedings/lrec2008/},
  language = {english}
}