This paper reports work on automatic analysis of laughter and human body movements in a video corpus of human-human dialogues. We use the Nordic First Encounters video corpus collected in situations where the participants make acquaintance with each other for the first time. This corpus has manual annotations of the participants' head, hand and body movements as well as laughter occurrences. We employ machine learning methods to analyse the corpus automatically using two types of features: visual video features that describe bounding boxes around the dialogue participants' heads and bodies, based on automatically detecting body movements in the video, and audio speech features based on the speech signals related to the participants' spoken contributions.
@inproceedings{JOKINEN18.7,
  author    = {Jokinen, Kristiina and Ngo Trong, Trung},
  title     = {Laughter and Body Movements as Communicative Actions in Encounters},
  booktitle = {Proceedings of the Eleventh International Conference on Language Resources and Evaluation (LREC 2018)},
  year      = {2018},
  month     = may,
  date      = {7-12},
  location  = {Miyazaki, Japan},
  editor    = {Pustejovsky, James and van der Sluis, Ielka},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Paris, France},
  isbn      = {979-10-95546-06-1},
  language  = {english},
}