This paper introduces the Webis Gmane Email Corpus 2019, the largest publicly available and fully preprocessed email corpus to date. We crawled more than 153 million emails from 14,699 mailing lists and segmented them into semantically consistent components using a new neural segmentation model. With 96% accuracy on 15 classes of email segments, our model achieves state-of-the-art performance while being more efficient to train than previous ones. All data, code, and trained models are made freely available alongside the paper.
@inproceedings{bevendorff-etal-2020-crawling,
abstract = {This paper introduces the Webis Gmane Email Corpus 2019, the largest publicly available and fully preprocessed email corpus to date. We crawled more than 153 million emails from 14,699 mailing lists and segmented them into semantically consistent components using a new neural segmentation model. With 96{\%} accuracy on 15 classes of email segments, our model achieves state-of-the-art performance while being more efficient to train than previous ones. All data, code, and trained models are made freely available alongside the paper.},
address = {Online},
author = {Bevendorff, Janek and Al Khatib, Khalid and Potthast, Martin and Stein, Benno},
booktitle = {Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics},
doi = {10.18653/v1/2020.acl-main.108},
editor = {Jurafsky, Dan and Chai, Joyce and Schluter, Natalie and Tetreault, Joel},
month = jul,
pages = {1151--1158},
publisher = {Association for Computational Linguistics},
title = {Crawling and Preprocessing Mailing Lists At Scale for Dialog Analysis},
url = {https://aclanthology.org/2020.acl-main.108},
year = 2020
}