We propose to use image captions from the Web as a previously underutilized resource for paraphrases (i.e., texts with the same "message") and to create and analyze a corresponding dataset. When an image is reused on the Web, an original caption is often assigned. We hypothesize that different captions for the same image naturally form a set of mutual paraphrases. To demonstrate the suitability of this idea, we analyze captions in the English Wikipedia, where editors frequently relabel the same image for different articles. The paper introduces the underlying mining technology, the resulting Wikipedia-IPC dataset, and compares known paraphrase corpora with respect to their syntactic and semantic paraphrase similarity to our new resource. In this context, we introduce characteristic maps along the two similarity dimensions to identify the style of paraphrases coming from different sources. An annotation study demonstrates the high reliability of the algorithmically determined characteristic maps.
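As a rough illustration of the two similarity dimensions mentioned in the abstract, the following sketch places a pair of captions on a syntactic/semantic map. It is not the authors' code: the word-overlap measure, the sentence-transformers library, and the all-MiniLM-L6-v2 model are assumptions chosen only for this example.

from sentence_transformers import SentenceTransformer, util  # assumed dependency

def syntactic_similarity(a: str, b: str) -> float:
    # Jaccard overlap of lower-cased word sets as a crude syntactic measure.
    ta, tb = set(a.lower().split()), set(b.lower().split())
    return len(ta & tb) / len(ta | tb) if ta | tb else 0.0

model = SentenceTransformer("all-MiniLM-L6-v2")  # hypothetical model choice

def semantic_similarity(a: str, b: str) -> float:
    # Cosine similarity of sentence embeddings as a semantic measure.
    ea, eb = model.encode([a, b], convert_to_tensor=True)
    return float(util.cos_sim(ea, eb))

caption_1 = "The Eiffel Tower at night, seen from the Champ de Mars"
caption_2 = "Night view of the Eiffel Tower from the Champ de Mars"
point = (syntactic_similarity(caption_1, caption_2),
         semantic_similarity(caption_1, caption_2))
print(point)  # one (syntactic, semantic) coordinate on a characteristic map

A caption pair with high semantic but low syntactic similarity would indicate a strongly rewritten paraphrase, while high values on both dimensions would indicate near-verbatim reuse.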
@article{Gohsen2023-yh,
abstract = {We propose to use image captions from the Web as a previously underutilized resource for paraphrases (i.e., texts with the same ``message'') and to create and analyze a corresponding dataset. When an image is reused on the Web, an original caption is often assigned. We hypothesize that different captions for the same image naturally form a set of mutual paraphrases. To demonstrate the suitability of this idea, we analyze captions in the English Wikipedia, where editors frequently relabel the same image for different articles. The paper introduces the underlying mining technology, the resulting Wikipedia-IPC dataset, and compares known paraphrase corpora with respect to their syntactic and semantic paraphrase similarity to our new resource. In this context, we introduce characteristic maps along the two similarity dimensions to identify the style of paraphrases coming from different sources. An annotation study demonstrates the high reliability of the algorithmically determined characteristic maps.},
author = {Gohsen, Marcel and Hagen, Matthias and Potthast, Martin and Stein, Benno},
publisher = {arXiv},
title = {Paraphrase Acquisition from Image Captions},
year = 2023
}