Commercial web search engines employ near-duplicate detection to ensure that users see each relevant result only once, even though the underlying web crawls typically include (near-)duplicates of many web pages. We revisit the risks and potential of near-duplicates with an information retrieval focus, arguing that current efforts toward an open and independent European web search infrastructure should maintain metadata on duplicate and near-duplicate documents in its index. Near-duplicate detection implemented in an open web search infrastructure requires a suitable similarity threshold, a difficult choice since pages with identical relevant content may differ substantially in parts that are irrelevant to searchers (templates, advertisements, etc.). We study this problem by comparing the similarity of pages for five (main) content extraction methods in two studies on the ClueWeb crawls. We find that using the full content of pages serves precision-oriented near-duplicate detection, while main content extraction is more recall-oriented.
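To make the studied trade-off concrete, the following is a minimal, hypothetical sketch of threshold-based near-duplicate detection in Python: pages are reduced to word-shingle sets and flagged as near-duplicates when their Jaccard similarity reaches a chosen threshold. The shingle size, the 0.8 threshold, and the helper names are illustrative assumptions, not the paper's actual fingerprinting setup; the input text could be either the full page content or the output of one of the studied main content extractors.

import re

# Hypothetical illustration, not the paper's pipeline: word-shingle sets
# plus a Jaccard-similarity threshold for flagging near-duplicate pages.

def shingles(text: str, n: int = 8) -> set:
    # Lowercase the text, tokenize on word characters, and build word n-grams.
    words = re.findall(r"\w+", text.lower())
    if len(words) < n:
        return {" ".join(words)} if words else set()
    return {" ".join(words[i:i + n]) for i in range(len(words) - n + 1)}

def jaccard(a: set, b: set) -> float:
    # Jaccard similarity: size of the intersection over size of the union.
    if not a and not b:
        return 1.0
    return len(a & b) / len(a | b)

def near_duplicates(text_a: str, text_b: str, threshold: float = 0.8) -> bool:
    # Two pages count as near-duplicates if their shingle similarity
    # reaches the (illustrative) threshold.
    return jaccard(shingles(text_a), shingles(text_b)) >= threshold

In this framing, the abstract's finding corresponds to running the comparison on the full page text when precision-oriented near-duplicate detection is needed, and on extracted main content when recall matters more.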
@article{Frobe2021-wk,
author = {Fr{\"o}be, Maik and Hagen, Matthias and Bevendorff, Janek and V{\"o}lske, Michael and Stein, Benno and Schr{\"o}der, Christopher and Wagner, Robby and Gienapp, Lukas and Potthast, Martin},
publisher = {arXiv},
title = {The impact of main content extraction on near-duplicate detection},
year = 2021
}