The BigCode project is an open-scientific collaboration working on the responsible development of large language models for code. This tech report describes the progress of the collaboration until December 2022, outlining the current state of the Personally Identifiable Information (PII) redaction pipeline, the experiments conducted to de-risk the model architecture, and the experiments investigating better preprocessing methods for the training data. We train 1.1B parameter models on the Java, JavaScript, and Python subsets of The Stack and evaluate them on the MultiPL-E text-to-code benchmark. We find that more aggressive filtering of near-duplicates can further boost performance and, surprisingly, that selecting files from repositories with 5+ GitHub stars deteriorates performance significantly. Our best model outperforms previous open-source multilingual code generation models (InCoder-6.7B and CodeGen-Multi-2.7B) in both left-to-right generation and infilling on the Java, JavaScript, and Python portions of MultiPL-E, despite being a substantially smaller model. All models are released under an OpenRAIL license at https://hf.co/bigcode.
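As a hedged illustration of the abstract's closing sentence about the released models, the short Python sketch below loads a checkpoint from the https://hf.co/bigcode organization with the Hugging Face transformers library and samples a completion. The model ID bigcode/santacoder, the prompt, and the trust_remote_code flag are assumptions made for illustration, not details taken from this record.

# Minimal sketch, assuming the checkpoint "bigcode/santacoder" and the
# Hugging Face transformers API; prompt and generation settings are illustrative.
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "bigcode/santacoder"  # assumed model ID under https://hf.co/bigcode
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# trust_remote_code is assumed to be required for the custom modeling code on the Hub.
model = AutoModelForCausalLM.from_pretrained(checkpoint, trust_remote_code=True)

prompt = "def fibonacci(n):"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=48)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))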
@article{Allal2023-az,
author = {Allal, Loubna Ben and Li, Raymond and Kocetkov, Denis and Mou, Chenghao and Akiki, Christopher and Ferrandis, Carlos Munoz and Muennighoff, Niklas and Mishra, Mayank and Gu, Alex and Dey, Manan and Umapathi, Logesh Kumar and Anderson, Carolyn Jane and Zi, Yangtian and Poirier, Joel Lamy and Schoelkopf, Hailey and Troshin, Sergey and Abulkhanov, Dmitry and Romero, Manuel and Lappert, Michael and De Toni, Francesco and del R{\'\i}o, Bernardo Garc{\'\i}a and Liu, Qian and Bose, Shamik and Bhattacharyya, Urvashi and Zhuo, Terry Yue and Yu, Ian and Villegas, Paulo and Zocca, Marco and Mangrulkar, Sourab and Lansky, David and Nguyen, Huu and Contractor, Danish and Villa, Luis and Li, Jia and Bahdanau, Dzmitry and Jernite, Yacine and Hughes, Sean and Fried, Daniel and Guha, Arjun and de Vries, Harm and von Werra, Leandro},
publisher = {arXiv},
title = {{SantaCoder}: don't reach for the stars!},
year = 2023
}