The BigCode community, an open-scientific collaboration working on the responsible development of Large Language Models for Code (Code LLMs), introduces StarCoder and StarCoderBase: 15.5B parameter models with 8K context length, infilling capabilities and fast large-batch inference enabled by multi-query attention. StarCoderBase is trained on 1 trillion tokens sourced from The Stack, a large collection of permissively licensed GitHub repositories with inspection tools and an opt-out process. We fine-tuned StarCoderBase on 35B Python tokens, resulting in the creation of StarCoder. We perform the most comprehensive evaluation of Code LLMs to date and show that StarCoderBase outperforms every open Code LLM that supports multiple programming languages and matches or outperforms the OpenAI code-cushman-001 model. Furthermore, StarCoder outperforms every model that is fine-tuned on Python, can be prompted to achieve 40% pass@1 on HumanEval, and still retains its performance on other programming languages. We take several important steps towards a safe open-access model release, including an improved PII redaction pipeline and a novel attribution tracing tool, and make the StarCoder models publicly available under a more commercially viable version of the Open Responsible AI Model license.
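For context on the 40% pass@1 figure quoted above: pass@k is the standard HumanEval metric of Chen et al. (2021), estimated per problem from n sampled completions of which c pass the unit tests, as 1 - C(n-c, k) / C(n, k). The following is a minimal Python sketch of that standard unbiased estimator, not code from the StarCoder paper itself; the function name and the n/c/k parameters are illustrative.

import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    # Unbiased estimator of pass@k: 1 - C(n-c, k) / C(n, k).
    #   n: total completions sampled per problem
    #   c: completions that pass all unit tests
    #   k: evaluation budget (k = 1 for the pass@1 number quoted in the abstract)
    if n - c < k:
        return 1.0  # every size-k subset contains at least one passing completion
    # numerically stable product form of 1 - C(n-c, k) / C(n, k)
    return 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

# Example: 200 samples per problem, 80 of them passing -> pass_at_k(200, 80, 1) == 0.4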
%0 Journal Article
%1 Li2023-nq
%A Li, Raymond
%A Allal, Loubna Ben
%A Zi, Yangtian
%A Muennighoff, Niklas
%A Kocetkov, Denis
%A Mou, Chenghao
%A Marone, Marc
%A Akiki, Christopher
%A Li, Jia
%A Chim, Jenny
%A Liu, Qian
%A Zheltonozhskii, Evgenii
%A Zhuo, Terry Yue
%A Wang, Thomas
%A Dehaene, Olivier
%A Davaadorj, Mishig
%A Lamy-Poirier, Joel
%A Monteiro, João
%A Shliazhko, Oleh
%A Gontier, Nicolas
%A Meade, Nicholas
%A Zebaze, Armel
%A Yee, Ming-Ho
%A Umapathi, Logesh Kumar
%A Zhu, Jian
%A Lipkin, Benjamin
%A Oblokulov, Muhtasham
%A Wang, Zhiruo
%A Murthy, Rudra
%A Stillerman, Jason
%A Patel, Siva Sankalp
%A Abulkhanov, Dmitry
%A Zocca, Marco
%A Dey, Manan
%A Zhang, Zhihan
%A Fahmy, Nour
%A Bhattacharyya, Urvashi
%A Yu, Wenhao
%A Singh, Swayam
%A Luccioni, Sasha
%A Villegas, Paulo
%A Kunakov, Maxim
%A Zhdanov, Fedor
%A Romero, Manuel
%A Lee, Tony
%A Timor, Nadav
%A Ding, Jennifer
%A Schlesinger, Claire
%A Schoelkopf, Hailey
%A Ebert, Jan
%A Dao, Tri
%A Mishra, Mayank
%A Gu, Alex
%A Robinson, Jennifer
%A Anderson, Carolyn Jane
%A Dolan-Gavitt, Brendan
%A Contractor, Danish
%A Reddy, Siva
%A Fried, Daniel
%A Bahdanau, Dzmitry
%A Jernite, Yacine
%A Ferrandis, Carlos Muñoz
%A Hughes, Sean
%A Wolf, Thomas
%A Guha, Arjun
%A von Werra, Leandro
%A de Vries, Harm
%D 2023
%I arXiv
%K topic_language
%T StarCoder: may the source be with you!
%X The BigCode community, an open-scientific collaboration working on the responsible development of Large Language Models for Code (Code LLMs), introduces StarCoder and StarCoderBase: 15.5B parameter models with 8K context length, infilling capabilities and fast large-batch inference enabled by multi-query attention. StarCoderBase is trained on 1 trillion tokens sourced from The Stack, a large collection of permissively licensed GitHub repositories with inspection tools and an opt-out process. We fine-tuned StarCoderBase on 35B Python tokens, resulting in the creation of StarCoder. We perform the most comprehensive evaluation of Code LLMs to date and show that StarCoderBase outperforms every open Code LLM that supports multiple programming languages and matches or outperforms the OpenAI code-cushman-001 model. Furthermore, StarCoder outperforms every model that is fine-tuned on Python, can be prompted to achieve 40% pass@1 on HumanEval, and still retains its performance on other programming languages. We take several important steps towards a safe open-access model release, including an improved PII redaction pipeline and a novel attribution tracing tool, and make the StarCoder models publicly available under a more commercially viable version of the Open Responsible AI Model license.
@article{Li2023-nq,
abstract = {The BigCode community, an open-scientific collaboration working on the responsible development of Large Language Models for Code (Code LLMs), introduces StarCoder and StarCoderBase: 15.5B parameter models with 8K context length, infilling capabilities and fast large-batch inference enabled by multi-query attention. StarCoderBase is trained on 1 trillion tokens sourced from The Stack, a large collection of permissively licensed GitHub repositories with inspection tools and an opt-out process. We fine-tuned StarCoderBase on 35B Python tokens, resulting in the creation of StarCoder. We perform the most comprehensive evaluation of Code LLMs to date and show that StarCoderBase outperforms every open Code LLM that supports multiple programming languages and matches or outperforms the OpenAI code-cushman-001 model. Furthermore, StarCoder outperforms every model that is fine-tuned on Python, can be prompted to achieve 40\% pass@1 on HumanEval, and still retains its performance on other programming languages. We take several important steps towards a safe open-access model release, including an improved PII redaction pipeline and a novel attribution tracing tool, and make the StarCoder models publicly available under a more commercially viable version of the Open Responsible AI Model license.},
author = {Li, Raymond and Allal, Loubna Ben and Zi, Yangtian and Muennighoff, Niklas and Kocetkov, Denis and Mou, Chenghao and Marone, Marc and Akiki, Christopher and Li, Jia and Chim, Jenny and Liu, Qian and Zheltonozhskii, Evgenii and Zhuo, Terry Yue and Wang, Thomas and Dehaene, Olivier and Davaadorj, Mishig and Lamy-Poirier, Joel and Monteiro, Jo{\~a}o and Shliazhko, Oleh and Gontier, Nicolas and Meade, Nicholas and Zebaze, Armel and Yee, Ming-Ho and Umapathi, Logesh Kumar and Zhu, Jian and Lipkin, Benjamin and Oblokulov, Muhtasham and Wang, Zhiruo and Murthy, Rudra and Stillerman, Jason and Patel, Siva Sankalp and Abulkhanov, Dmitry and Zocca, Marco and Dey, Manan and Zhang, Zhihan and Fahmy, Nour and Bhattacharyya, Urvashi and Yu, Wenhao and Singh, Swayam and Luccioni, Sasha and Villegas, Paulo and Kunakov, Maxim and Zhdanov, Fedor and Romero, Manuel and Lee, Tony and Timor, Nadav and Ding, Jennifer and Schlesinger, Claire and Schoelkopf, Hailey and Ebert, Jan and Dao, Tri and Mishra, Mayank and Gu, Alex and Robinson, Jennifer and Anderson, Carolyn Jane and Dolan-Gavitt, Brendan and Contractor, Danish and Reddy, Siva and Fried, Daniel and Bahdanau, Dzmitry and Jernite, Yacine and Ferrandis, Carlos Mu{\~n}oz and Hughes, Sean and Wolf, Thomas and Guha, Arjun and von Werra, Leandro and de Vries, Harm},
keywords = {topic_language},
publisher = {arXiv},
title = {{StarCoder}: may the source be with you!},
year = 2023
}