BACKGROUND: Assessing the performance of machine learning (ML) models requires careful consideration of the evaluation metrics used. It is often necessary to utilize multiple metrics to gain a comprehensive understanding of a trained model's performance, as each metric focuses on a specific aspect. However, comparing the scores of these individual metrics for each model to determine the best-performing model can be time-consuming and susceptible to subjective user preferences, potentially introducing bias. RESULTS: We propose the Machine Learning Cumulative Performance Score (MLcps), a novel evaluation metric for classification problems. MLcps integrates several precomputed evaluation metrics into a unified score, enabling a comprehensive assessment of the trained model's strengths and weaknesses. We tested MLcps on 4 publicly available datasets, and the results demonstrate that MLcps provides a holistic evaluation of the model's robustness, ensuring a thorough understanding of its overall performance. CONCLUSIONS: By utilizing MLcps, researchers and practitioners no longer need to individually examine and compare multiple metrics to identify the best-performing models. Instead, they can rely on a single MLcps value to assess the overall performance of their ML models. This streamlined evaluation process saves valuable time and effort, enhancing the efficiency of model evaluation. MLcps is available as a Python package at https://pypi.org/project/MLcps/.
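As a rough illustration of the idea described in the abstract (this sketch is not the published MLcps computation; the dataset, the metric choice, the MCC rescaling, and the plain-mean aggregation are all assumptions made here for demonstration), several precomputed scikit-learn metrics can be folded into one summary score per model:

import numpy as np
from sklearn.datasets import load_breast_cancer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, f1_score, matthews_corrcoef
from sklearn.model_selection import train_test_split

# Train a simple classifier on a public dataset.
X, y = load_breast_cancer(return_X_y=True)
X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0)
pred = LogisticRegression(max_iter=5000).fit(X_tr, y_tr).predict(X_te)

# Precompute several metrics; rescale MCC from [-1, 1] to [0, 1] so all
# components share a common range before they are combined.
scores = {
    "accuracy": accuracy_score(y_te, pred),
    "f1": f1_score(y_te, pred),
    "mcc_rescaled": (matthews_corrcoef(y_te, pred) + 1) / 2,
}

# One unified score per model (a plain mean is an assumption, not MLcps).
print(np.mean(list(scores.values())))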
%0 Journal Article
%1 Akshay2022-nv
%A Akshay, Akshay
%A Abedi, Masoud
%A Shekarchizadeh, Navid
%A Burkhard, Fiona C
%A Katoch, Mitali
%A Bigger-Allen, Alex
%A Adam, Rosalyn M
%A Monastyrskaya, Katia
%A Gheinani, Ali Hashemi
%D 2022
%J GigaScience
%K topic_federatedlearn; machine learning; model evaluation; classification problems; Python package; unified evaluation score
%T MLcps: machine learning cumulative performance score for classification problems
%V 12
@article{Akshay2022-nv,
author = {Akshay, Akshay and Abedi, Masoud and Shekarchizadeh, Navid and Burkhard, Fiona C and Katoch, Mitali and Bigger-Allen, Alex and Adam, Rosalyn M and Monastyrskaya, Katia and Gheinani, Ali Hashemi},
biburl = {https://puma.scadsai.uni-leipzig.de/bibtex/2f267ae6f5784d1cae5dab78a3322b2ff/scadsfct},
journal = {GigaScience},
keywords = {topic_federatedlearn; machine learning; model evaluation; classification problems; Python package; unified evaluation score},
language = {en},
month = dec,
title = {{MLcps}: machine learning cumulative performance score for classification problems},
volume = 12,
year = 2022
}