
@article{ref1,
  author   = {Gan, Rick Kye and Ogbodo, Jude Chukwuebuka and Wee, Yong Zheng and Gan, Ann Zee and Gonz{\'a}lez, Pedro Arcos},
  title    = {Performance of {Google} {Bard} and {ChatGPT} in Mass Casualty Incidents Triage},
  journal  = {American Journal of Emergency Medicine},
  year     = {2023},
  volume   = {75},
  pages    = {72--78},
  abstract = {AIM: The objective of our research is to evaluate and compare the performance of ChatGPT, Google Bard, and medical students in performing START triage during mass casualty situations. METHOD: We conducted a cross-sectional analysis to compare ChatGPT, Google Bard, and medical students in mass casualty incident (MCI) triage using the Simple Triage And Rapid Treatment (START) method. A validated questionnaire with 15 diverse MCI scenarios was used to assess triage accuracy and content analysis in four categories: ``Walking wounded,'' ``Respiration,'' ``Perfusion,'' and ``Mental Status.'' Statistical analysis compared the results. RESULT: Google Bard demonstrated a notably higher accuracy of 60\%, while ChatGPT achieved an accuracy of 26.67\% (p = 0.002). Comparatively, medical students performed at an accuracy rate of 64.3\% in a previous study. However, there was no significant difference observed between Google Bard and medical students (p = 0.211). Qualitative content analysis of `walking-wounded', `respiration', `perfusion', and `mental status' indicated that Google Bard outperformed ChatGPT. CONCLUSION: Google Bard was found to be superior to ChatGPT in correctly performing mass casualty incident triage. Google Bard achieved an accuracy of 60\%, while chatGPT only achieved an accuracy of 26.67\%. This difference was statistically significant (p = 0.002).},
  language = {en},
  issn     = {0735-6757},
  doi      = {10.1016/j.ajem.2023.10.034},
}