---
---
@article{Guo2024,
title = {A multi-center study on the adaptability of a shared foundation model for electronic health records},
volume = {7},
ISSN = {2398-6352},
url = {http://dx.doi.org/10.1038/s41746-024-01166-w},
DOI = {10.1038/s41746-024-01166-w},
number = {1},
journal = {npj Digital Medicine},
publisher = {Springer Science and Business Media LLC},
author = {Guo, Lin Lawrence and Fries, Jason and Steinberg, Ethan and Fleming, Scott Lanyon and Morse, Keith and Aftandilian, Catherine and Posada, Jose and Shah, Nigam and Sung, Lillian},
year = {2024},
month = jun
}
@misc{https://doi.org/10.48550/arxiv.2406.13264,
doi = {10.48550/ARXIV.2406.13264},
url = {https://arxiv.org/abs/2406.13264},
author = {Wornow, Michael and Narayan, Avanika and Viggiano, Ben and Khare, Ishan S. and Verma, Tathagat and Thompson, Tibor and Hernandez, Miguel Angel Fuentes and Sundar, Sudharsan and Trujillo, Chloe and Chawla, Krrish and Lu, Rongfei and Shen, Justin and Nagaraj, Divya and Martinez, Joshua and Agrawal, Vardhan and Hudson, Althea and Shah, Nigam H. and Re, Christopher},
keywords = {Artificial Intelligence (cs.AI), Machine Learning (cs.LG), Software Engineering (cs.SE), FOS: Computer and information sciences},
title = {Do Multimodal Foundation Models Understand Enterprise Workflows? A Benchmark for Business Process Management Tasks},
publisher = {arXiv},
year = {2024},
copyright = {Creative Commons Attribution 4.0 International}
}
@article{Chang2024,
author={Chang, Yin-Hsi
and Ong, Jasmine Chiat Ling
and William, Wasswa
and Butte, Atul J.
and Shah, Nigam H.
and Chew, Lita Sui Tjien
and Liu, Nan
and Doshi-Velez, Finale
and Lu, Wei
and Savulescu, Julian
and Ting, Daniel},
title={Large Language Models in Medicine: Addressing Ethical Challenges},
journal={Investigative Ophthalmology {\&} Visual Science},
year={2024},
month={Jun},
day={17},
volume={65},
number={7},
pages={362-362},
abstract={Striking a balance between advancing innovation in generative AI, in particular large language models, and maintaining ethical principles is crucial for medical applications. We herein propose a new framework of collaboration, grounded in four bioethical principles, to promote responsible use of LLMs. Original papers, reviews, narratives, correspondences, perspectives, and viewpoints that shed light on LLM use in medicine were identified through searches of PubMed and arXiv from January 2020 to August 2023. We excluded publications that did not discuss ethical issues. A total of 58 nonduplicate articles out of 1666 initial search results were retrieved for meticulous evaluation. Major ethical conundrums of LLMs in medicine include cognitive bias, data privacy, transparency, hallucinations, accountability and responsibility. To harness the full potential of LLMs, clear ethical guidelines serve as the robust foundation, ensuring the use of LLMs remains both beneficial and just. A trinary model depicts the roles of 3 different stakeholders: patient, clinician and LLM developer, reframing the traditional patient-clinician relationship. We suggest that users follow the bioethical principles of beneficence, nonmaleficence, autonomy and justice, which guide the responsible use of LLMs in medicine. Patients can actively participate in decision-making and be fully aware of the technological tools shaping their healthcare journey. This balanced approach facilitates symbiotic integration of LLMs into healthcare, leveraging technological capabilities while upholding bioethical standards. This abstract was presented at the 2024 ARVO Annual Meeting, held in Seattle, WA, May 5-9, 2024.},
issn={1552-5783}
}
@article{doi:10.1056/AIra2400038,
author = {Jasmine Chiat Ling Ong and Shelley Yin-Hsi Chang and Wasswa William and Atul J. Butte and Nigam H. Shah and Lita Sui Tjien Chew and Nan Liu and Finale Doshi-Velez and Wei Lu and Julian Savulescu and Daniel Shu Wei Ting},
title = {Medical Ethics of Large Language Models in Medicine},
journal = {NEJM AI},
volume = {1},
number = {7},
pages = {AIra2400038},
year = {2024},
doi = {10.1056/AIra2400038},
URL = {https://ai.nejm.org/doi/abs/10.1056/AIra2400038},
eprint = {https://ai.nejm.org/doi/pdf/10.1056/AIra2400038},
abstract = {Large language models (LLMs) have shown significant promise related to their application in medical research, medical education, and clinical tasks. While acknowledging their capabilities, we face the challenge of striking a balance between defining and holding ethical boundaries and driving innovation in LLM technology for medicine. We herein propose a framework, grounded in four bioethical principles, to promote the responsible use of LLMs. This model requires the responsible application of LLMs by three parties — the patient, the clinician, and the systems that govern the LLM itself — and suggests potential approaches to mitigating the risks of LLMs in medicine. This approach allows us to use LLMs ethically, equitably, and effectively in medicine. Large language models (LLMs) are adopted in diverse applications, generating exciting innovations in health care. Widespread investigation of LLMs has introduced important ethical discussions, including the potential for bias perpetuation and medical misinformation dissemination. A framework guided by bioethical principles could guide responsible use of LLMs, but it requires a collaborative effort between patients, clinicians, and systems that govern LLM development and implementation.}
}
@misc{https://doi.org/10.48550/arxiv.2406.06512,
doi = {10.48550/ARXIV.2406.06512},
url = {https://arxiv.org/abs/2406.06512},
author = {Blankemeier, Louis and Cohen, Joseph Paul and Kumar, Ashwin and Van Veen, Dave and Gardezi, Syed Jamal Safdar and Paschali, Magdalini and Chen, Zhihong and Delbrouck, Jean-Benoit and Reis, Eduardo and Truyts, Cesar and Bluethgen, Christian and Jensen, Malte Engmann Kjeldskov and Ostmeier, Sophie and Varma, Maya and Valanarasu, Jeya Maria Jose and Fang, Zhongnan and Huo, Zepeng and Nabulsi, Zaid and Ardila, Diego and Weng, Wei-Hung and Junior, Edson Amaro and Ahuja, Neera and Fries, Jason and Shah, Nigam H. and Johnston, Andrew and Boutin, Robert D. and Wentland, Andrew and Langlotz, Curtis P. and Hom, Jason and Gatidis, Sergios and Chaudhari, Akshay S.},
keywords = {Computer Vision and Pattern Recognition (cs.CV), Artificial Intelligence (cs.AI), FOS: Computer and information sciences},
title = {Merlin: A Vision Language Foundation Model for 3D Computed Tomography},
publisher = {arXiv},
year = {2024},
copyright = {Creative Commons Attribution 4.0 International}
}
@article{Ong2024,
title = {Ethical and regulatory challenges of large language models in medicine},
volume = {6},
ISSN = {2589-7500},
url = {http://dx.doi.org/10.1016/S2589-7500(24)00061-X},
DOI = {10.1016/s2589-7500(24)00061-x},
number = {6},
journal = {The Lancet Digital Health},
publisher = {Elsevier BV},
author = {Ong, Jasmine Chiat Ling and Chang, Shelley Yin-Hsi and William, Wasswa and Butte, Atul J and Shah, Nigam H and Chew, Lita Sui Tjien and Liu, Nan and Doshi-Velez, Finale and Lu, Wei and Savulescu, Julian and Ting, Daniel Shu Wei},
year = {2024},
month = jun,
pages = {e428–e432}
}
@article{10.1093/jamia/ocae043,
author = {Jindal, Jenelle A and Lungren, Matthew P and Shah, Nigam H},
title = "{Ensuring useful adoption of generative artificial intelligence in healthcare}",
journal = {Journal of the American Medical Informatics Association},
volume = {31},
number = {6},
pages = {1441-1444},
year = {2024},
month = mar,
abstract = "{This article aims to examine how generative artificial intelligence (AI) can be adopted with the most value in health systems, in response to the Executive Order on AI.We reviewed how technology has historically been deployed in healthcare, and evaluated recent examples of deployments of both traditional AI and generative AI (GenAI) with a lens on value.Traditional AI and GenAI are different technologies in terms of their capability and modes of current deployment, which have implications on value in health systems.Traditional AI when applied with a framework top-down can realize value in healthcare. GenAI in the short term when applied top-down has unclear value, but encouraging more bottom-up adoption has the potential to provide more benefit to health systems and patients.GenAI in healthcare can provide the most value for patients when health systems adapt culturally to grow with this new technology and its adoption patterns.}",
issn = {1527-974X},
doi = {10.1093/jamia/ocae043},
url = {https://doi.org/10.1093/jamia/ocae043},
eprint = {https://academic.oup.com/jamia/article-pdf/31/6/1441/57768946/ocae043.pdf},
}
@misc{https://doi.org/10.48550/arxiv.2405.03710,
doi = {10.48550/ARXIV.2405.03710},
url = {https://arxiv.org/abs/2405.03710},
author = {Wornow, Michael and Narayan, Avanika and Opsahl-Ong, Krista and McIntyre, Quinn and Shah, Nigam H. and Re, Christopher},
keywords = {Software Engineering (cs.SE), Artificial Intelligence (cs.AI), Machine Learning (cs.LG), FOS: Computer and information sciences},
title = {Automating the Enterprise with Foundation Models},
publisher = {arXiv},
year = {2024},
copyright = {Creative Commons Attribution 4.0 International}
}
@article{Bedi2024,
title = {A Systematic Review of Testing and Evaluation of Healthcare Applications of Large Language Models (LLMs)},
url = {http://dx.doi.org/10.1101/2024.04.15.24305869},
DOI = {10.1101/2024.04.15.24305869},
publisher = {Cold Spring Harbor Laboratory},
author = {Bedi, Suhana and Liu, Yutong and Orr-Ewing, Lucy and Dash, Dev and Koyejo, Sanmi and Callahan, Alison and Fries, Jason A. and Wornow, Michael and Swaminathan, Akshay and Lehmann, Lisa Soleymani and Hong, Hyo Jung and Kashyap, Mehr and Chaurasia, Akash R. and Shah, Nirav R. and Singh, Karandeep and Tazbaz, Troy and Milstein, Arnold and Pfeffer, Michael A. and Shah, Nigam H.},
year = {2024},
month = apr
}
@article{Yoo2024,
title = {Scalable Approach to Consumer Wearable Postmarket Surveillance: Development and Validation Study},
volume = {12},
ISSN = {2291-9694},
url = {http://dx.doi.org/10.2196/51171},
DOI = {10.2196/51171},
journal = {JMIR Medical Informatics},
publisher = {JMIR Publications Inc.},
author = {Yoo, Richard M and Viggiano, Ben T and Pundi, Krishna N and Fries, Jason A and Zahedivash, Aydin and Podchiyska, Tanya and Din, Natasha and Shah, Nigam H},
year = {2024},
month = apr,
pages = {e51171–e51171}
}
@article{Fleming2024,
title = {MedAlign: A Clinician-Generated Dataset for Instruction Following with Electronic Medical Records},
volume = {38},
ISSN = {2159-5399},
url = {http://dx.doi.org/10.1609/aaai.v38i20.30205},
DOI = {10.1609/aaai.v38i20.30205},
number = {20},
journal = {Proceedings of the AAAI Conference on Artificial Intelligence},
publisher = {Association for the Advancement of Artificial Intelligence (AAAI)},
author = {Fleming, Scott L. and Lozano, Alejandro and Haberkorn, William J. and Jindal, Jenelle A. and Reis, Eduardo and Thapa, Rahul and Blankemeier, Louis and Genkins, Julian Z. and Steinberg, Ethan and Nayak, Ashwin and Patel, Birju and Chiang, Chia-Chun and Callahan, Alison and Huo, Zepeng and Gatidis, Sergios and Adams, Scott and Fayanju, Oluseyi and Shah, Shreya J. and Savage, Thomas and Goh, Ethan and Chaudhari, Akshay S. and Aghaeepour, Nima and Sharp, Christopher and Pfeffer, Michael A. and Liang, Percy and Chen, Jonathan H. and Morse, Keith E. and Brunskill, Emma P. and Fries, Jason A. and Shah, Nigam H.},
year = {2024},
month = mar,
pages = {22021–22030}
}
@misc{https://doi.org/10.48550/arxiv.2403.07911,
doi = {10.48550/ARXIV.2403.07911},
url = {https://arxiv.org/abs/2403.07911},
author = {Callahan, Alison and McElfresh, Duncan and Banda, Juan M. and Bunney, Gabrielle and Char, Danton and Chen, Jonathan and Corbin, Conor K. and Dash, Debadutta and Downing, Norman L. and Jain, Sneha S. and Kotecha, Nikesh and Masterson, Jonathan and Mello, Michelle M. and Morse, Keith and Nallan, Srikar and Pandya, Abby and Revri, Anurang and Sharma, Aditya and Sharp, Christopher and Thapa, Rahul and Wornow, Michael and Youssef, Alaa and Pfeffer, Michael A. and Shah, Nigam H.},
keywords = {Computers and Society (cs.CY), Artificial Intelligence (cs.AI), FOS: Computer and information sciences},
title = {Standing on FURM ground -- A framework for evaluating Fair, Useful, and Reliable AI Models in healthcare systems},
publisher = {arXiv},
year = {2024},
copyright = {Creative Commons Attribution Non Commercial No Derivatives 4.0 International}
}
@article{Guo2024a,
title = {Characterizing the limitations of using diagnosis codes in the context of machine learning for healthcare},
volume = {24},
ISSN = {1472-6947},
url = {http://dx.doi.org/10.1186/s12911-024-02449-8},
DOI = {10.1186/s12911-024-02449-8},
number = {1},
journal = {BMC Medical Informatics and Decision Making},
publisher = {Springer Science and Business Media LLC},
author = {Guo, Lin Lawrence and Morse, Keith E. and Aftandilian, Catherine and Steinberg, Ethan and Fries, Jason and Posada, Jose and Fleming, Scott Lanyon and Lemmon, Joshua and Jessa, Karim and Shah, Nigam and Sung, Lillian},
year = {2024},
month = feb
}