@inproceedings{ad931da22c664bd4a0dcdee0c5fb9043,
title = "An Evaluation of Pretrained BERT Models for Comparing Semantic Similarity Across Unstructured Clinical Trial Texts",
abstract = "Processing unstructured clinical texts is often necessary to support certain tasks in biomedicine, such as matching patients to clinical trials. Among other methods, domain-specific language models have been built to utilize free-text information. This study evaluated the performance of Bidirectional Encoder Representations from Transformers (BERT) models in assessing the similarity between clinical trial texts. We compared an unstructured aggregated summary of clinical trials reviewed at the Johns Hopkins Molecular Tumor Board with the ClinicalTrials.gov records, focusing on the titles and eligibility criteria. Seven pretrained BERT-Based models were used in our analysis. Of the six biomedical-domain-specific models, only SciBERT outperformed the original BERT model by accurately assigning higher similarity scores to matched than mismatched trials. This finding is promising and shows that BERT and, likely, other language models may support patient-trial matching.",
keywords = "Clinical trial, bidirectional coder representations, word embeddings",
author = "Jessica Patricoski and Kory Kreimeyer and Archana Balan and Kent Hardart and Jessica Tao and Valsamo Anagnostou and Taxiarchis Botsis",
note = "Publisher Copyright: {\textcopyright} 2022 The authors and IOS Press.",
year = "2022",
doi = "10.3233/SHTI210848",
language = "English (US)",
series = "Studies in Health Technology and Informatics",
publisher = "IOS Press BV",
pages = "18--21",
editor = "John Mantas and Arie Hasman and Househ, {Mowafa S.} and Parisis Gallos and Emmanouil Zoulias and Joseph Liasko",
booktitle = "Informatics and Technology in Clinical Care and Public Health",
address = "Netherlands",
}