@inproceedings{f6ebd38fc72c4534b1128628c500acd3,
  title     = {Proxy Model Explanations for Time Series {RNNs}},
  abstract  = {While machine learning models can produce accurate predictions of complex real-world phenomena, domain experts may be unwilling to trust such a prediction without an explanation of the model's behavior. This concern has motivated widespread research and produced many methods for interpreting black-box models. Many such methods explain predictions one-by-one, which can be slow and inconsistent across a large dataset, and ill-suited for time series applications. We introduce a proxy model approach that is fast to train, faithful to the original model, and globally consistent in its explanations. We compare our approach to several previous methods and find both that methods disagree with one another and that our approach improves over existing methods in an application to political event forecasting.},
  keywords  = {Causality, Explainability, Interpretability, Recurrent neural network, Time series},
  author    = {Wood-Doughty, Zach and Cachola, Isabel and Dredze, Mark},
  note      = {Funding Information: We acknowledge support provided by the Johns Hopkins Institute for Assured Autonomy. We thank Anna Buczak, Benjamin Baugher, and Adam Berlier for sharing the Crystal Cube model and data. Publisher Copyright: {\textcopyright} 2021 IEEE.; 20th IEEE International Conference on Machine Learning and Applications, ICMLA 2021 ; Conference date: 13-12-2021 Through 16-12-2021},
  year      = {2021},
  doi       = {10.1109/ICMLA52953.2021.00117},
  language  = {English (US)},
  series    = {Proceedings - 20th IEEE International Conference on Machine Learning and Applications, ICMLA 2021},
  publisher = {Institute of Electrical and Electronics Engineers Inc.},
  pages     = {698--703},
  editor    = {Wani, {M. Arif} and Sethi, {Ishwar K.} and Shi, Weisong and Qu, Guangzhi and Raicu, {Daniela Stan} and Jin, Ruoming},
  booktitle = {Proceedings - 20th IEEE International Conference on Machine Learning and Applications, ICMLA 2021},
}