@article{c151fbb6a9de45ad9ac794699dabe8a5,
title = "The deformable most-likely-point paradigm",
abstract = "In this paper, we present three deformable registration algorithms designed within a paradigm that uses 3D statistical shape models to accomplish two tasks simultaneously: 1) register point features from previously unseen data to a statistically derived shape (e.g., mean shape), and 2) deform the statistically derived shape to estimate the shape represented by the point features. This paradigm, called the deformable most-likely-point paradigm, is motivated by the idea that generative shape models built from available data can be used to estimate previously unseen data. We developed three deformable registration algorithms within this paradigm using statistical shape models built from reliably segmented objects with correspondences. Results from several experiments show that our algorithms produce accurate registrations and reconstructions in a variety of applications, with errors up to CT resolution on medical datasets. Our code is available at https://github.com/AyushiSinha/cisstICP.",
keywords = "Deformable most-likely-point paradigm, Deformable registration, Shape inference, Statistical shape models",
author = "Sinha, Ayushi and Billings, {Seth D.} and Reiter, Austin and Liu, Xingtong and Ishii, Masaru and Hager, {Gregory D.} and Taylor, {Russell H.}",
note = "Funding Information: This work was funded by NIH R01-EB015530: Enhanced Navigation for Endoscopic Sinus Surgery through Video Analysis, the NSF Graduate Research Fellowship Program, a fellowship from Intuitive Surgical, Inc., a JHU Provost{\textquoteright}s Postdoctoral Fellowship, and other JHU internal funds. We would also like to acknowledge the JHU Applied Physics Laboratory for providing the pelvis meshes that were extracted from CTs obtained as part of the Allometry project, The Cancer Imaging Archive (TCIA) for the head CTs from which structures in the nasal cavity were extracted, and the University of Washington Graphics and Imaging Laboratory for making the human expression data available. Finally, we would like to thank Keenan Crane for allowing us to use a modified version of his saddle figure seen in Figs. 1, 2, and 3, and in the graphical abstract. Publisher Copyright: {\textcopyright} 2019 Elsevier B.V.",
year = "2019",
month = jul,
doi = "10.1016/j.media.2019.04.013",
language = "English (US)",
volume = "55",
pages = "148--164",
journal = "Medical Image Analysis",
issn = "1361-8415",
publisher = "Elsevier B.V.",
}