2024
Barszczyk, Mark; Singh, Navneet; Alikhassi, Afsaneh; Oirschot, Matthew Van; Kuling, Grey; Kiss, Alex; Gandhi, Sonal; Nofech-Mozes, Sharon; Hong, Nicole Look; Bilbily, Alexander; Martel, Anne; Matsuura, Naomi; Curpen, Belinda
3D CT Radiomic Analysis Improves Detection of Axillary Lymph Node Metastases Compared to Conventional Features in Patients With Locally Advanced Breast Cancer Journal Article
In: Journal of Breast Imaging, 2024, ISSN: 2631-6110.
@article{Barszczyk2024,
title = {3D CT Radiomic Analysis Improves Detection of Axillary Lymph Node Metastases Compared to Conventional Features in Patients With Locally Advanced Breast Cancer},
author = {Mark Barszczyk and Navneet Singh and Afsaneh Alikhassi and Matthew Van Oirschot and Grey Kuling and Alex Kiss and Sonal Gandhi and Sharon Nofech-Mozes and Nicole Look Hong and Alexander Bilbily and Anne Martel and Naomi Matsuura and Belinda Curpen},
doi = {10.1093/jbi/wbae022},
issn = {2631-6110},
year = {2024},
date = {2024-01-01},
journal = {Journal of Breast Imaging},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Reinke, Annika; Tizabi, Minu D.; Baumgartner, Michael; Eisenmann, Matthias; Heckmann-Nötzel, Doreen; Kavur, A. Emre; Rädsch, Tim; Sudre, Carole H.; Acion, Laura; Antonelli, Michela; Arbel, Tal; Bakas, Spyridon; Benis, Arriel; Buettner, Florian; Cardoso, M. Jorge; Cheplygina, Veronika; Chen, Jianxu; Christodoulou, Evangelia; Cimini, Beth A.; Farahani, Keyvan; Ferrer, Luciana; Galdran, Adrian; Ginneken, Bram; Glocker, Ben; Godau, Patrick; Hashimoto, Daniel A.; Hoffman, Michael M.; Huisman, Merel; Isensee, Fabian; Jannin, Pierre; Kahn, Charles E.; Kainmueller, Dagmar; Kainz, Bernhard; Karargyris, Alexandros; Kleesiek, Jens; Kofler, Florian; Kooi, Thijs; Kopp-Schneider, Annette; Kozubek, Michal; Kreshuk, Anna; Kurc, Tahsin; Landman, Bennett A.; Litjens, Geert; Madani, Amin; Maier-Hein, Klaus; Martel, Anne L.; Meijering, Erik; Menze, Bjoern; Moons, Karel G. M.; Müller, Henning; Nichyporuk, Brennan; Nickel, Felix; Petersen, Jens; Rafelski, Susanne M.; Rajpoot, Nasir; Reyes, Mauricio; Riegler, Michael A.; Rieke, Nicola; Saez-Rodriguez, Julio; Sánchez, Clara I.; Shetty, Shravya; Summers, Ronald M.; Taha, Abdel A.; Tiulpin, Aleksei; Tsaftaris, Sotirios A.; Calster, Ben Van; Varoquaux, Gaël; Yaniv, Ziv R.; Jäger, Paul F.; Maier-Hein, Lena
Understanding metric-related pitfalls in image analysis validation Journal Article
In: Nature Methods, vol. 21, iss. 2, pp. 182-194, 2024, ISSN: 1548-7091.
@article{Reinke2024,
title = {Understanding metric-related pitfalls in image analysis validation},
author = {Annika Reinke and Minu D. Tizabi and Michael Baumgartner and Matthias Eisenmann and Doreen Heckmann-Nötzel and A. Emre Kavur and Tim Rädsch and Carole H. Sudre and Laura Acion and Michela Antonelli and Tal Arbel and Spyridon Bakas and Arriel Benis and Florian Buettner and M. Jorge Cardoso and Veronika Cheplygina and Jianxu Chen and Evangelia Christodoulou and Beth A. Cimini and Keyvan Farahani and Luciana Ferrer and Adrian Galdran and Bram Ginneken and Ben Glocker and Patrick Godau and Daniel A. Hashimoto and Michael M. Hoffman and Merel Huisman and Fabian Isensee and Pierre Jannin and Charles E. Kahn and Dagmar Kainmueller and Bernhard Kainz and Alexandros Karargyris and Jens Kleesiek and Florian Kofler and Thijs Kooi and Annette Kopp-Schneider and Michal Kozubek and Anna Kreshuk and Tahsin Kurc and Bennett A. Landman and Geert Litjens and Amin Madani and Klaus Maier-Hein and Anne L. Martel and Erik Meijering and Bjoern Menze and Karel G. M. Moons and Henning Müller and Brennan Nichyporuk and Felix Nickel and Jens Petersen and Susanne M. Rafelski and Nasir Rajpoot and Mauricio Reyes and Michael A. Riegler and Nicola Rieke and Julio Saez-Rodriguez and Clara I. Sánchez and Shravya Shetty and Ronald M. Summers and Abdel A. Taha and Aleksei Tiulpin and Sotirios A. Tsaftaris and Ben Van Calster and Gaël Varoquaux and Ziv R. Yaniv and Paul F. Jäger and Lena Maier-Hein},
doi = {10.1038/s41592-023-02150-0},
issn = {1548-7091},
year = {2024},
date = {2024-01-01},
journal = {Nature Methods},
volume = {21},
issue = {2},
pages = {182-194},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Maier-Hein, Lena; Reinke, Annika; Godau, Patrick; Tizabi, Minu D.; Buettner, Florian; Christodoulou, Evangelia; Glocker, Ben; Isensee, Fabian; Kleesiek, Jens; Kozubek, Michal; Reyes, Mauricio; Riegler, Michael A.; Wiesenfarth, Manuel; Kavur, A. Emre; Sudre, Carole H.; Baumgartner, Michael; Eisenmann, Matthias; Heckmann-Nötzel, Doreen; Rädsch, Tim; Acion, Laura; Antonelli, Michela; Arbel, Tal; Bakas, Spyridon; Benis, Arriel; Blaschko, Matthew B.; Cardoso, M. Jorge; Cheplygina, Veronika; Cimini, Beth A.; Collins, Gary S.; Farahani, Keyvan; Ferrer, Luciana; Galdran, Adrian; Ginneken, Bram; Haase, Robert; Hashimoto, Daniel A.; Hoffman, Michael M.; Huisman, Merel; Jannin, Pierre; Kahn, Charles E.; Kainmueller, Dagmar; Kainz, Bernhard; Karargyris, Alexandros; Karthikesalingam, Alan; Kofler, Florian; Kopp-Schneider, Annette; Kreshuk, Anna; Kurc, Tahsin; Landman, Bennett A.; Litjens, Geert; Madani, Amin; Maier-Hein, Klaus; Martel, Anne L.; Mattson, Peter; Meijering, Erik; Menze, Bjoern; Moons, Karel G. M.; Müller, Henning; Nichyporuk, Brennan; Nickel, Felix; Petersen, Jens; Rajpoot, Nasir; Rieke, Nicola; Saez-Rodriguez, Julio; Sánchez, Clara I.; Shetty, Shravya; Smeden, Maarten; Summers, Ronald M.; Taha, Abdel A.; Tiulpin, Aleksei; Tsaftaris, Sotirios A.; Calster, Ben Van; Varoquaux, Gaël; Jäger, Paul F.
Metrics reloaded: recommendations for image analysis validation Journal Article
In: Nature Methods, vol. 21, iss. 2, pp. 195-212, 2024, ISSN: 1548-7091.
@article{MaierHein2024,
title = {Metrics reloaded: recommendations for image analysis validation},
author = {Lena Maier-Hein and Annika Reinke and Patrick Godau and Minu D. Tizabi and Florian Buettner and Evangelia Christodoulou and Ben Glocker and Fabian Isensee and Jens Kleesiek and Michal Kozubek and Mauricio Reyes and Michael A. Riegler and Manuel Wiesenfarth and A. Emre Kavur and Carole H. Sudre and Michael Baumgartner and Matthias Eisenmann and Doreen Heckmann-Nötzel and Tim Rädsch and Laura Acion and Michela Antonelli and Tal Arbel and Spyridon Bakas and Arriel Benis and Matthew B. Blaschko and M. Jorge Cardoso and Veronika Cheplygina and Beth A. Cimini and Gary S. Collins and Keyvan Farahani and Luciana Ferrer and Adrian Galdran and Bram Ginneken and Robert Haase and Daniel A. Hashimoto and Michael M. Hoffman and Merel Huisman and Pierre Jannin and Charles E. Kahn and Dagmar Kainmueller and Bernhard Kainz and Alexandros Karargyris and Alan Karthikesalingam and Florian Kofler and Annette Kopp-Schneider and Anna Kreshuk and Tahsin Kurc and Bennett A. Landman and Geert Litjens and Amin Madani and Klaus Maier-Hein and Anne L. Martel and Peter Mattson and Erik Meijering and Bjoern Menze and Karel G. M. Moons and Henning Müller and Brennan Nichyporuk and Felix Nickel and Jens Petersen and Nasir Rajpoot and Nicola Rieke and Julio Saez-Rodriguez and Clara I. Sánchez and Shravya Shetty and Maarten Smeden and Ronald M. Summers and Abdel A. Taha and Aleksei Tiulpin and Sotirios A. Tsaftaris and Ben Van Calster and Gaël Varoquaux and Paul F. Jäger},
doi = {10.1038/s41592-023-02151-z},
issn = {1548-7091},
year = {2024},
date = {2024-01-01},
journal = {Nature Methods},
volume = {21},
issue = {2},
pages = {195-212},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ramanathan, Vishwesh; Martel, Anne L.
Self Supervised Multi-view Graph Representation Learning in Digital Pathology Proceedings Article
In: pp. 74-84, 2024.
@inproceedings{Ramanathan2024,
title = {Self Supervised Multi-view Graph Representation Learning in Digital Pathology},
author = {Vishwesh Ramanathan and Anne L. Martel},
doi = {10.1007/978-3-031-55088-1_7},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
pages = {74-84},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Kuling, Grey; Curpen, Belinda; Martel, Anne L.
Accurate estimation of density and background parenchymal enhancement in breast MRI using deep regression and transformers Proceedings Article
In: Li, Hui; Giger, Maryellen L.; Drukker, Karen; Whitney, Heather M. (Ed.): 17th International Workshop on Breast Imaging (IWBI 2024), pp. 22, SPIE, 2024, ISBN: 9781510680203.
@inproceedings{Kuling2024,
title = {Accurate estimation of density and background parenchymal enhancement in breast MRI using deep regression and transformers},
author = {Grey Kuling and Belinda Curpen and Anne L. Martel},
editor = {Hui Li and Maryellen L. Giger and Karen Drukker and Heather M. Whitney},
doi = {10.1117/12.3025341},
isbn = {9781510680203},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
booktitle = {17th International Workshop on Breast Imaging (IWBI 2024)},
pages = {22},
publisher = {SPIE},
abstract = {Early detection of breast cancer is important for improving survival rates. Based on accurate and tissue-specific risk factors, such as breast density and background parenchymal enhancement (BPE), risk-stratified screening can help identify high-risk women and provide personalized screening plans, ultimately leading to better outcomes. Measurements of density and BPE are carried out through image segmentation, but volumetric measurements may not capture the qualitative scale of these tissue-specific risk factors. This study aimed to create deep regression models that estimate the interval scale underlying the BI-RADS density and BPE categories. These models incorporate a 3D convolutional encoder and transformer layers to comprehend time-sequential data in DCE-MRI. The correlation between the models and the BI-RADS categories was evaluated with Spearman coefficients. Using 1024 patients with a BI-RADS assessment score of 3 or less and no prior history of breast cancer, the models were trained on 50% of the data and tested on 50%. The density and BPE ground truth labels were extracted from the radiology reports using BI-RADS BERT. The ordinal classes were then translated to a continuous interval scale using a linear link function. The density regression model is strongly correlated to the BI-RADS category with a correlation of 0.77, slightly lower than segmentation %FGT. The BPE regression model with transformer layers shows a moderate correlation with radiologists at 0.52, similar to the segmentation %BPE. The deep regression transformer has an advantage over segmentation as it doesn’t need time-point image registration, making it easier to use.},
keywords = {_breast_segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Han; Martel, Anne L.
Towards improved breast cancer detection on digital mammograms using local self-attention-based transformer Proceedings Article
In: Li, Hui; Giger, Maryellen L.; Drukker, Karen; Whitney, Heather M. (Ed.): 17th International Workshop on Breast Imaging (IWBI 2024), pp. 23, SPIE, 2024, ISBN: 9781510680203.
@inproceedings{Chen2024,
title = {Towards improved breast cancer detection on digital mammograms using local self-attention-based transformer},
author = {Han Chen and Anne L. Martel},
editor = {Hui Li and Maryellen L. Giger and Karen Drukker and Heather M. Whitney},
doi = {10.1117/12.3025375},
isbn = {9781510680203},
year = {2024},
date = {2024-01-01},
booktitle = {17th International Workshop on Breast Imaging (IWBI 2024)},
pages = {23},
publisher = {SPIE},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Cheung, Alison M.; Han, Wenchao; Zhou, Xuerong; Wang, Dan; Ramanathan, Vishwesh; Qu, Owen; Martel, Anne L.; Yaffe, Martin J.
Spatial analysis of immune cells in breast cancer using k-nearest neighbor graphs and Louvain-community clustering of immunofluorescent protein multiplexing images Proceedings Article
In: Li, Hui; Giger, Maryellen L.; Drukker, Karen; Whitney, Heather M. (Ed.): 17th International Workshop on Breast Imaging (IWBI 2024), pp. 36, SPIE, 2024, ISBN: 9781510680203.
@inproceedings{Cheung2024,
title = {Spatial analysis of immune cells in breast cancer using k-nearest neighbor graphs and Louvain-community clustering of immunofluorescent protein multiplexing images},
author = {Alison M. Cheung and Wenchao Han and Xuerong Zhou and Dan Wang and Vishwesh Ramanathan and Owen Qu and Anne L. Martel and Martin J. Yaffe},
editor = {Hui Li and Maryellen L. Giger and Karen Drukker and Heather M. Whitney},
doi = {10.1117/12.3025909},
isbn = {9781510680203},
year = {2024},
date = {2024-01-01},
booktitle = {17th International Workshop on Breast Imaging (IWBI 2024)},
pages = {36},
publisher = {SPIE},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
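The abstract above names a concrete pipeline: build a k-nearest-neighbor graph over cell positions, then find spatial communities with Louvain clustering. A minimal sketch of that pipeline follows; the random cell coordinates, neighbor count, and library choices (scikit-learn, NetworkX >= 3.0) are illustrative assumptions, not the authors' implementation.

# Sketch: k-NN graph over cell centroids + Louvain community detection.
# All data are placeholders standing in for segmented-cell positions.
import numpy as np
import networkx as nx
from sklearn.neighbors import kneighbors_graph

cells = np.random.rand(500, 2) * 1000          # placeholder (x, y) cell centroids, microns
adj = kneighbors_graph(cells, n_neighbors=10, mode="connectivity")
graph = nx.from_scipy_sparse_array(adj)        # undirected k-NN graph over cells
communities = nx.community.louvain_communities(graph, seed=0)
print(f"{len(communities)} spatial communities detected")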
2023
Boone, Lyndon; Biparva, Mahdi; Forooshani, Parisa Mojiri; Ramirez, Joel; Masellis, Mario; Bartha, Robert; Symons, Sean; Strother, Stephen; Black, Sandra E.; Heyn, Chris; Martel, Anne L.; Swartz, Richard H.; Goubran, Maged
ROOD-MRI: Benchmarking the robustness of deep learning segmentation models to out-of-distribution and corrupted data in MRI Journal Article
In: NeuroImage, vol. 278, pp. 120289, 2023, ISSN: 1053-8119.
@article{Boone2023,
title = {ROOD-MRI: Benchmarking the robustness of deep learning segmentation models to out-of-distribution and corrupted data in MRI},
author = {Lyndon Boone and Mahdi Biparva and Parisa Mojiri Forooshani and Joel Ramirez and Mario Masellis and Robert Bartha and Sean Symons and Stephen Strother and Sandra E. Black and Chris Heyn and Anne L. Martel and Richard H. Swartz and Maged Goubran},
url = {https://doi.org/10.1016/j.neuroimage.2023.120289 https://linkinghub.elsevier.com/retrieve/pii/S1053811923004408},
doi = {10.1016/j.neuroimage.2023.120289},
issn = {1053-8119},
year = {2023},
date = {2023-09-01},
journal = {NeuroImage},
volume = {278},
pages = {120289},
abstract = {Deep artificial neural networks (DNNs) have moved to the forefront of medical image analysis due to their success in classification, segmentation, and detection challenges. A principal challenge in large-scale deployment of DNNs in neuroimage analysis is the potential for shifts in signal-to-noise ratio, contrast, resolution, and presence of artifacts from site to site due to variances in scanners and acquisition protocols. DNNs are famously susceptible to these distribution shifts in computer vision. Currently, there are no benchmarking platforms or frameworks to assess the robustness of new and existing models to specific distribution shifts in MRI, and accessible multi-site benchmarking datasets are still scarce or task-specific. To address these limitations, we propose ROOD-MRI: a novel platform for benchmarking the Robustness of DNNs to Out-Of-Distribution (OOD) data, corruptions, and artifacts in MRI. This flexible platform provides modules for generating benchmarking datasets using transforms that model distribution shifts in MRI, implementations of newly derived benchmarking metrics for image segmentation, and examples for using the methodology with new models and tasks. We apply our methodology to hippocampus, ventricle, and white matter hyperintensity segmentation in several large studies, providing the hippocampus dataset as a publicly available benchmark. By evaluating modern DNNs on these datasets, we demonstrate that they are highly susceptible to distribution shifts and corruptions in MRI. We show that while data augmentation strategies can substantially improve robustness to OOD data for anatomical segmentation tasks, modern DNNs using augmentation still lack robustness in more challenging lesion-based segmentation tasks. We finally benchmark U-Nets and vision transformers, finding robustness susceptibility to particular classes of transforms across architectures. The presented open-source platform enables generating new benchmarking datasets and comparing across models to study model design that results in improved robustness to OOD data and corruptions in MRI.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bahlmann, Laura C.; Xue, Chang; Chin, Allysia A.; Skirzynska, Arianna; Lu, Joy; Thériault, Brigitte; Uehling, David; Yerofeyeva, Yulia; Peters, Rachel; Liu, Kela; Chen, Jianan; Martel, Anne L.; Yaffe, Martin; Al-awar, Rima; Goswami, Rashmi S.; Ylanko, Jarkko; Andrews, David W.; Kuruvilla, John; Laister, Rob C.; Shoichet, Molly S.
Targeting tumour-associated macrophages in Hodgkin lymphoma using engineered extracellular matrix-mimicking cryogels Journal Article
In: Biomaterials, vol. 297, pp. 122121, 2023, ISSN: 0142-9612.
@article{Bahlmann2023,
title = {Targeting tumour-associated macrophages in Hodgkin lymphoma using engineered extracellular matrix-mimicking cryogels},
author = {Laura C. Bahlmann and Chang Xue and Allysia A. Chin and Arianna Skirzynska and Joy Lu and Brigitte Thériault and David Uehling and Yulia Yerofeyeva and Rachel Peters and Kela Liu and Jianan Chen and Anne L. Martel and Martin Yaffe and Rima Al-awar and Rashmi S. Goswami and Jarkko Ylanko and David W. Andrews and John Kuruvilla and Rob C. Laister and Molly S. Shoichet},
url = {https://doi.org/10.1016/j.biomaterials.2023.122121 https://linkinghub.elsevier.com/retrieve/pii/S0142961223001291},
doi = {10.1016/j.biomaterials.2023.122121},
issn = {0142-9612},
year = {2023},
date = {2023-06-01},
journal = {Biomaterials},
volume = {297},
pages = {122121},
abstract = {Tumour-associated macrophages are linked with poor prognosis and resistance to therapy in Hodgkin lymphoma; however, there are no suitable preclinical models to identify macrophage-targeting therapeutics. We used primary human tumours to guide the development of a mimetic cryogel, wherein Hodgkin (but not Non-Hodgkin) lymphoma cells promoted primary human macrophage invasion. In an invasion inhibitor screen, we identified five drug hits that significantly reduced tumour-associated macrophage invasion: marimastat, batimastat, AS1517499, ruxolitinib, and PD-169316. Importantly, ruxolitinib has demonstrated recent success in Hodgkin lymphoma clinical trials. Both ruxolitinib and PD-169316 (a p38 mitogen-activated protein kinase (p38 MAPK) inhibitor) decreased the percent of M2-like macrophages; however, only PD-169316 enhanced the percentage of M1-like macrophages. We validated p38 MAPK as an anti-invasion drug target with five additional drugs using a high-content imaging platform. With our biomimetic cryogel, we modeled macrophage invasion in Hodgkin lymphoma and then used it for target discovery and drug screening, ultimately identifying potential future therapeutics.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Moncayo, Ricardo; Martel, Anne L.; Romero, Eduardo
Removing non-nuclei information from histopathological images: A preprocessing step towards improving nuclei segmentation methods Journal Article
In: Journal of Pathology Informatics, pp. 100315, 2023, ISSN: 2153-3539.
@article{Moncayo2023,
title = {Removing non-nuclei information from histopathological images: A preprocessing step towards improving nuclei segmentation methods},
author = {Ricardo Moncayo and Anne L. Martel and Eduardo Romero},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2153353923001293},
doi = {10.1016/j.jpi.2023.100315},
issn = {2153-3539},
year = {2023},
date = {2023-04-01},
journal = {Journal of Pathology Informatics},
pages = {100315},
abstract = {Disease interpretation by computer-aided diagnosis systems in digital pathology depends on reliable detection and segmentation of nuclei in hematoxylin and eosin (HE) images. These 2 tasks are challenging since appearance of both cell nuclei and background structures are very variable. This paper presents a method to improve nuclei detection and segmentation in HE images by removing tiles that only contain background information. The method divides each image into smaller patches and uses their projection to the noiselet space to capture different spatial features from non-nuclei background and nuclei structures. The noiselet features are clustered by a K-means algorithm and the resultant partition, defined by the cluster centroids, is herein named the noiselet code-book. A part of an image, a tile, is divided into patches and represented by the histogram of occurrences of the projected patches in the noiselet code-book. Finally, with these histograms, a classifier learns to differentiate between nuclei and non-nuclei tiles. By applying a conventional watershed-marked method to detect and segment nuclei, evaluation consisted in comparing pure watershed method against denoising-plus-watershed in an open database with 8 different types of tissues. The averaged F-score of nuclei detection improved from 0.830 to 0.86 and the dice score after segmentation increased from 0.701 to 0.723.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
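As a rough illustration of the code-book idea in this abstract — patches projected into a feature space, clustered by K-means into a code-book, and tiles represented by histograms of code-word occurrences — here is a bag-of-visual-words sketch. The random patch features stand in for the noiselet projection, and the random-forest tile classifier is an assumption; all data are placeholders.

# Sketch: K-means code-book over patch features, histogram representation
# per tile, and a classifier separating nuclei from non-nuclei tiles.
import numpy as np
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(0)
patches = rng.random((10000, 64))                     # placeholder patch features
codebook = KMeans(n_clusters=32, n_init=10).fit(patches)

def tile_histogram(tile_patches):
    """Normalized histogram of code-word occurrences for one tile's patches."""
    words = codebook.predict(tile_patches)
    return np.bincount(words, minlength=32) / len(words)

tiles = [rng.random((100, 64)) for _ in range(200)]   # placeholder tiles
X = np.stack([tile_histogram(t) for t in tiles])
y = rng.integers(0, 2, size=200)                      # 1 = tile contains nuclei
clf = RandomForestClassifier().fit(X, y)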
Cheung, Alison M.; Besher, Hassan; Wang, Dan; Liu, Kela; Amemiya, Yutaka; Chen, Jianan; Martel, Anne L.; Seth, Arun; Yaffe, Martin J.
Integrated image-processing and transcriptomic analysis of cancer-associated fibroblasts (CAFs) in breast cancer subtypes Proceedings Article
In: Tomaszewski, John E.; Ward, Aaron D. (Ed.): Medical Imaging 2023: Digital and Computational Pathology, pp. 54, SPIE, 2023, ISBN: 9781510660472.
@inproceedings{Cheung2023,
title = {Integrated image-processing and transcriptomic analysis of cancer-associated fibroblasts (CAFs) in breast cancer subtypes},
author = {Alison M. Cheung and Hassan Besher and Dan Wang and Kela Liu and Yutaka Amemiya and Jianan Chen and Anne L. Martel and Arun Seth and Martin J. Yaffe},
editor = {John E. Tomaszewski and Aaron D. Ward},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/12471/2653946/Integrated-image-processing-and-transcriptomic-analysis-of-cancer-associated-fibroblasts/10.1117/12.2653946.full},
doi = {10.1117/12.2653946},
isbn = {9781510660472},
year = {2023},
date = {2023-04-01},
booktitle = {Medical Imaging 2023: Digital and Computational Pathology},
pages = {54},
publisher = {SPIE},
abstract = {The tumor microenvironment (TME) plays an important role in driving cancer progression and affecting treatment efficacy. Cellular components of the TME include various immune subsets (tumor infiltrating lymphocytes (TILs) and macrophages), cancer-associated fibroblasts (CAFs) and vascular cells. While immune lineage has been a main focus of intensive research on the TME, CAFs have also been shown to be highly heterogeneous in their molecular phenotype and function. Using a protein marker immunofluorescence multiplexing system (Cell DIVE, Leica Microsystems) and quantitative imaging tools, we investigated the identity of various CAF clusters based on the expression of α-Smooth Muscle Actin (αSMA) and Fibroblast Activation Protein (FAP), and compared their distributions across breast cancer subtypes. We determined the cell counts of various CAF subsets using binary counting and identified the heterogeneous presentations of clusters using K-means clustering and Uniform Manifold Approximation and Projection (UMAP). We found that the abundance of CAF clusters varied among breast cancer subtypes. An integrated analysis of CAF cluster composition in each cancer and the transcriptomic data of CAF-associated genes such as CD29, IL6 and PDGFRβ was performed. We observed increased densities of proliferative, αSMA-positive CAFs in basal-like breast cancers that exhibited a co-expression signature of CAF-associated genes. Finally, an association analysis of CAF cluster composition and gene expression with previously identified radiomic phenotype was performed, but significant correlation was not detected.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ramanathan, Vishwesh; Han, Wenchao; Bassiouny, Dina; Rakovitch, Eileen; Martel, Anne L.
Ink removal in whole slide images using hallucinated data Proceedings Article
In: Tomaszewski, John E.; Ward, Aaron D. (Ed.): Medical Imaging 2023: Digital and Computational Pathology, pp. 36, SPIE, 2023, ISBN: 9781510660472.
@inproceedings{Ramanathan2023,
title = {Ink removal in whole slide images using hallucinated data},
author = {Vishwesh Ramanathan and Wenchao Han and Dina Bassiouny and Eileen Rakovitch and Anne L. Martel},
editor = {John E. Tomaszewski and Aaron D. Ward},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/12471/2653281/Ink-removal-in-whole-slide-images-using-hallucinated-data/10.1117/12.2653281.full},
doi = {10.1117/12.2653281},
isbn = {9781510660472},
year = {2023},
date = {2023-04-01},
urldate = {2023-04-01},
booktitle = {Medical Imaging 2023: Digital and Computational Pathology},
pages = {36},
publisher = {SPIE},
abstract = {Pathologists regularly use ink markings on histopathology slides to highlight specific areas of interest or orientation, making it an integral part of the workflow. Unfortunately, digitization of these ink-annotated slides hinders any computer-aided analyses, particularly deep learning algorithms, which require clean data free from artifacts. We propose a methodology that can identify and remove the ink markings for the purpose of computational analyses. We propose a two-stage network with a binary classifier for ink filtering and Pix2Pix for ink removal. We trained our network by artificially generating pseudo ink markings using only clean slides, requiring no manual annotation or curation of data. Furthermore, we demonstrate our algorithm's efficacy over an independent dataset of H&E stained breast carcinoma slides scanned before and after the removal of pen markings. Our quantitative analysis shows promising results, achieving 98.7% accuracy for the binary classifier. For Pix2Pix, we observed a 65.6% increase in structure similarity index, a 21.3% increase in peak signal-to-noise ratio, and a 30% increase in visual information fidelity. As only clean slides are required for training, the pipeline can be adapted to multiple colors of ink markings or new domains, making it easy to deploy over different sets of histopathology slides. Code and trained models are available at: https://github.com/Vishwesh4/Ink-WSI.},
keywords = {_DCIS, _Histology_QA},
pubstate = {published},
tppubtype = {inproceedings}
}
Han, Wenchao; Cheung, Alison M.; Ramanathan, Vishwesh; Wang, Dan; Liu, Kela; Yaffe, Martin J.; Martel, Anne L.
Identification of molecular cell type of breast cancer on digital histopathology images using deep learning and multiplexed fluorescence imaging Proceedings Article
In: Tomaszewski, John E.; Ward, Aaron D. (Ed.): Medical Imaging 2023: Digital and Computational Pathology, pp. 5, SPIE, 2023, ISBN: 9781510660472.
@inproceedings{Han2023,
title = {Identification of molecular cell type of breast cancer on digital histopathology images using deep learning and multiplexed fluorescence imaging},
author = {Wenchao Han and Alison M. Cheung and Vishwesh Ramanathan and Dan Wang and Kela Liu and Martin J. Yaffe and Anne L. Martel},
editor = {John E. Tomaszewski and Aaron D. Ward},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/12471/2654943/Identification-of-molecular-cell-type-of-breast-cancer-on-digital/10.1117/12.2654943.full},
doi = {10.1117/12.2654943},
isbn = {9781510660472},
year = {2023},
date = {2023-04-01},
booktitle = {Medical Imaging 2023: Digital and Computational Pathology},
pages = {5},
publisher = {SPIE},
abstract = {ER, PR (estrogen, progesterone receptor), and HER2 (human epidermal growth factor receptor 2) status are assessed using immunohistochemistry and reported in standard clinical workflows as they provide valuable information to help treatment planning. The protein Ki67 has also been suggested as a prognostic biomarker but is not routinely evaluated clinically due to insufficient quality assurance. The routine pathological practice usually relies on small biopsies, such that the reduction in consumption is necessary to save materials for special assays. For this purpose, we developed and validated an automatic system for segmenting and identifying the (ER, PR, HER2, Ki67) positive cells from hæmatoxylin and eosin (H&E) stained tissue sections using multiplexed immunofluorescence (MxIF) images at cellular level as a reference standard. In this study, we used 100 tissue-microarray cores sampled from 56 cases of invasive breast cancer. For ER, we extracted cell nucleus images (HoverNet) from the H&E images and assigned each cell nucleus as ER positive vs. negative based on the corresponding MxIF signals (whole cell segmentation with DeepCSeg) upon H&E to MxIF image registration. We trained a Res-Net 18 and validated the model on a separate test-set for classifying the cells as positive vs. negative for ER, and performed the same experiment for the other three markers. We obtained area-under-the-receiver-operating-characteristic-curves (AUCs) of 0.82 (ER), 0.85 (PR), 0.75 (HER2), 0.82 (Ki67) respectively. Our study demonstrates the feasibility of using machine learning to identify molecular status at cellular level directly from the H&E slides.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Chen, Jianan; Cheung, Helen M C; Karanicolas, Paul J; Coburn, Natalie G; Martel, Guillaume; Lee, Albert; Patel, Chirag; Milot, Laurent; Martel, Anne L
A radiomic biomarker for prognosis of resected colorectal cancer liver metastases generalizes across MRI contrast agents Journal Article
In: Frontiers in Oncology, vol. 13, pp. 898854, 2023, ISSN: 2234-943X.
@article{Chen2023,
title = {A radiomic biomarker for prognosis of resected colorectal cancer liver metastases generalizes across MRI contrast agents},
author = {Jianan Chen and Helen M C Cheung and Paul J Karanicolas and Natalie G Coburn and Guillaume Martel and Albert Lee and Chirag Patel and Laurent Milot and Anne L Martel},
url = {http://www.ncbi.nlm.nih.gov/pubmed/36816920 http://www.pubmedcentral.nih.gov/articlerender.fcgi?artid=PMC9932499 https://www.frontiersin.org/articles/10.3389/fonc.2023.898854/full},
doi = {10.3389/fonc.2023.898854},
issn = {2234-943X},
year = {2023},
date = {2023-02-01},
journal = {Frontiers in Oncology},
volume = {13},
pages = {898854},
abstract = {INTRODUCTION Contrast-enhanced MRI is routinely performed as part of preoperative work-up for patients with Colorectal Cancer Liver Metastases (CRLM). Radiomic biomarkers depicting the characteristics of CRLMs in MRI have been associated with overall survival (OS) of patients, but the reproducibility and clinical applicability of these biomarkers are limited due to the variations in MRI protocols between hospitals. METHODS In this work, we propose a generalizable radiomic model for predicting OS of CRLM patients who received preoperative chemotherapy and delayed-phase contrast enhanced (DPCE) MRIs prior to hepatic resection. This retrospective two-center study included three DPCE MRI cohorts (n=221) collected between January 2006 and December 2012. A 10-minute delayed Gd-DO3A-butrol enhanced MRI discovery cohort was used to select features based on robustness across contrast agents, correlation with OS and pairwise Pearson correlation, and to train a logistic regression model that predicts 3-year OS. RESULTS The model was evaluated on a 10-minute delayed Gd-DO3A-butrol enhanced MRI validation cohort (n=121), a 20-minute delayed Gd-EOB-DTPA (n=72) cohort from the same institute, and a 5-minute delayed Gd-DTPA cohort (n=28) from an independent institute. Two features were selected: minor axis length and dependence variance. The radiomic signature model stratified high-risk and low-risk CRLM groups in the Gd-DO3A-butrol (HR = 6.29 …},
keywords = {colorectal cancer, contrast agents, liver, metastasis, MRI, radiomics},
pubstate = {published},
tppubtype = {article}
}
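The final model in this abstract is fully specified: a logistic regression over two selected radiomic features (minor axis length and dependence variance) predicting 3-year overall survival. A minimal sketch follows; the CSV file and column names are hypothetical, and the upstream feature-selection steps (robustness across contrast agents, correlation with OS, pairwise Pearson pruning) are not reproduced here.

# Sketch: 3-year OS classifier over the two selected radiomic features.
# File and column names are hypothetical placeholders.
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

df = pd.read_csv("crlm_radiomics.csv")                       # hypothetical feature export
X = df[["minor_axis_length", "gldm_dependence_variance"]]    # the two selected features
y = df["os_3yr"]                                             # 1 = alive at 3 years

model = make_pipeline(StandardScaler(), LogisticRegression())
print(cross_val_score(model, X, y, cv=5, scoring="roc_auc").mean())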
Chen, Jianan; Martel, Anne L.
Head and Neck Tumor Segmentation with 3D UNet and Survival Prediction with Multiple Instance Neural Network Proceedings Article
In: Andrearczyk, Vincent; Oreiller, Valentin (Ed.): Head and Neck Tumor Segmentation and Outcome Prediction: Third Challenge, HECKTOR 2022, Held in Conjunction with MICCAI 2022, Singapore, September 22, 2022, Proceedings, pp. 221–229, Springer, Cham, 2023.
@inproceedings{Chen2023b,
title = {Head and Neck Tumor Segmentation with 3D UNet and Survival Prediction with Multiple Instance Neural Network},
author = {Jianan Chen and Anne L. Martel},
editor = {Vincent Andrearczyk and Valentin Oreiller},
url = {https://link.springer.com/10.1007/978-3-031-27420-6_22},
doi = {10.1007/978-3-031-27420-6_22},
year = {2023},
date = {2023-01-01},
booktitle = {Head and Neck Tumor Segmentation and Outcome Prediction: Third Challenge, HECKTOR 2022, Held in Conjunction with MICCAI 2022, Singapore, September 22, 2022, Proceedings},
pages = {221–229},
publisher = {Springer, Cham},
abstract = {Head and Neck Squamous Cell Carcinoma (HNSCC) is a group of malignancies arising in the squamous cells of the head and neck region. As a group, HNSCC accounts for around 4.5% of cancer incidences and deaths worldwide. Radiotherapy is part of the standard care for HNSCC cancers and accurate delineation of tumors is important for treatment quality. Imaging features of Computed Tomography (CT) and Positron Emission Tomography (PET) scans have been shown to be correlated with survival of HNSCC patients. In this paper we present our solutions to the segmentation task and recurrence-free survival prediction task of the HECKTOR 2022 challenge. We trained a 3D UNet model for the segmentation of primary tumors and lymph node metastases based on CT images. Three sets of models with different combinations of loss functions were ensembled to generate a more robust model. The softmax output of the ensembled model was fused with co-registered PET scans and post-processed to generate our submission to task 1 of the challenge, which achieved a 0.716 aggregated Dice score on the test data. Our segmentation model outputs were used to extract radiomic features of individual tumors on test data. Clinical variables and location of the tumors were also encoded and concatenated with radiomic features as additional inputs. We trained a multiple instance neural network to aggregate features of individual tumors into patient-level representations and predict recurrence-free survival rates of patients. Our method achieved an AUC of 0.619 for task 2 on the test data (Team name: SMIAL).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Sharahi, Hossein J.; Acconcia, Christopher N.; Li, Matthew; Martel, Anne; Hynynen, Kullervo
A Convolutional Neural Network for Beamforming and Image Reconstruction in Passive Cavitation Imaging Journal Article
In: Sensors, vol. 23, iss. 21, pp. 8760, 2023, ISSN: 1424-8220.
@article{Sharahi2023,
title = {A Convolutional Neural Network for Beamforming and Image Reconstruction in Passive Cavitation Imaging},
author = {Hossein J. Sharahi and Christopher N. Acconcia and Matthew Li and Anne Martel and Kullervo Hynynen},
doi = {10.3390/s23218760},
issn = {1424-8220},
year = {2023},
date = {2023-01-01},
journal = {Sensors},
volume = {23},
issue = {21},
pages = {8760},
abstract = {Convolutional neural networks (CNNs), initially developed for image processing applications, have recently received significant attention within the field of medical ultrasound imaging. In this study, passive cavitation imaging/mapping (PCI/PAM), which is used to map cavitation sources based on the correlation of signals across an array of receivers, is evaluated. Traditional reconstruction techniques in PCI, such as delay-and-sum, yield high spatial resolution at the cost of a substantial computational time. This results from the resource-intensive process of determining sensor weights for individual pixels in these methodologies. Consequently, the use of conventional algorithms for image reconstruction does not meet the speed requirements that are essential for real-time monitoring. Here, we show that a three-dimensional (3D) convolutional network can learn the image reconstruction algorithm for a 16×16 element matrix probe with a receive frequency ranging from 256 kHz up to 1.0 MHz. The network was trained and evaluated using simulated data representing point sources, resulting in the successful reconstruction of volumetric images with high sensitivity, especially for single isolated sources (100% in the test set). As the number of simultaneous sources increased, the network’s ability to detect weaker intensity sources diminished, although it always correctly identified the main lobe. Notably, however, network inference was remarkably fast, completing the task in approximately 178 s for a dataset comprising 650 frames of 413 volume images with signal duration of 20 μs. This processing speed is roughly thirty times faster than a parallelized implementation of the traditional time exposure acoustics algorithm on the same GPU device. This would open a new door for PCI application in the real-time monitoring of ultrasound ablation.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
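The conventional baseline this CNN replaces is delay-and-sum / time exposure acoustics: for each pixel, delay each sensor's trace by the travel time from the pixel, sum coherently, and time-average the squared sum. A naive sketch, assuming placeholder geometry, a sound speed of 1500 m/s, and a 10 MHz sampling rate:

# Sketch: naive delay-and-sum (time exposure acoustics) for passive
# cavitation mapping. Geometry, sampling rate, and data are placeholders.
import numpy as np

def delay_and_sum(rf, sensors, pixels, c=1500.0, fs=10e6):
    """rf: (n_sensors, n_samples) traces; sensors, pixels: (n, 3) positions in metres."""
    image = np.zeros(len(pixels))
    for i, p in enumerate(pixels):
        delays = np.linalg.norm(sensors - p, axis=1) / c    # travel time to each sensor
        shifts = np.round(delays * fs).astype(int)
        shifts -= shifts.min()                              # keep only relative delays
        n = rf.shape[1] - shifts.max()
        aligned = np.stack([rf[k, s:s + n] for k, s in enumerate(shifts)])
        image[i] = np.mean(np.sum(aligned, axis=0) ** 2)    # time-averaged squared coherent sum
    return image

rf = np.random.randn(16, 2048)          # placeholder RF traces, 16-element receive aperture
sensors = np.random.rand(16, 3) * 0.01  # placeholder array geometry (m)
pixels = np.random.rand(5, 3) * 0.05    # placeholder reconstruction grid (m)
print(delay_and_sum(rf, sensors, pixels))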
Xu, Tony; Rozak, Matthew; Ntiri, Edward; Dorr, Adrienne; Mester, James; Stefanovic, Bojana; Martel, Anne; Goubran, Maged
Masked Image Modeling for Label-Efficient Segmentation in Two-Photon Excitation Microscopy Book Chapter
In: pp. 117-127, 2023.
@inbook{Xu2023,
title = {Masked Image Modeling for Label-Efficient Segmentation in Two-Photon Excitation Microscopy},
author = {Tony Xu and Matthew Rozak and Edward Ntiri and Adrienne Dorr and James Mester and Bojana Stefanovic and Anne Martel and Maged Goubran},
doi = {10.1007/978-3-031-44917-8_11},
year = {2023},
date = {2023-01-01},
pages = {117-127},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}
2022
Han, Wenchao; Cheung, Alison M.; Yaffe, Martin J.; Martel, Anne L.
Cell segmentation for immunofluorescence multiplexed images using two-stage domain adaptation and weakly labeled data for pre-training Journal Article
In: Scientific Reports, vol. 12, no. 1, pp. 4399, 2022, ISSN: 2045-2322.
@article{Han2022,
title = {Cell segmentation for immunofluorescence multiplexed images using two-stage domain adaptation and weakly labeled data for pre-training},
author = {Wenchao Han and Alison M. Cheung and Martin J. Yaffe and Anne L. Martel},
url = {https://www.nature.com/articles/s41598-022-08355-1},
doi = {10.1038/s41598-022-08355-1},
issn = {2045-2322},
year = {2022},
date = {2022-12-01},
journal = {Scientific Reports},
volume = {12},
number = {1},
pages = {4399},
abstract = {Cellular profiling with multiplexed immunofluorescence (MxIF) images can contribute to a more accurate patient stratification for immunotherapy. Accurate cell segmentation of the MxIF images is an essential step. We propose a deep learning pipeline to train a Mask R-CNN model (deep network) for cell segmentation using nuclear (DAPI) and membrane (Na + K + ATPase) stained images. We used two-stage domain adaptation by first using a weakly labeled dataset followed by fine-tuning with a manually annotated dataset. We validated our method against manual annotations on three different datasets. Our method yields comparable results to the multi-observer agreement on an ovarian cancer dataset and improves on state-of-the-art performance on a publicly available dataset of mouse pancreatic tissues. Our proposed method, using a weakly labeled dataset for pre-training, showed superior performance in all of our experiments. When using smaller training sample sizes for fine-tuning, the proposed method provided comparable performance to that obtained using much larger training sample sizes. Our results demonstrate that using two-stage domain adaptation with a weakly labeled dataset can effectively boost system performance, especially when using a small training sample size. We deployed the model as a plug-in to CellProfiler, a widely used software platform for cellular image analysis.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Kuling, Grey; Curpen, Belinda; Martel, Anne L.
BI-RADS BERT and Using Section Segmentation to Understand Radiology Reports Journal Article
In: Journal of Imaging, vol. 8, no. 5, pp. 131, 2022, ISSN: 2313-433X.
@article{Kuling_nlp_2022,
title = {BI-RADS BERT and Using Section Segmentation to Understand Radiology Reports},
author = {Grey Kuling and Belinda Curpen and Anne L. Martel},
url = {https://www.mdpi.com/2313-433X/8/5/131},
doi = {10.3390/jimaging8050131},
issn = {2313-433X},
year = {2022},
date = {2022-05-01},
journal = {Journal of Imaging},
volume = {8},
number = {5},
pages = {131},
abstract = {Radiology reports are one of the main forms of communication between radiologists and other clinicians, and contain important information for patient care. In order to use this information for research and automated patient care programs, it is necessary to convert the raw text into structured data suitable for analysis. State-of-the-art natural language processing (NLP) domain-specific contextual word embeddings have been shown to achieve impressive accuracy for these tasks in medicine, but have yet to be utilized for section structure segmentation. In this work, we pre-trained a contextual embedding BERT model using breast radiology reports and developed a classifier that incorporated the embedding with auxiliary global textual features in order to perform section segmentation. This model achieved 98% accuracy in segregating free-text reports, sentence by sentence, into sections of information outlined in the Breast Imaging Reporting and Data System (BI-RADS) lexicon, which is a significant improvement over the classic BERT model without auxiliary information. We then evaluated whether using section segmentation improved the downstream extraction of clinically relevant information such as modality/procedure, previous cancer, menopausal status, purpose of exam, breast density, and breast MRI background parenchymal enhancement. Using the BERT model pre-trained on breast radiology reports, combined with section segmentation, resulted in an overall accuracy of 95.9% in the field extraction tasks. This is a 17% improvement, compared to an overall accuracy of 78.9% for field extraction with models using classic BERT embeddings and not using section segmentation. Our work shows the strength of using BERT in the analysis of radiology reports and the advantages of section segmentation by identifying the key features of patient factors recorded in breast radiology reports.},
keywords = {NLP},
pubstate = {published},
tppubtype = {article}
}
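The section-segmentation step described above amounts to classifying each report sentence from a BERT embedding concatenated with auxiliary global textual features. A minimal sketch, using a generic checkpoint as a stand-in for the breast-radiology BERT and relative sentence position as an assumed auxiliary feature; the published pipeline differs in its pre-training and feature set.

# Sketch: sentence-by-sentence section classification with a BERT encoder
# plus one auxiliary feature. Checkpoint and data are placeholders.
import numpy as np
import torch
from transformers import AutoModel, AutoTokenizer
from sklearn.linear_model import LogisticRegression

tok = AutoTokenizer.from_pretrained("bert-base-uncased")  # stand-in checkpoint
enc = AutoModel.from_pretrained("bert-base-uncased").eval()

def embed(sentences, positions):
    """CLS embedding per sentence, concatenated with relative position in the report."""
    with torch.no_grad():
        batch = tok(sentences, padding=True, truncation=True, return_tensors="pt")
        cls = enc(**batch).last_hidden_state[:, 0, :].numpy()
    return np.hstack([cls, np.asarray(positions)[:, None]])

sents = ["Clinical indication: high risk screening.", "No suspicious enhancement."]
pos = [0.0, 0.6]                          # hypothetical relative positions
labels = ["indication", "findings"]       # hypothetical section labels
clf = LogisticRegression(max_iter=1000).fit(embed(sents, pos), labels)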
Ciga, Ozan; Xu, Tony; Martel, Anne L
Self supervised contrastive learning for digital histopathology Journal Article
In: Machine Learning with Applications, vol. 7, pp. 100198, 2022, ISSN: 2666-8270.
@article{Ciga2022,
title = {Self supervised contrastive learning for digital histopathology},
author = {Ozan Ciga and Tony Xu and Anne L Martel},
url = {https://linkinghub.elsevier.com/retrieve/pii/S2666827021000992},
doi = {10.1016/j.mlwa.2021.100198},
issn = {26668270},
year = {2022},
date = {2022-03-01},
urldate = {2022-03-01},
journal = {Machine Learning with Applications},
volume = {7},
pages = {100198},
abstract = {Unsupervised learning has been a long-standing goal of machine learning and is especially important for medical image analysis, where the learning can compensate for the scarcity of labeled datasets. A promising subclass of unsupervised learning is self-supervised learning, which aims to learn salient features using the raw input as the learning signal. In this paper, we use a contrastive self-supervised learning method Chen et al. (2020a) that achieved state-of-the-art results on natural-scene images, and apply this method to digital histopathology by collecting and training on 60 histopathology datasets without any labels. We find that combining multiple multi-organ datasets with different types of staining and resolution properties improves the quality of the learned features. Furthermore, we find drastically subsampling a dataset (e.g., using ≤1% of the available image patches) does not negatively impact the learned representations, unlike training on natural-scene images. Linear classifiers trained on top of the learned features show that networks pretrained on digital histopathology datasets perform better than ImageNet pretrained networks, boosting task performances up to 7.5% in accuracy and 8.9% in F1. These findings may also be useful when applying newer contrastive techniques to histopathology data. Pretrained PyTorch models are made publicly available at https://github.com/ozanciga/self-supervised-histopathology.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Bilocq-Lacoste, Julie; Ferre, Romuald; Kuling, Grey; Martel, Anne L.; Tyrrell, Pascal N.; Li, Siying; Wang, Guan; Curpen, Belinda
Missed Breast Cancers on MRI in High-Risk Patients: A Retrospective Case–Control Study Journal Article
In: Tomography, vol. 8, no. 1, pp. 329–340, 2022, ISSN: 2379-139X.
@article{Bilocq-Lacoste2022,
title = {Missed Breast Cancers on MRI in High-Risk Patients: A Retrospective Case–Control Study},
author = {Julie Bilocq-Lacoste and Romuald Ferre and Grey Kuling and Anne L. Martel and Pascal N. Tyrrell and Siying Li and Guan Wang and Belinda Curpen},
url = {https://www.mdpi.com/2379-139X/8/1/27},
doi = {10.3390/tomography8010027},
issn = {2379-139X},
year = {2022},
date = {2022-02-01},
journal = {Tomography},
volume = {8},
number = {1},
pages = {329–340},
abstract = {Purpose: To determine if MRI features and molecular subtype influence the detectability of breast cancers on MRI in high-risk patients. Methods and Materials: Breast cancers in a high-risk population of 104 patients were diagnosed following MRI describing a BI-RADS 4–5 lesion. MRI characteristics at the time of diagnosis were compared with previous MRI, where a BI-RADS 1–2–3 lesion was described. Results: There were 77 false-negative MRIs. A total of 51 cancers were overlooked and 26 were misinterpreted. There was no association found between MRI characteristics, the receptor type and the frequency of missed cancers. The main factors for misinterpreted lesions were multiple breast lesions, prior biopsy/surgery and long-term stability. Lesions were mostly overlooked because of their small size and high background parenchymal enhancement. Among missed lesions, 50% of those with plateau kinetics on initial MRI changed for washout kinetics, and 65% of initially progressively enhancing lesions then showed plateau or washout kinetics. There were more basal-like tumours in BRCA1 carriers (50%) than in non-carriers (13%)},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Reinke, Annika; Maier-Hein, Lena; Christodoulou, Evangelia; Glocker, Ben; Scholz, Patrick; Isensee, Fabian; Kleesiek, Jens; Kozubek, Michal; Reyes, Mauricio; Riegler, Michael Alexander; Wiesenfarth, Manuel; Baumgartner, Michael; Eisenmann, Matthias; Heckmann-Nötzel, Doreen; Kavur, Ali Emre; Rädsch, Tim; Tizabi, Minu D.; Acion, Laura; Antonelli, Michela; Arbel, Tal; Bakas, Spyridon; Bankhead, Peter; Benis, Arriel; Cardoso, M. Jorge; Cheplygina, Veronika; Cimini, Beth A; Collins, Gary S.; Farahani, Keyvan; Ginneken, Bram; Hamprecht, Fred A; Hashimoto, Daniel A.; Hoffman, Michael M.; Huisman, Merel; Jannin, Pierre; Kahn, Charles; Karargyris, Alexandros; Karthikesalingam, Alan; Kenngott, Hannes; Kopp-Schneider, Annette; Kreshuk, Anna; Kurc, Tahsin; Landman, Bennett A.; Litjens, Geert; Madani, Amin; Maier-Hein, Klaus; Martel, Anne; Mattson, Peter; Meijering, Erik; Menze, Bjoern; Moher, David; Moons, Karel G. M.; Müller, Henning; Nichyporuk, Brennan; Nickel, Felix; Petersen, Jens; Rajpoot, Nasir; Rieke, Nicola; Saez-Rodriguez, Julio; Sánchez, Clara I.; Shetty, Shravya; Smeden, Maarten; Sudre, Carole H.; Summers, Ronald M.; Taha, Abdel A.; Tsaftaris, Sotirios A.; Calster, Ben Van; Varoquaux, Gael; Jaeger, Paul F
Metrics Reloaded - A new recommendation framework for biomedical image analysis validation Journal Article
In: Medical Imaging with Deep Learning, 2022.
@article{Reinke2022,
title = {Metrics Reloaded - A new recommendation framework for biomedical image analysis validation},
author = {Annika Reinke and Lena Maier-Hein and Evangelia Christodoulou and Ben Glocker and Patrick Scholz and Fabian Isensee and Jens Kleesiek and Michal Kozubek and Mauricio Reyes and Michael Alexander Riegler and Manuel Wiesenfarth and Michael Baumgartner and Matthias Eisenmann and Doreen Heckmann-Nötzel and Ali Emre Kavur and Tim Rädsch and Minu D. Tizabi and Laura Acion and Michela Antonelli and Tal Arbel and Spyridon Bakas and Peter Bankhead and Arriel Benis and M. Jorge Cardoso and Veronika Cheplygina and Beth A Cimini and Gary S. Collins and Keyvan Farahani and Bram Ginneken and Fred A Hamprecht and Daniel A. Hashimoto and Michael M. Hoffman and Merel Huisman and Pierre Jannin and Charles Kahn and Alexandros Karargyris and Alan Karthikesalingam and Hannes Kenngott and Annette Kopp-Schneider and Anna Kreshuk and Tahsin Kurc and Bennett A. Landman and Geert Litjens and Amin Madani and Klaus Maier-Hein and Anne Martel and Peter Mattson and Erik Meijering and Bjoern Menze and David Moher and Karel G. M. Moons and Henning Müller and Brennan Nichyporuk and Felix Nickel and Jens Petersen and Nasir Rajpoot and Nicola Rieke and Julio Saez-Rodriguez and Clara I. Sánchez and Shravya Shetty and Maarten Smeden and Carole H. Sudre and Ronald M. Summers and Abdel A. Taha and Sotirios A. Tsaftaris and Ben Van Calster and Gael Varoquaux and Paul F Jaeger},
year = {2022},
date = {2022-01-01},
journal = {Medical Imaging with Deep Learning},
abstract = {Meaningful performance assessment of biomedical image analysis algorithms depends on objective and appropriate performance metrics. There are major shortcomings in the current state of the art. Yet, so far limited attention has been paid to practical pitfalls associated with using particular metrics for image analysis tasks. Therefore, a number of international initiatives have collaborated to offer researchers guidance and tools for selecting performance metrics in a problem-aware manner. In our proposed framework, the characteristics of the given biomedical problem are first captured in a problem fingerprint, which identifies properties related to domain interests, the target structure(s), the input datasets, and algorithm output. A problem category-specific mapping is applied in the second step to match fingerprints to metrics that reflect domain requirements. Based on input from experts from more than 60 institutions worldwide, we believe our metric recommendation framework to be useful to the MIDL community and to enhance the quality of biomedical image analysis algorithm validation.},
keywords = {Classification, Instance Segmentation, Medical Imaging, Metrics, Object Detection, Validation},
pubstate = {published},
tppubtype = {article}
}
Han, Wenchao; Cheung, Alison; Wang, Dan; Liu, Kela; Yaffe, Martin J.; Martel, Anne L.
Cell phenotyping using unsupervised clustering on multiplexed fluorescence images of breast cancer tissue specimens Proceedings Article
In: SPIE Medical Imaging 2022: Digital Pathology, pp. 12039–21, 2022.
@inproceedings{Han2022b,
title = {Cell phenotyping using unsupervised clustering on multiplexed fluorescence images of breast cancer tissue specimens},
author = {Wenchao Han and Alison Cheung and Dan Wang and Kela Liu and Martin J. Yaffe and Anne L. Martel},
year = {2022},
date = {2022-01-01},
booktitle = {SPIE Medical Imaging 2022: Digital Pathology},
pages = {12039–21},
abstract = {Cell phenotyping is an essential step for analyzing high-dimensional cellular information. Literature illustrated the usability for unsupervised algorithms for cell phenotyping by validating the results against manual gated cell populations. To extend the knowledge for identifying unknown inclusive cell populations in our database of multiplexed immunofluorescence images of breast cancer tissue microarrays, we explored two commonly used methods (PhenoGraph and FlowSOM) using reference standard of clinical relevant cancer subtypes that were manually assigned based on immunohistochemistry scoring of serial sections. Our results showed PhenoGraph yielded better results but much larger variations using different parameter settings than FlowSOM.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
McNeil, Matthew; Anil, Cem; Martel, Anne L.
Sparse capsule networks for informative representation learning in digital pathology Proceedings Article
In: SPIE Medical Imaging: Digital Pathology, pp. 12039–45, 2022.
@inproceedings{McNeil2022,
title = {Sparse capsule networks for informative representation learning in digital pathology},
author = {Matthew McNeil and Cem Anil and Anne L. Martel},
year = {2022},
date = {2022-01-01},
booktitle = {SPIE Medical Imaging: Digital Pathology},
pages = {12039–45},
abstract = {Digital pathology involves the digitization of high quality tissue biopsies on microscope slides to be used by physicians for patient diagnosis and prognosis. These slides have become exciting avenues for deep learning applications to improve care. Despite this, labels are difficult to produce and thus remain rare. In this work, we create a sparse capsule network with a spatial broadcast decoder to perform representation learning on segmented nuclei patches extracted from the BreastPathQ dataset. This was able to produce disentangled latent space for categories such as rotations, and logistic regression classifiers trained on the latent space performed well.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2021
Cheung, Alison; Wang, Dan; Liu, Kela; Hope, Tyna; Murray, Mayan; Ginty, Fiona; Nofech-Mozes, Sharon; Martel, Anne L; Yaffe, Martin Joel
Quantitative single-cell analysis of immunofluorescence protein multiplex images illustrates biomarker spatial heterogeneity within breast cancer subtypes Journal Article
In: Breast Cancer Research, vol. 23, no. 1, pp. 114, 2021, ISSN: 1465-542X.
@article{Cheung2021,
title = {Quantitative single-cell analysis of immunofluorescence protein multiplex images illustrates biomarker spatial heterogeneity within breast cancer subtypes},
author = {Alison Cheung and Dan Wang and Kela Liu and Tyna Hope and Mayan Murray and Fiona Ginty and Sharon Nofech-Mozes and Anne L Martel and Martin Joel Yaffe},
url = {https://breast-cancer-research.biomedcentral.com/articles/10.1186/s13058-021-01475-y},
doi = {10.1186/s13058-021-01475-y},
issn = {1465-542X},
year = {2021},
date = {2021-12-01},
urldate = {2021-12-01},
journal = {Breast Cancer Research},
volume = {23},
number = {1},
pages = {114},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Ciga, Ozan; Xu, Tony; Nofech-Mozes, Sharon; Noy, Shawna; Lu, Fang-i; Martel, Anne L
Overcoming the limitations of patch-based learning to detect cancer in whole slide images Journal Article
In: Scientific Reports, vol. 11, no. 1, pp. 8894, 2021, ISSN: 2045-2322.
@article{Ciga2021a,
title = {Overcoming the limitations of patch-based learning to detect cancer in whole slide images},
author = {Ozan Ciga and Tony Xu and Sharon Nofech-Mozes and Shawna Noy and Fang-i Lu and Anne L Martel},
url = {http://arxiv.org/abs/2012.00617 http://www.nature.com/articles/s41598-021-88494-z},
doi = {10.1038/s41598-021-88494-z},
issn = {2045-2322},
year = {2021},
date = {2021-12-01},
journal = {Scientific Reports},
volume = {11},
number = {1},
pages = {8894},
abstract = {Whole slide images (WSIs) pose unique challenges when training deep learning models. They are very large which makes it necessary to break each image down into smaller patches for analysis, image features have to be extracted at multiple scales in order to capture both detail and context, and extreme class imbalances may exist. Significant progress has been made in the analysis of these images, thanks largely to the availability of public annotated datasets. We postulate, however, that even if a method scores well on a challenge task, this success may not translate to good performance in a more clinically relevant workflow. Many datasets consist of image patches which may suffer from data curation bias; other datasets are only labelled at the whole slide level and the lack of annotations across an image may mask erroneous local predictions so long as the final decision is correct. In this paper, we outline the differences between patch or slide-level classification versus methods that need to localize or segment cancer accurately across the whole slide, and we experimentally verify that best practices differ in both cases. We apply a binary cancer detection network on post neoadjuvant therapy breast cancer WSIs to find the tumor bed outlining the extent of cancer, a task which requires sensitivity and precision across the whole slide. We extensively study multiple design choices and their effects on the outcome, including architectures and augmentations. We propose a negative data sampling strategy, which drastically reduces the false positive rate (25% of false positives versus 62.5%) and improves each metric pertinent to our problem, with a 53% reduction in the error of tumor extent. Our results indicate classification performances of image patches versus WSIs are inversely related when the same negative data sampling strategy is used. Specifically, injection of negatives into training data for image patch classification degrades the performance, whereas the performance is improved for slide and pixel-level WSI classification tasks. Furthermore, we find applying extensive augmentations helps more in WSI-based tasks compared to patch-level image classification.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
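The negative data sampling strategy described in this abstract lends itself to a short illustration. The sketch below is not the authors' code: the patch lists, the helper name, and the 30% injection ratio are all hypothetical stand-ins for whatever the paper actually used.

import random

def build_training_set(tumour_patches, normal_patches, background_patches,
                       negative_ratio=0.3, seed=42):
    # Hypothetical sketch: inject patches sampled from un-annotated
    # (background) slide regions as extra negatives, so the patch
    # classifier sees the kind of tissue it will encounter at
    # whole-slide inference time.
    rng = random.Random(seed)
    n_extra = int(negative_ratio * (len(tumour_patches) + len(normal_patches)))
    extras = rng.sample(background_patches, min(n_extra, len(background_patches)))
    data = ([(p, 1) for p in tumour_patches]
            + [(p, 0) for p in normal_patches]
            + [(p, 0) for p in extras])
    rng.shuffle(data)
    return data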
Chen, Jianan; Cheung, Helen M. C.; Milot, Laurent; Martel, Anne L.
AMINN: Autoencoder-Based Multiple Instance Neural Network Improves Outcome Prediction in Multifocal Liver Metastases Book Section
In: MICCAI2021, pp. 752–761, 2021.
@incollection{Chen2021,
title = {AMINN: Autoencoder-Based Multiple Instance Neural Network Improves Outcome Prediction in Multifocal Liver Metastases},
author = {Jianan Chen and Helen M. C. Cheung and Laurent Milot and Anne L. Martel},
url = {http://arxiv.org/abs/2012.06875 https://link.springer.com/chapter/10.1007/978-3-030-87240-3_72#chapter-info https://link.springer.com/10.1007/978-3-030-87240-3_72},
doi = {10.1007/978-3-030-87240-3_72},
year = {2021},
date = {2021-12-01},
booktitle = {MICCAI2021},
pages = {752–761},
abstract = {Colorectal cancer is one of the most common and lethal cancers, and colorectal cancer liver metastases (CRLM) is the major cause of death in patients with colorectal cancer. Multifocality occurs frequently in CRLM, but is relatively unexplored in CRLM outcome prediction. Most existing clinical and imaging biomarkers do not take the imaging features of all multifocal lesions into account. In this paper, we present an end-to-end autoencoder-based multiple instance neural network (AMINN) for the prediction of survival outcomes in multifocal CRLM patients using radiomic features extracted from contrast-enhanced MRIs. Specifically, we jointly train an autoencoder to reconstruct input features and a multiple instance network to make predictions by aggregating information from all tumour lesions of a patient. In addition, we incorporate a two-step normalization technique to improve the training of deep neural networks, built on the observation that the distributions of radiomic features are almost always severely skewed. Experimental results empirically validated our hypothesis that incorporating imaging features of all lesions improves outcome prediction for multifocal cancer. The proposed AMINN framework achieved an area under the ROC curve (AUC) of 0.70, which is 19.5% higher than baseline methods. We built a risk score based on the outputs of our network and compared it to other clinical and imaging biomarkers. Our risk score is the only one that achieved statistical significance in univariate and multivariate Cox proportional hazards modeling in our cohort of multifocal CRLM patients. The effectiveness of incorporating all lesions and applying two-step normalization is demonstrated by a series of ablation studies. Our code will be released after the peer-review process.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
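The two-step normalization mentioned above is motivated by the observation that radiomic feature distributions are almost always severely skewed. A minimal sketch of one plausible reading of that idea (a log transform followed by per-feature z-scoring); this is an assumption about the scheme, not a transcription of the paper's:

import numpy as np

def two_step_normalize(features):
    # features: (n_lesions, n_features) radiomic matrix.
    # Step 1: signed log transform to compress heavy tails.
    logged = np.sign(features) * np.log1p(np.abs(features))
    # Step 2: z-score each feature column.
    mu = logged.mean(axis=0)
    sigma = logged.std(axis=0) + 1e-8
    return (logged - mu) / sigma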
Srinidhi, Chetan L; Martel, Anne L
Improving Self-supervised Learning with Hardness-aware Dynamic Curriculum Learning: An Application to Digital Pathology Journal Article
In: ICCV 2021 CDpath workshop, 2021.
@article{Srinidhi2021,
title = {Improving Self-supervised Learning with Hardness-aware Dynamic Curriculum Learning: An Application to Digital Pathology},
author = {Chetan L Srinidhi and Anne L Martel},
url = {http://arxiv.org/abs/2108.07183},
year = {2021},
date = {2021-08-01},
journal = {ICCV 2021 CDpath workshop},
abstract = {Self-supervised learning (SSL) has recently shown tremendous potential to learn generic visual representations useful for many image analysis tasks. Despite their notable success, the existing SSL methods fail to generalize to downstream tasks when the number of labeled training instances is small or if the domain shift between the transfer domains is significant. In this paper, we attempt to improve self-supervised pretrained representations through the lens of curriculum learning by proposing a hardness-aware dynamic curriculum learning (HaDCL) approach. To improve the robustness and generalizability of SSL, we dynamically leverage progressive harder examples via easy-to-hard and hard-to-very-hard samples during mini-batch downstream fine-tuning. We discover that by progressive stage-wise curriculum learning, the pretrained representations are significantly enhanced and adaptable to both in-domain and out-of-domain distribution data. We performed extensive validation on three histology benchmark datasets on both patch-wise and slide-level classification problems. Our curriculum based fine-tuning yields a significant improvement over standard fine-tuning, with a minimum improvement in area-under-the-curve (AUC) score of 1.7% and 2.2% on in-domain and out-of-domain distribution data, respectively. Further, we empirically show that our approach is more generic and adaptable to any SSL methods and does not impose any additional overhead complexity. Besides, we also outline the role of patch-based versus slide-based curriculum learning in histopathology to provide practical insights into the success of curriculum based fine-tuning of SSL methods. Code is released at https://github.com/srinidhiPY/ICCV-CDPATH2021-ID-8},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
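The core of an easy-to-hard curriculum like HaDCL can be conveyed with a generic sampler that ranks examples by their current loss and widens the pool of admitted hard examples as fine-tuning progresses. The admission schedule below is invented for illustration and is not the paper's exact rule:

import numpy as np

def curriculum_batches(losses, epoch, n_epochs, batch_size=32, rng=None):
    # Rank samples by their current loss (easy first) and progressively
    # admit harder examples into the candidate pool as training advances.
    rng = rng or np.random.default_rng(0)
    order = np.argsort(losses)
    frac = min(1.0, 0.3 + 0.7 * epoch / max(1, n_epochs - 1))
    pool = order[: max(batch_size, int(frac * len(order)))]
    idx = rng.permutation(pool)
    for start in range(0, len(idx), batch_size):
        yield idx[start:start + batch_size]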
Ma, Jun; Chen, Jianan; Ng, Matthew; Huang, Rui; Li, Yu; Li, Chen; Yang, Xiaoping; Martel, Anne L
Loss odyssey in medical image segmentation Journal Article
In: Medical Image Analysis, vol. 71, pp. 102035, 2021, ISSN: 13618415.
@article{Ma2021,
title = {Loss odyssey in medical image segmentation},
author = {Jun Ma and Jianan Chen and Matthew Ng and Rui Huang and Yu Li and Chen Li and Xiaoping Yang and Anne L Martel},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1361841521000815},
doi = {10.1016/j.media.2021.102035},
issn = {13618415},
year = {2021},
date = {2021-07-01},
journal = {Medical Image Analysis},
volume = {71},
pages = {102035},
keywords = {convolutional neural networks, loss function, segmentation},
pubstate = {published},
tppubtype = {article}
}
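A practical takeaway from this survey of segmentation losses is that compound losses, such as Dice combined with cross-entropy, are robust default choices. A minimal PyTorch sketch of such a compound loss for the binary case (equal weighting is an assumption here):

import torch
import torch.nn.functional as F

def dice_ce_loss(logits, target, eps=1e-6):
    # logits: (N, 1, H, W) raw scores; target: (N, 1, H, W) in {0, 1}.
    prob = torch.sigmoid(logits)
    dims = (1, 2, 3)
    intersection = (prob * target).sum(dims)
    denom = prob.sum(dims) + target.sum(dims)
    soft_dice = (2 * intersection + eps) / (denom + eps)
    ce = F.binary_cross_entropy_with_logits(logits, target.float())
    return ce + (1 - soft_dice).mean()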
Petrick, Nicholas; Akbar, Shazia; Cha, Kenny H.; Nofech-Mozes, Sharon; Sahiner, Berkman; Gavrielides, Marios A.; Kalpathy-Cramer, Jayashree; Drukker, Karen; Martel, Anne L.; BreastPathQ Challenge Group
SPIE-AAPM-NCI BreastPathQ challenge: an image analysis challenge for quantitative tumor cellularity assessment in breast cancer histology images following neoadjuvant treatment Journal Article
In: Journal of Medical Imaging, vol. 8, no. 03, pp. 034501, 2021, ISSN: 2329-4302.
@article{Petrick2021,
title = {SPIE-AAPM-NCI BreastPathQ challenge: an image analysis challenge for quantitative tumor cellularity assessment in breast cancer histology images following neoadjuvant treatment},
author = {Nicholas Petrick and Shazia Akbar and Kenny H. Cha and Sharon Nofech-Mozes and Berkman Sahiner and Marios A. Gavrielides and Jayashree Kalpathy-Cramer and Karen Drukker and Anne L. Martel and BreastPathQ Challenge Group},
url = {https://www.spiedigitallibrary.org/journals/journal-of-medical-imaging/volume-8/issue-03/034501/SPIE-AAPM-NCI-BreastPathQ-challenge–an-image-analysis-challenge/10.1117/1.JMI.8.3.034501.full},
doi = {10.1117/1.JMI.8.3.034501},
issn = {2329-4302},
year = {2021},
date = {2021-05-01},
journal = {Journal of Medical Imaging},
volume = {8},
number = {03},
pages = {034501},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Reinke, Annika; Eisenmann, Matthias; Tizabi, Minu Dietlinde; Sudre, Carole H.; Rädsch, Tim; Antonelli, Michela; Arbel, Tal; Bakas, Spyridon; Cardoso, M. Jorge; Cheplygina, Veronika; Farahani, Keyvan; Glocker, Ben; Heckmann-Nötzel, Doreen; Isensee, Fabian; Jannin, Pierre; Kahn, Charles; Kleesiek, Jens; Kurc, Tahsin; Kozubek, Michal; Landman, Bennett A.; Litjens, Geert; Maier-Hein, Klaus; Martel, Anne L; Menze, Bjoern; Müller, Henning; Petersen, Jens; Reyes, Mauricio; Rieke, Nicola; Stieltjes, Bram; Summers, Ronald M.; Tsaftaris, Sotirios A.; Ginneken, Bram; Kopp-Schneider, Annette; Jäger, Paul; Maier-Hein, Lena
Common limitations of performance metrics in biomedical image analysis Proceedings Article
In: MIDL 2021, 2021.
@inproceedings{Reinke2021,
title = {Common limitations of performance metrics in biomedical image analysis},
author = {Annika Reinke and Matthias Eisenmann and Minu Dietlinde Tizabi and Carole H. Sudre and Tim Rädsch and Michela Antonelli and Tal Arbel and Spyridon Bakas and M. Jorge Cardoso and Veronika Cheplygina and Keyvan Farahani and Ben Glocker and Doreen Heckmann-Nötzel and Fabian Isensee and Pierre Jannin and Charles Kahn and Jens Kleesiek and Tahsin Kurc and Michal Kozubek and Bennett A. Landman and Geert Litjens and Klaus Maier-Hein and Anne L Martel and Bjoern Menze and Henning Müller and Jens Petersen and Mauricio Reyes and Nicola Rieke and Bram Stieltjes and Ronald M. Summers and Sotirios A. Tsaftaris and Bram Ginneken and Annette Kopp-Schneider and Paul Jäger and Lena Maier-Hein},
url = {https://arxiv.org/abs/2104.05642},
year = {2021},
date = {2021-04-01},
urldate = {2021-04-01},
booktitle = {MIDL 2021},
abstract = {While the importance of automatic biomedical image analysis is increasing at an enormous pace, recent meta-research revealed major flaws with respect to algorithm validation. Performance metrics are key for objective, transparent and comparative performance assessment, but little attention has been given to their pitfalls. Under the umbrella of the Helmholtz Imaging Platform (HIP), three international initiatives (the MICCAI Society's challenge working group, the Biomedical Image Analysis Challenges (BIAS) initiative, as well as the benchmarking working group of the MONAI framework) have now joined forces with the mission to generate best practice recommendations with respect to metrics in medical image analysis. Consensus building is achieved via a Delphi process, a popular tool for integrating opinions in large international consortia. The current document serves as a teaser for the results presentation and focuses on the pitfalls of the most commonly used metric in biomedical image analysis, the Dice Similarity Coefficient (DSC), in the categories of (1) mathematical properties/edge cases, (2) task/metric fit and (3) metric aggregation. Being compiled by a large group of experts from more than 30 institutes worldwide, we believe that our framework could be of general interest to the MIDL community and will improve the quality of biomedical image analysis algorithm validation.},
keywords = {Challenges, Metrics, segmentation, Validation},
pubstate = {published},
tppubtype = {inproceedings}
}
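The DSC pitfalls catalogued here, particularly the mathematical edge cases and the metric's behaviour on small structures, are easy to reproduce numerically. A short worked example:

import numpy as np

def dsc(a, b):
    a, b = a.astype(bool), b.astype(bool)
    denom = a.sum() + b.sum()
    if denom == 0:
        # Both masks empty: DSC is mathematically undefined and a
        # convention (e.g. score 1, or NaN) must be chosen explicitly.
        return float("nan")
    return 2 * np.logical_and(a, b).sum() / denom

truth = np.zeros((10, 10), dtype=int)
truth[5, 5] = 1                          # a single-pixel structure
pred = np.zeros_like(truth)
print(dsc(pred, truth))                  # 0.0: missing one pixel collapses the score
print(dsc(pred, np.zeros_like(truth)))   # nan: the empty-vs-empty edge case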
Hesse, Linde S; Kuling, Grey; Veta, Mitko; Martel, Anne L.
Intensity Augmentation to Improve Generalizability of Breast Segmentation Across Different MRI Scan Protocols Journal Article
In: IEEE Transactions on Biomedical Engineering, vol. 68, no. 3, pp. 759–770, 2021, ISSN: 0018-9294.
@article{Hesse2020,
title = {Intensity Augmentation to Improve Generalizability of Breast Segmentation Across Different MRI Scan Protocols},
author = {Linde S Hesse and Grey Kuling and Mitko Veta and Anne L. Martel},
url = {https://ieeexplore.ieee.org/document/9166708/},
doi = {10.1109/TBME.2020.3016602},
issn = {0018-9294},
year = {2021},
date = {2021-03-01},
urldate = {2021-03-01},
journal = {IEEE Transactions on Biomedical Engineering},
volume = {68},
number = {3},
pages = {759–770},
keywords = {_breast_segmentation},
pubstate = {published},
tppubtype = {article}
}
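Intensity augmentation of the kind studied here exposes the network to a wider range of scanner- and protocol-dependent intensity profiles during training. The sketch below uses a random gamma curve and multiplicative scale; the specific ranges are illustrative assumptions, not the paper's parameters:

import numpy as np

def random_intensity_augment(img, rng=None):
    # Normalize to [0, 1], then apply a random gamma curve and a random
    # multiplicative scale so training covers a wider intensity range.
    rng = rng or np.random.default_rng()
    lo, hi = img.min(), img.max()
    img = (img - lo) / (hi - lo + 1e-8)
    gamma = rng.uniform(0.5, 2.0)
    scale = rng.uniform(0.9, 1.1)
    return np.clip(scale * img ** gamma, 0.0, 1.0)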
Ciga, Ozan; Martel, Anne L.
Learning to segment images with classification labels Journal Article
In: Medical Image Analysis, vol. 68, pp. 101912, 2021, ISSN: 13618415.
@article{Ciga2021b,
title = {Learning to segment images with classification labels},
author = {Ozan Ciga and Anne L. Martel},
url = {https://linkinghub.elsevier.com/retrieve/pii/S1361841520302760},
doi = {10.1016/j.media.2020.101912},
issn = {13618415},
year = {2021},
date = {2021-02-01},
journal = {Medical Image Analysis},
volume = {68},
pages = {101912},
abstract = {Two of the most common tasks in medical imaging are classification and segmentation. Either task requires labeled data annotated by experts, which is scarce and expensive to collect. Annotating data for segmentation is generally considered to be more laborious as the annotator has to draw around the boundaries of regions of interest, as opposed to assigning image patches a class label. Furthermore, in tasks such as breast cancer histopathology, any realistic clinical application often includes working with whole slide images, whereas most publicly available training data are in the form of image patches, which are given a class label. We propose an architecture that can alleviate the requirements for segmentation-level ground truth by making use of image-level labels to reduce the amount of time spent on data curation. In addition, this architecture can help unlock the potential of previously acquired image-level datasets on segmentation tasks by annotating a small number of regions of interest. In our experiments, we show using only one segmentation-level annotation per class, we can achieve performance comparable to a fully annotated dataset.},
keywords = {digital histopathology, image segmentation, weakly supervised learning, whole slide image},
pubstate = {published},
tppubtype = {article}
}
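The general idea, a shared encoder supervised by abundant image-level labels through a classification head and by a handful of annotated masks through a segmentation head, can be sketched as follows. The layer sizes and loss weighting are placeholders, not the paper's architecture:

import torch
import torch.nn.functional as F

encoder = torch.nn.Sequential(torch.nn.Conv2d(3, 16, 3, padding=1),
                              torch.nn.ReLU())
cls_head = torch.nn.Linear(16, 2)       # image-level labels (abundant)
seg_head = torch.nn.Conv2d(16, 2, 1)    # pixel-level labels (scarce)

def mixed_loss(x_cls, y_cls, x_seg, y_seg):
    # Classification branch: global average pooling over spatial dims.
    feats = encoder(x_cls)
    loss_cls = F.cross_entropy(cls_head(feats.mean(dim=(2, 3))), y_cls)
    # Segmentation branch: supervised by the few annotated masks
    # (y_seg is an integer mask of shape (N, H, W)).
    loss_seg = F.cross_entropy(seg_head(encoder(x_seg)), y_seg)
    return loss_cls + loss_seg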
Srinidhi, Chetan L; Martel, Anne L
Improving Self-supervised Learning with Hardness-aware Dynamic Curriculum Learning: An Application to Digital Pathology Proceedings Article
In: 2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW), 2021.
@inproceedings{Srinidhi2021a,
title = {Improving Self-supervised Learning with Hardness-aware Dynamic Curriculum Learning: An Application to Digital Pathology},
author = {Chetan L Srinidhi and Anne L Martel},
year = {2021},
date = {2021-01-01},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)},
abstract = {Self-supervised learning (SSL) has recently shown tremendous potential to learn generic visual representations useful for many image analysis tasks. Despite their notable success, the existing SSL methods fail to generalize to downstream tasks when the number of labeled training instances is small or if the domain shift between the transfer domains is significant. In this paper, we attempt to improve self-supervised pretrained representations through the lens of curriculum learning by proposing a hardness-aware dynamic curriculum learning (HaDCL) approach. To improve the robustness and generalizability of SSL, we dynamically leverage progressive harder examples via easy-to-hard and hard-to-very-hard samples during mini-batch downstream fine-tuning. We discover that by progressive stage-wise curriculum learning, the pretrained representations are significantly enhanced and adaptable to both in-domain and out-of-domain distribution data. We performed extensive validation on three histology benchmark datasets on both patch-wise and slide-level classification problems. Our curriculum based fine-tuning yields a significant improvement over standard fine-tuning, with a minimum improvement in area-under-the-curve (AUC) score of 1.7% and 2.2% on in-domain and out-of-domain distribution data, respectively. Further, we empirically show that our approach is more generic and adaptable to any SSL methods and does not impose any additional overhead complexity. Besides, we also outline the role of patch-based versus slide-based curriculum learning in histopathology to provide practical insights into the success of curriculum based fine-tuning of SSL methods.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2020
Maier-Hein, Lena; Reinke, Annika; Kozubek, Michal; Martel, Anne L.; Arbel, Tal; Eisenmann, Matthias; Hanbury, Allan; Jannin, Pierre; Müller, Henning; Onogur, Sinan; Saez-Rodriguez, Julio; Ginneken, Bram; Kopp-Schneider, Annette; Landman, Bennett A
BIAS: Transparent reporting of biomedical image analysis challenges Journal Article
In: Medical Image Analysis, vol. 66, pp. 101796, 2020, ISSN: 13618423.
@article{Maier-Hein2020,
title = {BIAS: Transparent reporting of biomedical image analysis challenges},
author = {Lena Maier-Hein and Annika Reinke and Michal Kozubek and Anne L. Martel and Tal Arbel and Matthias Eisenmann and Allan Hanbury and Pierre Jannin and Henning Müller and Sinan Onogur and Julio Saez-Rodriguez and Bram Ginneken and Annette Kopp-Schneider and Bennett A Landman},
url = {https://doi.org/10.1016/j.media.2020.101796 https://linkinghub.elsevier.com/retrieve/pii/S1361841520301602},
doi = {10.1016/j.media.2020.101796},
issn = {13618423},
year = {2020},
date = {2020-12-01},
urldate = {2020-12-01},
journal = {Medical Image Analysis},
volume = {66},
pages = {101796},
abstract = {The number of biomedical image analysis challenges organized per year is steadily increasing. These international competitions have the purpose of benchmarking algorithms on common data sets, typically to identify the best method for a given problem. Recent research, however, revealed that common practice related to challenge reporting does not allow for adequate interpretation and reproducibility of results. To address the discrepancy between the impact of challenges and the quality (control), the Biomedical Image Analysis ChallengeS (BIAS) initiative developed a set of recommendations for the reporting of challenges. The BIAS statement aims to improve the transparency of the reporting of a biomedical image analysis challenge regardless of field of application, image modality or task category assessed. This article describes how the BIAS statement was developed and presents a checklist which authors of biomedical image analysis challenges are encouraged to include in their submission when giving a paper on a challenge into review. The purpose of the checklist is to standardize and facilitate the review process and raise interpretability and reproducibility of challenge results by making relevant information explicit.},
keywords = {Biomedical challenges},
pubstate = {published},
tppubtype = {article}
}
Kuling, Grey; Curpen, Belinda; Martel, Anne L.
Domain adapted breast tissue segmentation in magnetic resonance imaging Proceedings Article
In: Ongeval, Chantal Van; Marshall, Nicholas; Bosmans, Hilde (Ed.): 15th International Workshop on Breast Imaging (IWBI2020), pp. 61, SPIE, 2020, ISBN: 9781510638310.
@inproceedings{Kuling2020,
title = {Domain adapted breast tissue segmentation in magnetic resonance imaging},
author = {Grey Kuling and Belinda Curpen and Anne L. Martel},
editor = {Chantal Van Ongeval and Nicholas Marshall and Hilde Bosmans},
url = {https://doi.org/10.1117/12.2564131 https://www.spiedigitallibrary.org/conference-proceedings-of-spie/11513/2564131/Domain-adapted-breast-tissue-segmentation-in-magnetic-resonance-imaging/10.1117/12.2564131.full},
doi = {10.1117/12.2564131},
isbn = {9781510638310},
year = {2020},
date = {2020-05-01},
urldate = {2020-05-01},
booktitle = {15th International Workshop on Breast Imaging (IWBI2020)},
pages = {61},
publisher = {SPIE},
abstract = {For women at high risk (>25% lifetime risk) of developing breast cancer, combination screening with mammography and magnetic resonance imaging (MRI) is recommended. Risk stratification is based on current modeling tools for risk assessment. However, adding additional radiological features may improve AUC. To validate tissue features in MRI requires large scale epidemiological studies across health centres. Therefore it is essential to have a robust, fully automated segmentation method. This presents a challenge of imaging domain adaptation in deep learning. Here, we present a breast segmentation pipeline that uses multiple UNet segmentation models trained on different image types. We use Monte-Carlo Dropout to measure each model's uncertainty, allowing the most appropriate model to be selected when the image domain is unknown. We show our pipeline achieves a Dice similarity average of 0.78 for fibroglandular tissue segmentation and has good adherence to radiologist assessment.},
keywords = {_breast_segmentation},
pubstate = {published},
tppubtype = {inproceedings}
}
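The Monte-Carlo Dropout model selection described in this abstract can be sketched as follows; the predictive-entropy criterion and the number of stochastic passes are illustrative assumptions:

import torch

@torch.no_grad()
def mc_dropout_uncertainty(model, x, n_samples=20):
    # Keep dropout stochastic at inference time by switching to train
    # mode, then average predictive entropy over repeated forward passes.
    model.train()
    probs = torch.stack([torch.sigmoid(model(x)) for _ in range(n_samples)])
    p = probs.mean(dim=0).clamp(1e-6, 1 - 1e-6)
    entropy = -(p * p.log() + (1 - p) * (1 - p).log())
    return entropy.mean().item()

# Hypothetical usage: route the scan to the least-uncertain model.
# best_model = min(models, key=lambda m: mc_dropout_uncertainty(m, x))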
Klein, Geoff; Martel, Anne; Sahgal, Arjun; Whyne, Cari; Hardisty, Michael
Metastatic Vertebrae Segmentation for Use in a Clinical Pipeline Workshop
Computational Methods and Clinical Applications for Spine Imaging, vol. 11963, Lecture Notes in Computer Science, International Workshop and Challenge on Computational Methods and Clinical Applications for Spine Imaging, Springer, 2020.
@workshop{Klein2020,
title = {Metastatic Vertebrae Segmentation for Use in a Clinical Pipeline},
author = {Geoff Klein and Anne Martel and Arjun Sahgal and Cari Whyne and Michael Hardisty},
doi = {10.1007/978-3-030-39752-4_2},
year = {2020},
date = {2020-02-01},
urldate = {2020-02-01},
booktitle = {Computational Methods and Clinical Applications for Spine Imaging},
volume = {11963},
pages = {15-28},
publisher = {Springer},
organization = {International Workshop and Challenge on Computational Methods and Clinical Applications for Spine Imaging},
series = {Lecture Notes in Computer Science},
abstract = {Vertebral metastases are common complications of primary cancers that alter bone architecture potentially leading to vertebral fracture and neurological compromise. Quantitative measures from vertebral body segmentation from Computed Tomography (CT) scans have been useful for assessing fracture risk predictions and vertebrae stability. Previous segmentation methods used to generate these metrics were slow and required manual intervention, limiting their utility. More accurate, robust and fast methods are needed for clinical assessments. This investigation proposes a 3D U-Net Convolutional Neural Network (CNN) to accurately segment individual trabecular centrum from metastatically compromised vertebrae of interest in CT imaging. Using different augmentation techniques achieved good performance (DSC = 0.904 ± 0.056) with the segmentation model remaining accurate with simulated lower image quality, and translation of the vertebrae within the image, especially compared to when no augmentations were used (DSC = 0.774 ± 0.188). Integration of this method into a clinical tool will allow accurate and robust quantitative assessment of mechanical stability, aiding clinical decision making to improve patient care.},
keywords = {Medical Imaging},
pubstate = {published},
tppubtype = {workshop}
}
Chen, Jianan; Amemiya, Yutaka; Kuling, Grey; Fashandi, Homa; Yerofeyeva, Yulia; Hussein, Heba; Slodkowska, Elzbieta; Ginty, Fiona; Seth, Arun; Yaffe, Martin; Martel, Anne L.
Texture heterogeneity of breast tumour in magnetic resonance imaging can be explained by differentially regulated genes Conference
Cancer Research, vol. 80, Abstracts: 2019 San Antonio Breast Cancer Symposium; December 10-14, 2019; San Antonio, Texas, American Association for Cancer Research, 2020.
@conference{Chen2020,
title = {Texture heterogeneity of breast tumour in magnetic resonance imaging can be explained by differentially regulated genes},
author = {Jianan Chen and Yutaka Amemiya and Grey Kuling and Homa Fashandi and Yulia Yerofeyeva and Heba Hussein and Elzbieta Slodkowska and Fiona Ginty and Arun Seth and Martin Yaffe and Anne L. Martel},
doi = {10.1158/1538-7445.SABCS19-P6-10-12},
year = {2020},
date = {2020-02-01},
urldate = {2020-02-01},
booktitle = {Cancer Research},
volume = {80},
pages = {P6-10-12},
publisher = {American Association for Cancer Research},
organization = {Abstracts: 2019 San Antonio Breast Cancer Symposium; December 10-14, 2019; San Antonio, Texas},
abstract = {Background: Magnetic resonance imaging (MRI) and molecular profiling of tumour tissues have become standard techniques to study breast cancer in recent years. However, despite the myriad imaging and genetic subtypes that have been identified, the underlying biological mechanisms of MRI features are seldom explained, and differentially regulated genes are rarely linked to the phenotypic appearance of tumours. In this study, we propose to fill this gap in knowledge by investigating the unbiased correlations between MRI phenotypes and differential gene expressions in breast cancer.
Methods: Patients diagnosed during 2002-15 with invasive breast cancer who went through surgery were retrospectively reviewed for magnetic resonance imaging (MRI) and genomics analysis. In total, we collected dynamic contrast-enhanced subtraction MRI and RNA sequencing results of surgical specimens from a cohort of 56 patients. Of these, 31 patients (aged 33 to 72 years) met our inclusion criteria. Tumour lesion segmentation was performed by a radiologist who has 10 years of experience. We extracted features that quantitatively describe tumour appearance from the segmented lesions using pyradiomics (v2.0.0). We then grouped the tumours into two imaging subtypes using an unsupervised clustering approach (SIMLR, v1.10.0). To probe the underlying biological mechanisms behind the difference in tumour appearance, we performed differential expression analysis (edgeR, v3.26.5) and pathway enrichment analysis (g:profiler) between the two imaging subtypes. Multiple testing correction was conducted with Benjamini-Hochberg correction using a false discovery rate of 0.05.
Results: We classified the breast tumours from our cohort into two imaging subtypes that have distinct levels of heterogeneity in texture (p=0.004). We found a list of genes that were significantly differentially expressed between the heterogenous (n=20) and homogenous (n=11) subtypes (Table 1), and their associated biological pathways. We found that the pathways controlling cell growth (p=0.022), cell migration and invasion (p=0.023), estrogen regulation (p=0.022) and DNA damage repair (p=0.015) mechanisms may have contributed to increased heterogeneity in tumour presentation when imaged with MRI.
Conclusion: The underlying biological mechanisms affecting breast MRI texture can be investigated by linking tumour appearance to gene expression profiling. Our results suggest that texture heterogeneity in breast MRI could be linked to a number of differentially expressed genes that may be further investigated as a biomarker of cancer risk assessment or recurrence. Further studies with a larger cohort will be conducted to validate and extend these results.},
keywords = {Medical Imaging},
pubstate = {published},
tppubtype = {conference}
}
Lin, Peter; Martel, Anne; Camilleri, Susan; Pop, Mihaela
Co-registered Cardiac ex vivo DT Images and Histological Images for Fibrosis Quantification Workshop
International Workshop on Statistical Atlases and Computational Models of the Heart (STACOM 2019) - MICCAI 2019, Springer, 2020.
@workshop{Lin2020,
title = {Co-registered Cardiac ex vivo DT Images and Histological Images for Fibrosis Quantification},
author = {Lin, Peter and Martel, Anne and Camilleri, Susan and Pop, Mihaela},
year = {2020},
date = {2020-01-23},
booktitle = {International Workshop on Statistical Atlases and Computational Models of the Heart (STACOM 2019) - MICCAI 2019},
pages = {3-11},
publisher = {Springer},
abstract = {Cardiac magnetic resonance (MR) imaging can detect infarct scar, a major cause of lethal arrhythmia and heart failure. Here, we describe a robust image processing pipeline developed to quantitatively analyze collagen density and features in a pig model of chronic fibrosis. Specifically, we use ex vivo diffusion tensor imaging (DTI) (0.6×0.6×1.2 mm resolution) to calculate fractional anisotropy maps in: healthy tissue, infarct core (IC) and gray zone (GZ) (i.e., a mixture of viable myocytes and collagen fibrils bordering IC and healthy zones). The 3 zones were validated using collagen-sensitive histological slides co-registered with MR images. Our results showed a significant (p<0.05) reduction in the mean FA values of GZ (by 17%) and IC (by 44%) compared to healthy areas; however, we found that these differences do not depend on the location of occluded coronary artery (LAD vs LCX). This work validates the utility of DTI-MR imaging for fibrosis quantification, with histological validation.},
keywords = {Medical Imaging},
pubstate = {published},
tppubtype = {workshop}
}
Mouraviev, Andrei; Detsky, Jay; Sahgal, Arjun; Ruschin, Mark; Lee, Young K; Karam, Irene; Heyn, Chris; Stanisz, Greg J; Martel, Anne L
Use of Radiomics for the Prediction of Local Control of Brain Metastases After Stereotactic Radiosurgery Journal Article
In: Neuro-Oncology, 2020.
@article{Mouraviev2020,
title = {Use of Radiomics for the Prediction of Local Control of Brain Metastases After Stereotactic Radiosurgery},
author = {Mouraviev, Andrei and Detsky, Jay and Sahgal, Arjun and Ruschin, Mark and Lee, Young K and Karam, Irene and Heyn, Chris and Stanisz, Greg J and Martel, Anne L},
year = {2020},
date = {2020-01-20},
journal = {Neuro-Oncology},
abstract = {Background
Local response prediction for brain metastases (BM) after stereotactic radiosurgery (SRS) is challenging, particularly for smaller BM, as existing criteria are based solely on unidimensional measurements. This investigation sought to determine whether radiomic features provide additional value to routinely available clinical and dosimetric variables to predict local recurrence following SRS.
Methods
408 BM in 87 patients treated with SRS were analyzed. A total of 440 radiomic features were extracted from the tumor core, and the peritumoral regions, using the baseline pre-treatment volumetric post-contrast T1 (T1c) and volumetric T2 fluid-attenuated inversion recovery (FLAIR) MRI sequences. Local tumor progression was determined based on RANO-BM criteria, with a maximum axial diameter growth of >20% on the follow-up T1c indicating local failure. The top radiomic features were determined based on resampled Random Forest (RF) feature importance. An RF classifier was trained using each set of features and evaluated using the area under the receiver operating characteristic curve (AUC).
Results
The addition of any one of the top ten radiomic features to the set of clinical features resulted in a statistically significant (p<0.001) increase in the AUC. An optimized combination of radiomic and clinical features resulted in a 19% higher resampled AUC (mean = 0.793, 95% C.I. = 0.792-0.795) than clinical features alone (0.669, 0.668-0.671).
Conclusions
The increase in AUC of the RF classifier, after incorporating radiomic features, suggests that quantitative characterization of tumor appearance on pretreatment T1c and FLAIR adds value to known clinical and dosimetric variables for predicting local failure.},
keywords = {Brain},
pubstate = {published},
tppubtype = {article}
}
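The evaluation pipeline, a Random Forest scored by cross-validated AUC on clinical features alone versus clinical plus radiomic features, can be mimicked with scikit-learn. The data below are random stand-ins, so the printed AUCs are placeholders rather than the study's results:

import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(0)
n = 200
clinical = rng.normal(size=(n, 4))    # stand-ins for clinical/dosimetric variables
radiomic = rng.normal(size=(n, 10))   # stand-ins for texture features
y = rng.integers(0, 2, size=n)        # local failure yes/no

for name, X in [("clinical only", clinical),
                ("clinical + radiomic", np.hstack([clinical, radiomic]))]:
    clf = RandomForestClassifier(n_estimators=200, random_state=0)
    auc = cross_val_score(clf, X, y, cv=5, scoring="roc_auc").mean()
    print(f"{name}: AUC = {auc:.3f}")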
Lin, Peter; Martel, Anne; Camilleri, Susan; Pop, Mihaela
Co-registered Cardiac ex vivo DT Images and Histological Images for Fibrosis Quantification Book Section
In: 10th Workshop on Statistical Atlases and Computational Modelling of the Heart, Shenzen, China, pp. 3–11, 2020.
@incollection{Lin2020b,
title = {Co-registered Cardiac ex vivo DT Images and Histological Images for Fibrosis Quantification},
author = {Peter Lin and Anne Martel and Susan Camilleri and Mihaela Pop},
url = {http://link.springer.com/10.1007/978-3-030-39074-7_1},
doi = {10.1007/978-3-030-39074-7_1},
year = {2020},
date = {2020-01-01},
booktitle = {10th Workshop on Statistical Atlases and Computational Modelling of the Heart, Shenzen, China},
pages = {3–11},
abstract = {Cardiac magnetic resonance (MR) imaging can detect infarct scar, a major cause of lethal arrhythmia and heart failure. Here, we describe a robust image processing pipeline developed to quantitatively analyze collagen density and features in a pig model of chronic fibrosis. Specifically, we use ex vivo diffusion tensor imaging (DTI) (0.6×0.6×1.2 mm resolution) to calculate fractional anisotropy maps in: healthy tissue, infarct core (IC) and gray zone (GZ) (i.e., a mixture of viable myocytes and collagen fibrils bordering IC and healthy zones). The 3 zones were validated using collagen-sensitive histological slides co-registered with MR images. Our results showed a significant (p<0.05) reduction in the mean FA values of GZ (by 17%) and IC (by 44%) compared to healthy areas; however, we found that these differences do not depend on the location of occluded coronary artery (LAD vs LCX). This work validates the utility of DTI-MR imaging for fibrosis quantification, with histological validation.},
keywords = {},
pubstate = {published},
tppubtype = {incollection}
}
Klein, Geoff; Martel, Anne; Sahgal, Arjun; Whyne, Cari; Hardisty, Michael
Metastatic Vertebrae Segmentation for Use in a Clinical Pipeline Proceedings Article
In: Computational Methods and Clinical Applications for Spine Imaging. CSI 2019, pp. 15–28, Shenzhen, China, 2020.
@inproceedings{Klein2019,
title = {Metastatic Vertebrae Segmentation for Use in a Clinical Pipeline},
author = {Geoff Klein and Anne Martel and Arjun Sahgal and Cari Whyne and Michael Hardisty},
url = {http://link.springer.com/10.1007/978-3-030-39752-4_2},
doi = {10.1007/978-3-030-39752-4_2},
year = {2020},
date = {2020-01-01},
booktitle = {Computational Methods and Clinical Applications for Spine Imaging. CSI 2019},
volume = {LNCS vol 1},
pages = {15–28},
address = {Shenzhen, China},
abstract = {Vertebral metastases are common complications of primary cancers that alter bone architecture potentially leading to vertebral fracture and neurological compromise. Quantitative measures from vertebral body segmentations from Computed Tomography (CT) scans have been useful for assessing fracture risk predictions and vertebrae stability. Previous segmentation methods used to generate these metrics were slow and required manual intervention, limiting their utility. More accurate, robust and fast methods are needed for clinical assessments. This investigation proposes a 3D U-Net Convolutional Neural Network (CNN) to accurately segment individual trabecular centrum from metastatically compromised vertebrae of interest in CT imaging. Using different augmentation techniques achieved good performance (DSC = 0.904 ± 0.056) with the segmentation model remaining accurate with simulated lower image quality, and translation of the vertebrae within the image, especially compared to when no augmentations were used (DSC = 0.774 ± 0.188). Integration of this method into a clinical tool will allow accurate and robust quantitative assessment of mechanical stability, aiding clinical decision making to improve patient care.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
Ciga, Ozan; Martel, Anne L
Learning to segment images with classification labels Journal Article
In: arXiv preprint arXiv:1912.12533, 2019.
@article{Ciga2019b,
title = {Learning to segment images with classification labels},
author = {Ozan Ciga and Anne L Martel},
year = {2019},
date = {2019-12-28},
journal = {arXiv preprint arXiv:1912.12533},
abstract = {Two of the most common tasks in medical imaging are classification and segmentation. Either task requires labeled data annotated by experts, which is scarce and expensive to collect. Annotating data for segmentation is generally considered to be more laborious as the annotator has to draw around the boundaries of regions of interest, as opposed to assigning image patches a class label. Furthermore, in tasks such as breast cancer histopathology, any realistic clinical application often includes working with whole slide images, whereas most publicly available training data are in the form of image patches, which are given a class label. We propose an architecture that can alleviate the requirements for segmentation-level ground truth by making use of image-level labels to reduce the amount of time spent on data curation. In addition, this architecture can help unlock the potential of previously acquired image-level datasets on segmentation tasks by annotating a small number of regions of interest. In our experiments, we show using only one segmentation-level annotation per class, we can achieve performance comparable to a fully annotated dataset.},
keywords = {digital pathology},
pubstate = {published},
tppubtype = {article}
}
Srinidhi, Chetan L; Ciga, Ozan; Martel, Anne L
Deep neural network models for computational histopathology: A survey Journal Article
In: arXiv preprint arXiv:1912.12378, 2019.
@article{Srinidhi2019,
title = {Deep neural network models for computational histopathology: A survey},
author = {Chetan L Srinidhi and Ozan Ciga and Anne L Martel},
year = {2019},
date = {2019-12-28},
journal = {arXiv preprint arXiv:1912.12378},
abstract = {Histopathological images contain rich phenotypic information that can be used to monitor underlying mechanisms contributing to disease progression and patient survival outcomes. Recently, deep learning has become the mainstream methodological choice for analyzing and interpreting cancer histology images. In this paper, we present a comprehensive review of state-of-the-art deep learning approaches that have been used in the context of histopathological image analysis. From the survey of over 130 papers, we review the field's progress based on the methodological aspect of different machine learning strategies such as supervised, weakly supervised, unsupervised, transfer learning and various other sub-variants of these methods. We also provide an overview of deep learning based survival models that are applicable for disease-specific prognosis tasks. Finally, we summarize several existing open datasets and highlight critical challenges and limitations with current deep learning approaches, along with possible avenues for future research.},
keywords = {digital pathology},
pubstate = {published},
tppubtype = {article}
}
Balki, Indranil; Amirabadi, Afsaneh; Levman, Jacob; Martel, Anne L; Emersic, Ziga; Meden, Blaz; Garcia-Pedrero, Angel; Ramirez, Saul C; Kong, Dehan; Moody, Alan R; Tyrrell, Pascal N
Sample-Size Determination Methodologies for Machine Learning in Medical Imaging Research: A Systematic Review Journal Article
In: Canadian Association of Radiologists Journal, vol. 70, no. 4, pp. 344–353, 2019, ISSN: 0846-5371.
@article{Balki2019b,
title = {Sample-Size Determination Methodologies for Machine Learning in Medical Imaging Research: A Systematic Review},
author = {Indranil Balki and Afsaneh Amirabadi and Jacob Levman and Anne L Martel and Ziga Emersic and Blaz Meden and Angel Garcia-Pedrero and Saul C Ramirez and Dehan Kong and Alan R Moody and Pascal N Tyrrell},
url = {https://doi.org/10.1016/j.carj.2019.06.002 http://journals.sagepub.com/doi/10.1016/j.carj.2019.06.002},
doi = {10.1016/j.carj.2019.06.002},
issn = {0846-5371},
year = {2019},
date = {2019-11-01},
journal = {Canadian Association of Radiologists Journal},
volume = {70},
number = {4},
pages = {344–353},
publisher = {Elsevier Inc.},
abstract = {Purpose: The required training sample size for a particular machine learning (ML) model applied to medical imaging data is often unknown. The purpose of this study was to provide a descriptive review of current sample-size determination methodologies in ML applied to medical imaging and to propose recommendations for future work in the field. Methods: We conducted a systematic literature search of articles using Medline and Embase with keywords including “machine learning,” “image,” and “sample size.” The search included articles published between 1946 and 2018. Data regarding the ML task, sample size, and train-test pipeline were collected. Results: A total of 167 articles were identified, of which 22 were included for qualitative analysis. There were only 4 studies that discussed sample-size determination methodologies, and 18 that tested the effect of sample size on model performance as part of an exploratory analysis. The observed methods could be categorized as pre hoc model-based approaches, which relied on features of the algorithm, or post hoc curve-fitting approaches requiring empirical testing to model and extrapolate algorithm performance as a function of sample size. Between studies, we observed great variability in performance testing procedures used for curve-fitting, model assessment methods, and reporting of confidence in sample sizes. Conclusions: Our study highlights the scarcity of research in training set size determination methodologies applied to ML in medical imaging, emphasizes the need to standardize current reporting practices, and guides future work in development and streamlining of pre hoc and post hoc sample size approaches.},
keywords = {Machine learning, Medical Imaging, Radiology, Sample size},
pubstate = {published},
tppubtype = {article}
}
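The post hoc curve-fitting approach the review describes measures performance at a few pilot training-set sizes and extrapolates with a fitted curve, commonly an inverse power law. A sketch on hypothetical pilot numbers:

import numpy as np
from scipy.optimize import curve_fit

def inverse_power(n, a, b, c):
    # Learning-curve model: performance(n) = a - b * n^(-c).
    return a - b * np.power(n, -c)

sizes = np.array([50.0, 100, 200, 400, 800])
accs = np.array([0.71, 0.76, 0.80, 0.83, 0.85])   # hypothetical pilot results

params, _ = curve_fit(inverse_power, sizes, accs, p0=[0.9, 1.0, 0.5], maxfev=10000)
print("extrapolated accuracy at n=5000:", inverse_power(5000, *params))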
Goubran, Maged; Ntiri, Emmanuel Edward; Akhavein, Hassan; Holmes, Melissa; Nestor, Sean; Ramirez, Joel; Adamo, Sabrina; Ozzoude, Miracle; Scott, Christopher; Gao, Fuqiang; Martel, Anne L; Swardfager, Walter; Masellis, Mario; Swartz, Richard; MacIntosh, Bradley; Black, Sandra E
Hippocampal segmentation for brains with extensive atrophy using three‐dimensional convolutional neural networks Journal Article
In: Human Brain Mapping, 2019.
@article{Goubran2019,
title = {Hippocampal segmentation for brains with extensive atrophy using three‐dimensional convolutional neural networks},
author = {Maged Goubran and Emmanuel Edward Ntiri and Hassan Akhavein and Melissa Holmes and Sean Nestor and Joel Ramirez and Sabrina Adamo and Miracle Ozzoude and Christopher Scott and Fuqiang Gao and Anne L Martel and Walter Swardfager and Mario Masellis and Richard Swartz and Bradley MacIntosh and Sandra E Black},
year = {2019},
date = {2019-10-14},
urldate = {2019-10-14},
journal = {Human Brain Mapping},
abstract = {Hippocampal volumetry is a critical biomarker of aging and dementia, and it is widely used as a predictor of cognitive performance; however, automated hippocampal segmentation methods are limited because the algorithms are (a) not publicly available, (b) subject to error with significant brain atrophy, cerebrovascular disease and lesions, and/or (c) computationally expensive or require parameter tuning. In this study, we trained a 3D convolutional neural network using 259 bilateral manually delineated segmentations collected from three studies, acquired at multiple sites on different scanners with variable protocols. Our training dataset consisted of elderly cases difficult to segment due to extensive atrophy, vascular disease, and lesions. Our algorithm (HippMapp3r) was validated against five other publicly available state‐of‐the‐art techniques (HippoDeep, FreeSurfer, SBHV, volBrain, and FIRST). HippMapp3r outperformed the other techniques on all three metrics, generating an average Dice of 0.89 and a correlation coefficient of 0.95. It was two orders of magnitude faster than some of the tested techniques. Further validation was performed on 200 subjects from two other disease populations (frontotemporal dementia and vascular cognitive impairment), highlighting our method's low outlier rate. We finally tested the methods on real and simulated “clinical adversarial” cases to study their robustness to corrupt, low‐quality scans. The pipeline and models are available at: https://hippmapp3r.readthedocs.io to facilitate the study of the hippocampus in large multisite studies.},
keywords = {Brain},
pubstate = {published},
tppubtype = {article}
}
Ciga, Ozan; Chen, Jianan; Martel, Anne
Multi-layer Domain Adaptation for Deep Convolutional Networks Conference
MICCAI Workshop on Domain Adaptation and Representation Transfer and International Workshop on Medical Image Learning with Less Labels and Imperfect Data, vol. 11795, 2019.
@conference{Ciga2019,
title = {Multi-layer Domain Adaptation for Deep Convolutional Networks},
author = {Ozan Ciga and Jianan Chen and Anne Martel},
year = {2019},
date = {2019-10-13},
booktitle = {MICCAI Workshop on Domain Adaptation and Representation Transfer and International Workshop on Medical Image Learning with Less Labels and Imperfect Data},
volume = {11795},
pages = {20-27},
abstract = {Despite their success in many computer vision tasks, convolutional networks tend to require large amounts of labeled data to achieve generalization. Furthermore, the performance is not guaranteed on a sample from an unseen domain at test time, if the network was not exposed to similar samples from that domain at training time. This hinders the adoption of these techniques in clinical setting where the imaging data is scarce, and where the intra- and inter-domain variance of the data can be substantial. We propose a domain adaptation technique that is especially suitable for deep networks to alleviate this requirement of labeled data. Our method utilizes gradient reversal layers [4] and Squeeze-and-Excite modules [6] to stabilize the training in deep networks. The proposed method was applied to publicly available histopathology and chest X-ray databases and achieved superior performance to existing state-of-the-art networks with and without domain adaptation. Depending on the application, our method can improve multi-class classification accuracy by 5–20% compared to DANN introduced in [4].},
keywords = {digital pathology},
pubstate = {published},
tppubtype = {conference}
}
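The gradient reversal layer this method builds on (reference [4] in the abstract) is compact enough to show in full. This is the standard construction rather than the authors' exact module:

import torch

class GradReverse(torch.autograd.Function):
    # Identity on the forward pass; multiplies the gradient by -lambda on
    # the backward pass, so shared features learn to fool the domain head.
    @staticmethod
    def forward(ctx, x, lamb):
        ctx.lamb = lamb
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return -ctx.lamb * grad_output, None

def grad_reverse(x, lamb=1.0):
    return GradReverse.apply(x, lamb)

# Hypothetical usage inside a forward pass:
# domain_logits = domain_head(grad_reverse(features, lamb=0.5))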
Chen, Jianan; Milot, Laurent; Cheung, Helen MC; Martel, Anne L
Unsupervised Clustering of Quantitative Imaging Phenotypes Using Autoencoder and Gaussian Mixture Model Conference
International Conference on Medical Image Computing and Computer-Assisted Intervention, vol. 11767, 2019.
@conference{Chen2019,
title = {Unsupervised Clustering of Quantitative Imaging Phenotypes Using Autoencoder and Gaussian Mixture Model},
author = {Jianan Chen and Laurent Milot and Helen MC Cheung and Anne L Martel},
year = {2019},
date = {2019-10-13},
urldate = {2019-10-13},
booktitle = {International Conference on Medical Image Computing and Computer-Assisted Intervention},
volume = {11767},
pages = {575-582},
abstract = {Quantitative medical image computing (radiomics) has been widely applied to build prediction models from medical images. However, overfitting is a significant issue in conventional radiomics, where a large number of radiomic features are directly used to train and test models that predict genotypes or clinical outcomes. In order to tackle this problem, we propose an unsupervised learning pipeline composed of an autoencoder for representation learning of radiomic features and a Gaussian mixture model based on minimum message length criterion for clustering. By incorporating probabilistic modeling, disease heterogeneity has been taken into account. The performance of the proposed pipeline was evaluated on an institutional MRI cohort of 108 patients with colorectal cancer liver metastases. Our approach is capable of automatically selecting the optimal number of clusters and assigns patients into clusters (imaging subtypes) with significantly different survival rates. Our method outperforms other unsupervised clustering methods that have been used for radiomics analysis and has comparable performance to a state-of-the-art imaging biomarker.},
keywords = {Medical Imaging},
pubstate = {published},
tppubtype = {conference}
}
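The pipeline, an autoencoder for representation learning followed by Gaussian mixture clustering with automatic selection of the number of clusters, can be sketched end to end. BIC is used below as a stand-in for the paper's minimum message length criterion, and the input matrix is a random placeholder:

import numpy as np
import torch
from sklearn.mixture import GaussianMixture

X = torch.randn(108, 100)   # placeholder for (patients x radiomic features)

encoder = torch.nn.Sequential(torch.nn.Linear(100, 32), torch.nn.ReLU(),
                              torch.nn.Linear(32, 8))
decoder = torch.nn.Sequential(torch.nn.Linear(8, 32), torch.nn.ReLU(),
                              torch.nn.Linear(32, 100))
opt = torch.optim.Adam(list(encoder.parameters()) + list(decoder.parameters()),
                       lr=1e-3)

for _ in range(200):   # train by reconstruction
    opt.zero_grad()
    loss = torch.nn.functional.mse_loss(decoder(encoder(X)), X)
    loss.backward()
    opt.step()

z = encoder(X).detach().numpy()
candidates = [GaussianMixture(k, random_state=0).fit(z) for k in range(2, 6)]
best = min(candidates, key=lambda g: g.bic(z))
print("chosen clusters:", best.n_components, np.bincount(best.predict(z)))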
Maier-Hein, Lena; Reinke, Annika; Kozubek, Michal; Martel, Anne L.; Arbel, Tal; Eisenmann, Matthias; Hanbury, Allan; Jannin, Pierre; Müller, Henning; Onogur, Sinan; Saez-Rodriguez, Julio; Ginneken, Bram; Kopp-Schneider, Annette; Landman, Bennett
BIAS: Transparent reporting of biomedical image analysis challenges Journal Article
In: arXiv preprint arXiv:1910.04071, 2019.
@article{Maier-Hein2019,
title = {BIAS: Transparent reporting of biomedical image analysis challenges},
author = {Lena Maier-Hein and Annika Reinke and Michal Kozubek and Anne L. Martel and Tal Arbel and Matthias Eisenmann and Allan Hanbury and Pierre Jannin and Henning Müller and Sinan Onogur and Julio Saez-Rodriguez and Bram van Ginneken and Annette Kopp-Schneider and Bennett Landman},
year = {2019},
date = {2019-10-09},
urldate = {2019-10-09},
journal = {arXiv preprint arXiv:1910.04071},
abstract = {The number of biomedical image analysis challenges organized per year is steadily increasing. These international competitions have the purpose of benchmarking algorithms on common data sets, typically to identify the best method for a given problem. Recent research, however, revealed that common practice related to challenge reporting does not allow for adequate interpretation and reproducibility of results. To address the discrepancy between the impact of challenges and the quality (control), the Biomedical Image Analysis ChallengeS (BIAS) initiative developed a set of recommendations for the reporting of challenges. The BIAS statement aims to improve the transparency of the reporting of a biomedical image analysis challenge regardless of field of application, image modality or task category assessed. This article describes how the BIAS statement was developed and presents a checklist which authors of biomedical image analysis challenges are encouraged to include in their submission when giving a paper on a challenge into review. The purpose of the checklist is to standardize and facilitate the review process and raise interpretability and reproducibility of challenge results by making relevant information explicit.},
keywords = {Medical Imaging},
pubstate = {published},
tppubtype = {article}
}
Akbar, Shazia; Peikari, Mohammad; Salama, Sherine; Panah, Azadeh Yazdan; Nofech-Mozes, Sharon; Martel, Anne L
Automated and Manual Quantification of Tumour Cellularity in Digital Slides for Tumour Burden Assessment Journal Article
In: Scientific Reports, vol. 9, pp. 14099, 2019.
@article{Akbar2019b,
title = {Automated and Manual Quantification of Tumour Cellularity in Digital Slides for Tumour Burden Assessment},
author = {Shazia Akbar and Mohammad Peikari and Sherine Salama and Azadeh Yazdan Panah and Sharon Nofech-Mozes and Anne L Martel},
year = {2019},
date = {2019-10-01},
urldate = {2019-10-01},
journal = {Scientific Reports},
volume = {9},
pages = {14099},
abstract = {Aims:
The residual cancer burden index is an important quantitative measure used for assessing treatment response following neoadjuvant therapy for breast cancer. It has been shown to be predictive of overall survival and is composed of two key metrics: qualitative assessment of lymph nodes and the percentage of invasive or in-situ tumour cellularity (TC) in the tumour bed (TB). Currently, TC is assessed through eye-balling of routine histopathology slides estimating the proportion of tumour cells within the TB. With the advances in production of digitized slides and increasing availability of slide scanners in pathology laboratories, there is potential to measure TC using automated algorithms with greater precision and accuracy.
Methods:
We describe two methods for automated TC scoring: 1) a traditional approach to image analysis development whereby we mimic the pathologists' workflow, and 2) a recent development in artificial intelligence in which features are learned automatically in deep neural networks using image data alone.
Results:
We show strong agreements between automated and manual analysis of digital slides. Agreements between our trained deep neural networks and experts in this study (0.82) approach the inter-rater agreements between pathologists (0.89). We also reveal properties that are captured when we apply deep neural network to whole slide images, and discuss the potential of using such visualisations to improve upon TC assessment in the future.
Conclusions:
TC scoring can be successfully automated by leveraging recent advancements in artificial intelligence, thereby alleviating the burden of manual analysis.},
keywords = {digital pathology},
pubstate = {published},
tppubtype = {article}
}
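Agreement figures like the 0.82 (network vs expert) and 0.89 (between pathologists) quoted above are typically reported as intraclass correlation or weighted kappa statistics; the exact statistic used in the paper is not restated here. A quadratic-weighted Cohen's kappa on made-up ordinal cellularity bins:

from sklearn.metrics import cohen_kappa_score

# Hypothetical cellularity scores binned into ordinal categories 0-3.
pathologist = [0, 1, 2, 2, 3, 1, 0, 2]
model = [0, 1, 2, 3, 3, 1, 1, 2]
print(cohen_kappa_score(pathologist, model, weights="quadratic"))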