2023
Ramanathan, Vishwesh; Han, Wenchao; Bassiouny, Dina; Rakovitch, Eileen; Martel, Anne L.
Ink removal in whole slide images using hallucinated data Proceedings Article
In: Tomaszewski, John E.; Ward, Aaron D. (Ed.): Medical Imaging 2023: Digital and Computational Pathology, pp. 36, SPIE, 2023, ISBN: 9781510660472.
@inproceedings{Ramanathan2023,
title = {Ink removal in whole slide images using hallucinated data},
author = {Vishwesh Ramanathan and Wenchao Han and Dina Bassiouny and Eileen Rakovitch and Anne L. Martel},
editor = {John E. Tomaszewski and Aaron D. Ward},
url = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/12471/2653281/Ink-removal-in-whole-slide-images-using-hallucinated-data/10.1117/12.2653281.full},
doi = {10.1117/12.2653281},
isbn = {9781510660472},
year = {2023},
date = {2023-04-01},
urldate = {2023-04-01},
booktitle = {Medical Imaging 2023: Digital and Computational Pathology},
pages = {36},
publisher = {SPIE},
abstract = {Pathologists regularly use ink markings on histopathology slides to highlight specific areas of interest or orientation, making it an integral part of the workflow. Unfortunately, digitization of these ink-annotated slides hinders any computer-aided analyses, particularly deep learning algorithms, which require clean data free from artifacts. We propose a methodology that can identify and remove the ink markings for the purpose of computational analyses. We propose a two-stage network with a binary classifier for ink filtering and Pix2Pix for ink removal. We trained our network by artificially generating pseudo ink markings using only clean slides, requiring no manual annotation or curation of data. Furthermore, we demonstrate our algorithm's efficacy over an independent dataset of H&E stained breast carcinoma slides scanned before and after the removal of pen markings. Our quantitative analysis shows promising results, achieving 98.7% accuracy for the binary classifier. For Pix2Pix, we observed a 65.6% increase in structure similarity index, a 21.3% increase in peak signal-to-noise ratio, and a 30% increase in visual information fidelity. As only clean slides are required for training, the pipeline can be adapted to multiple colors of ink markings or new domains, making it easy to deploy over different sets of histopathology slides. Code and trained models are available at: https://github.com/Vishwesh4/Ink-WSI.},
keywords = {_DCIS, _Histology_QA},
pubstate = {published},
tppubtype = {inproceedings}
}
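The two-stage design described in the abstract above (a binary ink filter followed by a Pix2Pix generator) might be wired together at inference time roughly as follows. This is a minimal sketch assuming pretrained `ink_classifier` and `pix2pix_generator` modules; the names are placeholders, not the authors' API, whose actual implementation is at the linked GitHub repository.

```python
import torch

def remove_ink(patches, ink_classifier, pix2pix_generator, threshold=0.5):
    """Two-stage inference: classify patches for ink, restore flagged ones.

    `patches` is a float tensor of shape (N, 3, H, W).
    `ink_classifier` and `pix2pix_generator` are assumed to be pretrained
    modules; both names are hypothetical placeholders.
    """
    with torch.no_grad():
        # Stage 1: binary ink filter -- probability that each patch is inked.
        ink_prob = torch.sigmoid(ink_classifier(patches)).squeeze(1)
        inked = ink_prob > threshold

        # Stage 2: a Pix2Pix-style generator restores only the flagged
        # patches, leaving clean patches untouched.
        restored = patches.clone()
        if inked.any():
            restored[inked] = pix2pix_generator(patches[inked])
    return restored, inked
```

Restricting the generator to patches the classifier flags keeps clean tissue bit-exact and limits any generator hallucination to regions that were unusable anyway.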
2019
Seth, Nikhil; Akbar, Shazia; Nofech-Mozes, Sharon; Salama, Sherine; Martel, Anne L.
Automated Segmentation of DCIS in Whole Slide Images Conference
European Congress on Digital Pathology ECDP 2019, Lecture Notes in Computer Science, vol. 11435, pp. 67-74, Springer, 2019.
@conference{Seth2019,
title = {Automated Segmentation of DCIS in Whole Slide Images},
author = {Nikhil Seth and Shazia Akbar and Sharon Nofech-Mozes and Sherine Salama and Anne L. Martel},
year = {2019},
date = {2019-07-03},
urldate = {2019-07-03},
booktitle = {European Congress on Digital Pathology ECDP 2019},
volume = {11435},
pages = {67--74},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
abstract = {Segmentation of ducts in whole slide images is an important step needed to analyze ductal carcinoma in-situ (DCIS), an early form of breast cancer. Here, we train several U-Net architectures (deep convolutional neural networks designed to output probability maps) to segment DCIS in whole slide images and validate the optimal patch field of view necessary to achieve superior accuracy at the slide level. We showed that a U-Net trained at 5x achieved the best test results (DSC = 0.771, F1 = 0.601), implying that the U-Net benefits from having wider contextual information. Our custom U-Net-based architecture, trained to incorporate patches from all available resolutions, achieved test results of DSC = 0.759 (F1 = 0.682), showing improvement in the duct-detecting capabilities of the model. Both architectures show performance comparable to a second expert annotator on an independent test set. This is preliminary work for a pipeline targeted at predicting recurrence risk in DCIS patients.},
keywords = {_DCIS, deep learning, digital pathology},
pubstate = {published},
tppubtype = {conference}
}
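The abstract above reports slide-level DSC scores for the segmentation masks. For reference, a minimal Dice similarity coefficient computation over binary masks might look like the following; this is a generic sketch, not the authors' evaluation code.

```python
import numpy as np

def dice_coefficient(pred, target, eps=1e-7):
    """Dice similarity coefficient (DSC) between two binary masks.

    `pred` and `target` are numpy arrays of the same shape, e.g.
    slide-level DCIS masks thresholded from a U-Net probability map.
    """
    pred = pred.astype(bool)
    target = target.astype(bool)
    intersection = np.logical_and(pred, target).sum()
    # 2|A∩B| / (|A| + |B|); eps guards against two empty masks.
    return (2.0 * intersection) / (pred.sum() + target.sum() + eps)
```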
2017
Akbar, Shazia; Peikari, Mohammad; Salama, Sherine; Nofech-Mozes, Sharon; Martel, Anne L.
Transitioning between Convolutional and Fully Connected Layers in Neural Networks Proceedings Article
In: 3rd workshop on Deep Learning in Medical Image Analysis (DLMIA), MICCAI 2017, 2017.
@inproceedings{Akbar2017a,
title = {Transitioning between Convolutional and Fully Connected Layers in Neural Networks},
author = {Shazia Akbar and Mohammad Peikari and Sherine Salama and Sharon Nofech-Mozes and Anne L. Martel},
url = {https://arxiv.org/abs/1707.05743},
year = {2017},
date = {2017},
booktitle = {3rd workshop on Deep Learning in Medical Image Analysis (DLMIA), MICCAI 2017},
abstract = {Digital pathology has advanced substantially over the last decade; however, tumor localization continues to be a challenging problem due to highly complex patterns and textures in the underlying tissue bed. The use of convolutional neural networks (CNNs) to analyze such complex images has been well adopted in digital pathology. However, in recent years the architecture of CNNs has evolved with the introduction of inception modules, which have shown great promise for classification tasks. In this paper, we propose a modified "transition" module which learns global average pooling layers from filters of varying sizes to encourage class-specific filters at multiple spatial resolutions. We demonstrate the performance of the transition module in AlexNet and ZFNet for classifying breast tumors in two independent datasets of scanned histology sections, in both of which the transition module was superior.},
keywords = {_DCIS},
pubstate = {published},
tppubtype = {inproceedings}
}
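The "transition" module described above learns global average pooling layers from filters of varying sizes. A rough PyTorch sketch of that idea follows; the kernel sizes and channel counts here are illustrative assumptions, not the paper's exact configuration.

```python
import torch
import torch.nn as nn

class TransitionModule(nn.Module):
    """Sketch of a transition module: parallel convolutions with varying
    kernel sizes, each followed by global average pooling, concatenated
    into one fixed-length feature vector. Hyperparameters are
    illustrative, not taken from the paper."""

    def __init__(self, in_channels, branch_channels=128, kernel_sizes=(3, 5, 7)):
        super().__init__()
        self.branches = nn.ModuleList(
            nn.Sequential(
                nn.Conv2d(in_channels, branch_channels, k, padding=k // 2),
                nn.ReLU(inplace=True),
                nn.AdaptiveAvgPool2d(1),  # global average pooling per branch
            )
            for k in kernel_sizes
        )

    def forward(self, x):
        # Each branch yields (N, branch_channels, 1, 1); flatten and
        # concatenate to get (N, branch_channels * num_branches).
        pooled = [branch(x).flatten(1) for branch in self.branches]
        return torch.cat(pooled, dim=1)
```

Because each branch ends in global average pooling, the concatenated output has a fixed length regardless of the input's spatial size, which eases the transition from convolutional to fully connected layers that the paper's title refers to.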