2023
Ramanathan, Vishwesh; Han, Wenchao; Bassiouny, Dina; Rakovitch, Eileen; Martel, Anne L.
Ink removal in whole slide images using hallucinated data — Proceedings Article
In: Tomaszewski, John E.; Ward, Aaron D. (Ed.): Medical Imaging 2023: Digital and Computational Pathology, pp. 36, SPIE, 2023, ISBN: 9781510660472.
Abstract | Links | BibTeX | Tags: _DCIS, _Histology_QA
@inproceedings{Ramanathan2023,
  title     = {Ink removal in whole slide images using hallucinated data},
  author    = {Ramanathan, Vishwesh and Han, Wenchao and Bassiouny, Dina and Rakovitch, Eileen and Martel, Anne L.},
  editor    = {Tomaszewski, John E. and Ward, Aaron D.},
  url       = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/12471/2653281/Ink-removal-in-whole-slide-images-using-hallucinated-data/10.1117/12.2653281.full},
  doi       = {10.1117/12.2653281},
  isbn      = {9781510660472},
  year      = {2023},
  date      = {2023-04-01},
  urldate   = {2023-04-01},
  booktitle = {Medical Imaging 2023: Digital and Computational Pathology},
  pages     = {36},
  publisher = {SPIE},
  abstract  = {Pathologists regularly use ink markings on histopathology slides to highlight specific areas of interest or orientation, making it an integral part of the workflow. Unfortunately, digitization of these ink-annotated slides hinders any computer-aided analyses, particularly deep learning algorithms, which require clean data free from artifacts. We propose a methodology that can identify and remove the ink markings for the purpose of computational analyses. We propose a two-stage network with a binary classifier for ink filtering and Pix2Pix for ink removal. We trained our network by artificially generating pseudo ink markings using only clean slides, requiring no manual annotation or curation of data. Furthermore, we demonstrate our algorithm's efficacy over an independent dataset of H\&E stained breast carcinoma slides scanned before and after the removal of pen markings. Our quantitative analysis shows promising results, achieving 98.7\% accuracy for the binary classifier. For Pix2Pix, we observed a 65.6\% increase in structure similarity index, a 21.3\% increase in peak signal-to-noise ratio, and a 30\% increase in visual information fidelity. As only clean slides are required for training, the pipeline can be adapted to multiple colors of ink markings or new domains, making it easy to deploy over different sets of histopathology slides. Code and trained models are available at: https://github.com/Vishwesh4/Ink-WSI.},
  keywords  = {_DCIS, _Histology_QA},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
Pathologists regularly use ink markings on histopathology slides to highlight specific areas of interest or orientation, making it an integral part of the workflow. Unfortunately, digitization of these ink-annotated slides hinders any computer-aided analyses, particularly deep learning algorithms, which require clean data free from artifacts. We propose a methodology that can identify and remove the ink markings for the purpose of computational analyses. We propose a two-stage network with a binary classifier for ink filtering and Pix2Pix for ink removal. We trained our network by artificially generating pseudo ink markings using only clean slides, requiring no manual annotation or curation of data. Furthermore, we demonstrate our algorithm's efficacy over an independent dataset of H&E stained breast carcinoma slides scanned before and after the removal of pen markings. Our quantitative analysis shows promising results, achieving 98.7% accuracy for the binary classifier. For Pix2Pix, we observed a 65.6% increase in structure similarity index, a 21.3% increase in peak signal-to-noise ratio, and a 30% increase in visual information fidelity. As only clean slides are required for training, the pipeline can be adapted to multiple colors of ink markings or new domains, making it easy to deploy over different sets of histopathology slides. Code and trained models are available at: https://github.com/Vishwesh4/Ink-WSI.