2019
Ozan Ciga, Anne L Martel
Learning to segment images with classification labels Journal Article
In: arXiv preprint arXiv:1912.12533, 2019.
Abstract | BibTeX | Tags: digital pathology
@article{Ciga2019b,
title = {Learning to segment images with classification labels},
author = {Ciga, Ozan and Martel, Anne L},
year = {2019},
date = {2019-12-28},
journal = {arXiv preprint arXiv:1912.12533},
abstract = {Two of the most common tasks in medical imaging are classification and segmentation.
Either task requires labeled data annotated by experts, which is scarce and expensive to
collect. Annotating data for segmentation is generally considered to be more laborious
as the annotator has to draw around the boundaries of regions of interest, as opposed
to assigning image patches a class label. Furthermore, in tasks such as breast cancer
histopathology, any realistic clinical application often includes working with whole
slide images, whereas most publicly available training data are in the form of image
patches, which are given a class label. We propose an architecture that can alleviate the
requirements for segmentation-level ground truth by making use of image-level labels
to reduce the amount of time spent on data curation. In addition, this architecture can
help unlock the potential of previously acquired image-level datasets on segmentation
tasks by annotating a small number of regions of interest. In our experiments, we show
using only one segmentation-level annotation per class, we can achieve performance
comparable to a fully annotated dataset.},
keywords = {digital pathology},
pubstate = {published},
tppubtype = {article}
}
Either task requires labeled data annotated by experts, which is scarce and expensive to
collect. Annotating data for segmentation is generally considered to be more laborious
as the annotator has to draw around the boundaries of regions of interest, as opposed
to assigning image patches a class label. Furthermore, in tasks such as breast cancer
histopathology, any realistic clinical application often includes working with whole
slide images, whereas most publicly available training data are in the form of image
patches, which are given a class label. We propose an architecture that can alleviate the
requirements for segmentation-level ground truth by making use of image-level labels
to reduce the amount of time spent on data curation. In addition, this architecture can
help unlock the potential of previously acquired image-level datasets on segmentation
tasks by annotating a small number of regions of interest. In our experiments, we show
using only one segmentation-level annotation per class, we can achieve performance
comparable to a fully annotated dataset.
Chetan L Srinidhi, Ozan Ciga, Anne L Martel
Deep neural network models for computational histopathology: A survey Journal Article
In: arXiv preprint arXiv:1912.12378, 2019.
Abstract | BibTeX | Tags: digital pathology
@article{Srinidhi2019,
title = {Deep neural network models for computational histopathology: A survey},
author = {Srinidhi, Chetan L and Ciga, Ozan and Martel, Anne L},
year = {2019},
date = {2019-12-28},
journal = {arXiv preprint arXiv:1912.12378},
abstract = {Histopathological images contain rich phenotypic information that can be used to monitor underlying mechanisms contributing to diseases progression and patient survival outcomes. Recently, deep learning has become the mainstream methodological choice for analyzing and interpreting cancer histology images. In this paper, we present a comprehensive review of state-of-the-art deep learning approaches that have been used in the context of histopathological image analysis. From the survey of over 130 papers, we review the fields progress based on the methodological aspect of different machine learning strategies such as supervised, weakly supervised, unsupervised, transfer learning and various other sub-variants of these methods. We also provide an overview of deep learning based survival models that are applicable for disease-specific prognosis tasks. Finally, we summarize several existing open datasets and highlight critical challenges and limitations with current deep learning approaches, along with possible avenues for future research.},
keywords = {digital pathology},
pubstate = {published},
tppubtype = {article}
}
Ozan Ciga, Jianan Chen, Anne Martel
Multi-layer Domain Adaptation for Deep Convolutional Networks Conference
MICCAI Workshop on Domain Adaptation and Representation Transfer International Workshop on Medical Image Learning with Less Labels and Imperfect Data, vol. 11795, 2019.
Abstract | BibTeX | Tags: digital pathology
@conference{Ciga2019,
title = {Multi-layer Domain Adaptation for Deep Convolutional Networks},
author = {Ciga, Ozan and Chen, Jianan and Martel, Anne},
year = {2019},
date = {2019-10-13},
booktitle = {MICCAI Workshop on Domain Adaptation and Representation Transfer
International Workshop on Medical Image Learning with Less Labels and Imperfect Data},
volume = {11795},
pages = {20--27},
abstract = {Despite their success in many computer vision tasks, convolutional networks tend to require large amounts of labeled data to achieve generalization. Furthermore, the performance is not guaranteed on a sample from an unseen domain at test time, if the network was not exposed to similar samples from that domain at training time. This hinders the adoption of these techniques in clinical setting where the imaging data is scarce, and where the intra- and inter-domain variance of the data can be substantial. We propose a domain adaptation technique that is especially suitable for deep networks to alleviate this requirement of labeled data. Our method utilizes gradient reversal layers [4] and Squeeze-and-Excite modules [6] to stabilize the training in deep networks. The proposed method was applied to publicly available histopathology and chest X-ray databases and achieved superior performance to existing state-of-the-art networks with and without domain adaptation. Depending on the application, our method can improve multi-class classification accuracy by 5–20% compared to DANN introduced in [4].},
keywords = {digital pathology},
pubstate = {published},
tppubtype = {conference}
}
Akbar, Shazia; Peikari, Mohammad; Salama, Sherine; Panah, Azadeh Yazdan; Nofech-Mozes, Sharon; Martel, Anne L
Automated and Manual Quantification of Tumour Cellularity in Digital Slides for Tumour Burden Assessment Journal Article
In: Scientific Reports, vol. 9, pp. 14099, 2019.
Abstract | BibTeX | Tags: digital pathology
@article{Akbar2019b,
title = {Automated and Manual Quantification of Tumour Cellularity in Digital Slides for Tumour Burden Assessment},
author = {Shazia Akbar and Mohammad Peikari and Sherine Salama and Azadeh Yazdan Panah and Sharon Nofech-Mozes and Anne L Martel},
year = {2019},
date = {2019-10-01},
urldate = {2019-10-01},
journal = {Scientific Reports},
volume = {9},
pages = {14099},
abstract = {Aims:
The residual cancer burden index is an important quantitative measure used for assessing treatment response following neoadjuvant therapy for breast cancer. It has shown to be predictive of overall survival and is composed of two key metrics: qualitative assessment of lymph nodes and the percentage of invasive or in-situ tumour cellularity (TC) in the tumour bed (TB). Currently, TC is assessed through eye-balling of routine histopathology slides estimating the proportion of tumour cells within the TB. With the advances in production of digitized slides and increasing availability of slide scanners in pathology laboratories, there is potential to measure TC using automated algorithms with greater precision and accuracy.
Methods:
We describe two methods for automated TC scoring: 1) a traditional approach to image analysis development whereby we mimic the pathologists' workflow, and 2) a recent development in artificial intelligence in which features are learned automatically in deep neural networks using image data alone.
Results:
We show strong agreements between automated and manual analysis of digital slides. Agreements between our trained deep neural networks and experts in this study (0.82) approach the inter-rater agreements between pathologists (0.89). We also reveal properties that are captured when we apply deep neural network to whole slide images, and discuss the potential of using such visualisations to improve upon TC assessment in the future.
Conclusions:
TC scoring can be successfully automated by leveraging recent advancements in artificial intelligence, thereby alleviating the burden of manual analysis.},
keywords = {digital pathology},
pubstate = {published},
tppubtype = {article}
}
The residual cancer burden index is an important quantitative measure used for assessing treatment response following neoadjuvant therapy for breast cancer. It has shown to be predictive of overall survival and is composed of two key metrics: qualitative assessment of lymph nodes and the percentage of invasive or in-situ tumour cellularity (TC) in the tumour bed (TB). Currently, TC is assessed through eye-balling of routine histopathology slides estimating the proportion of tumour cells within the TB. With the advances in production of digitized slides and increasing availability of slide scanners in pathology laboratories, there is potential to measure TC using automated algorithms with greater precision and accuracy.
Methods:
We describe two methods for automated TC scoring: 1) a traditional approach to image analysis development whereby we mimic the pathologists' workflow, and 2) a recent development in artificial intelligence in which features are learned automatically in deep neural networks using image data alone.
Results:
We show strong agreements between automated and manual analysis of digital slides. Agreements between our trained deep neural networks and experts in this study (0.82) approach the inter-rater agreements between pathologists (0.89). We also reveal properties that are captured when we apply deep neural network to whole slide images, and discuss the potential of using such visualisations to improve upon TC assessment in the future.
Conclusions:
TC scoring can be successfully automated by leveraging recent advancements in artificial intelligence, thereby alleviating the burden of manual analysis.
Nikhil Seth; Shazia Akbar; Sharon Nofech-Mozes; Sherine, Salama; Anne L. Martel
Automated Segmentation of DCIS in Whole Slide Images Conference
European Congress on Digital Pathology ECDP 2019, vol. 11435, Springer Lecture Notes in Computer Science , 2019.
Abstract | BibTeX | Tags: _DCIS, deep learning, digital pathology
@conference{Seth2019,
title = {Automated Segmentation of {DCIS} in Whole Slide Images},
author = {Seth, Nikhil and Akbar, Shazia and Nofech-Mozes, Sharon and Salama, Sherine and Martel, Anne L.},
year = {2019},
date = {2019-07-03},
urldate = {2019-07-03},
booktitle = {European Congress on Digital Pathology ECDP 2019},
volume = {11435},
pages = {67--74},
publisher = {Springer Lecture Notes in Computer Science},
abstract = {Segmentation of ducts in whole slide images is an important step needed to analyze ductal carcinoma in-situ (DCIS), an early form of breast cancer. Here, we train several U-Net architectures – deep convolutional neural networks designed to output probability maps – to segment DCIS in whole slide images and validate the optimal patch field of view necessary to achieve superior accuracy at the slide-level. We showed a U-Net trained at 5x achieved the best test results (DSC = 0.771, F1 = 0.601), implying the U-Net benefits from having wider contextual information. Our custom U-Net based architecture, trained to incorporate patches from all available resolutions, achieved test results of DSC = 0.759 (F1 = 0.682) showing improvement in the duct detecting capabilities of the model. Both architectures show comparable performance to a second expert annotator on an independent test set. This is preliminary work for a pipeline targeted at predicting recurrence risk in DCIS patients.},
keywords = {_DCIS, deep learning, digital pathology},
pubstate = {published},
tppubtype = {conference}
}
S Akbar, M Peikari, S Salama
The transition module: a method for preventing overfitting in convolutional neural networks Journal Article
In: Computer Methods in Biomechanics and Biomedical Engineering: Imaging & Visualization, vol. 7, no. 3, pp. 260-265, 2019.
Abstract | BibTeX | Tags: digital pathology
@article{Akbar2019,
title = {The transition module: a method for preventing overfitting in convolutional neural networks},
author = {Akbar, S. and Peikari, M. and Salama, S. and Nofech-Mozes, S. and Martel, A. L.},
year = {2019},
date = {2019-05-04},
journal = {Computer Methods in Biomechanics and Biomedical Engineering: Imaging & Visualization},
volume = {7},
number = {3},
pages = {260--265},
abstract = {Digital pathology has advanced substantially over the last decade with the adoption of slide scanners in pathology labs. The use of digital slides to analyse diseases at the microscopic level is both cost-effective and efficient. Identifying complex tumour patterns in digital slides is a challenging problem but holds significant importance for tumour burden assessment, grading and many other pathological assessments in cancer research. The use of convolutional neural networks (CNNs) to analyse such complex images has been well adopted in digital pathology. However, in recent years, the architecture of CNNs has altered with the introduction of inception modules which have shown great promise for classification tasks. In this paper, we propose a modified ‘transition’ module which encourages generalisation in a deep learning framework with few training samples. In the transition module, filters of varying sizes are used to encourage class-specific filters at multiple spatial resolutions followed by global average pooling. We demonstrate the performance of the transition module in AlexNet and ZFNet, for classifying breast tumours in two independent data-sets of scanned histology sections; the inclusion of the transition module in these CNNs improved performance.},
keywords = {digital pathology},
pubstate = {published},
tppubtype = {article}
}
Digital pathology has advanced substantially over the last decade with the adoption of slide scanners in pathology labs. The use of digital slides to analyse diseases at the microscopic level is both cost-effective and efficient. Identifying complex tumour patterns in digital slides is a challenging problem but holds significant importance for tumour burden assessment, grading and many other pathological assessments in cancer research. The use of convolutional neural networks (CNNs) to analyse such complex images has been well adopted in digital pathology. However, in recent years, the architecture of CNNs has altered with the introduction of inception modules which have shown great promise for classification tasks. In this paper, we propose a modified ‘transition’ module which encourages generalisation in a deep learning framework with few training samples. In the transition module, filters of varying sizes are used to encourage class-specific filters at multiple spatial resolutions followed by global average pooling. We demonstrate the performance of the transition module in AlexNet and ZFNet, for classifying breast tumours in two independent data-sets of scanned histology sections; the inclusion of the transition module in these CNNs improved performance.
2018
Akbar, Shazia; Martel, Anne L.
Cluster-Based Learning from Weakly Labeled Bags in Digital Pathology Workshop
Machine Learning for Health (ML4H), NeurIPS 2018, Montreal, Canada, 2018.
Abstract | Links | BibTeX | Tags: digital pathology
@workshop{Akbar2018c,
title     = {Cluster-Based Learning from Weakly Labeled Bags in Digital Pathology},
author    = {Shazia Akbar and Anne L. Martel},
url       = {https://arxiv.org/abs/1812.00884},
year      = {2018},
date      = {2018-12-08},
booktitle = {Machine Learning for Health (ML4H), NeurIPS 2018},
address   = {Montreal, Canada},
abstract  = {To alleviate the burden of gathering detailed expert annotations when training deep neural networks, we propose a weakly supervised learning approach to recognize metastases in microscopic images of breast lymph nodes. We describe an alternative training loss which clusters weakly labeled bags in latent space to inform relevance of patch-instances during training of a convolutional neural network. We evaluate our method on the Camelyon dataset which contains high-resolution digital slides of breast lymph nodes, where labels are provided at the image-level and only subsets of patches are made available during training.},
keywords  = {digital pathology},
pubstate  = {published},
tppubtype = {workshop}
}
Akbar, Shazia; Peikari, Mohammad; Salama, Sherine; Nofech-Mozes, Sharon; Martel, Anne L.
Determining tumor cellularity in digital slides using ResNet Proceedings Article
In: SPIE Medical Imaging, Houston, Texas, 2018.
Abstract | Links | BibTeX | Tags: digital pathology
@inproceedings{Akbar2018a,
title     = {Determining tumor cellularity in digital slides using ResNet},
author    = {Shazia Akbar and Mohammad Peikari and Sherine Salama and Sharon Nofech-Mozes and Anne L. Martel},
url       = {https://www.spiedigitallibrary.org/conference-proceedings-of-spie/10581/105810U/Determining-tumor-cellularity-in-digital-slides-using-ResNet/10.1117/12.2292813.full},
year      = {2018},
date      = {2018-01-01},
booktitle = {SPIE Medical Imaging},
volume    = {10581},
address   = {Houston, Texas},
abstract  = {The residual cancer burden index is a powerful prognostic factor which is used to measure neoadjuvant therapy response in invasive breast cancers. Tumor cellularity is one component of the residual cancer burden index and is currently measured manually through eyeballing. As such it is subject to inter- and intra-variability and is currently restricted to discrete values. We propose a method for automatically determining tumor cellularity in digital slides using deep learning techniques. We train a series of ResNet architectures to output both discrete and continuous values and compare our outcomes with scores acquired manually by an expert pathologist. Our configurations were validated on a dataset of image patches extracted from digital slides, each containing various degrees of tumor cellularity. Results showed that, in the case of discrete values, our models were able to distinguish between regions-of-interest containing tumor and healthy cells with over 97% test accuracy rates. Overall, we achieved 76% accuracy over four predefined tumor cellularity classes (no tumor/tumor; low, medium and high tumor cellularity). When computing tumor cellularity scores on a continuous scale, ResNet showed good correlations with manually-identified scores, showing potential for computing reproducible scores consistent with expert opinion using deep learning techniques.},
keywords  = {digital pathology},
pubstate  = {published},
tppubtype = {inproceedings}
}
2016
Rushin Shojaii, Anne L Martel
Optimized SIFTFlow for registration of whole-mount histology to reference optical images Journal Article
In: Journal of Medical Imaging, vol. 3, no. 4, pp. 047501-047501, 2016.
Abstract | Links | BibTeX | Tags: digital pathology, registration
@article{Shojaii2016,
title = {Optimized {SIFTFlow} for registration of whole-mount histology to reference optical images},
author = {Shojaii, Rushin and Martel, Anne L},
url = {http://medicalimaging.spiedigitallibrary.org/article.aspx?articleid=2571703},
doi = {10.1117/1.JMI.3.4.047501},
year = {2016},
date = {2016-10-19},
journal = {Journal of Medical Imaging},
volume = {3},
number = {4},
pages = {047501},
abstract = {The registration of two-dimensional histology images to reference images from other modalities is an important preprocessing step in the reconstruction of three-dimensional histology volumes. This is a challenging problem because of the differences in the appearances of histology images and other modalities, and the presence of large nonrigid deformations which occur during slide preparation. This paper shows the feasibility of using densely sampled scale-invariant feature transform (SIFT) features and a SIFTFlow deformable registration algorithm for coregistering whole-mount histology images with blockface optical images. We present a method for jointly optimizing the regularization parameters used by the SIFTFlow objective function and use it to determine the most appropriate values for the registration of breast lumpectomy specimens. We demonstrate that tuning the regularization parameters results in significant improvements in accuracy and we also show that SIFTFlow outperforms a previously described edge-based registration method. The accuracy of the histology images to blockface images registration using the optimized SIFTFlow method was assessed using an independent test set of images from five different lumpectomy specimens and the mean registration error was 0.32±0.22 mm.},
keywords = {digital pathology, registration},
pubstate = {published},
tppubtype = {article}
}
Peikari, Mohammad; Martel, Anne L.
Automatic cell detection and segmentation from H and E stained pathology slides using colorspace decorrelation stretching Proceedings Article
In: Gurcan, Metin N.; Madabhushi, Anant (Ed.): Medical Imaging 2016: Digital Pathology, pp. 979114-1: 979114-6 , SPIE SPIE, 2016.
Abstract | Links | BibTeX | Tags: digital pathology
@inproceedings{Peikari2016,
title = {Automatic cell detection and segmentation from {H} and {E} stained pathology slides using colorspace decorrelation stretching},
author = {Mohammad Peikari and Anne L. Martel},
editor = {Metin N. Gurcan and Anant Madabhushi},
url = {http://proceedings.spiedigitallibrary.org/proceeding.aspx?articleid=2506827},
doi = {10.1117/12.2216507},
year = {2016},
date = {2016-03-23},
booktitle = {Medical Imaging 2016: Digital Pathology},
volume = {9791},
number = {2},
pages = {979114-1--979114-6},
publisher = {SPIE},
organization = {SPIE},
abstract = {Purpose: Automatic cell segmentation plays an important role in reliable diagnosis and prognosis of patients. Most of the state-of-the-art cell detection and segmentation techniques focus on complicated methods to subtract foreground cells from the background. In this study, we introduce a preprocessing method which leads to a better detection and segmentation results compared to a well-known state-of-the-art work. Method: We transform the original red-green-blue (RGB) space into a new space defined by the top eigenvectors of the RGB space. Stretching is done by manipulating the contrast of each pixel value to equalize the color variances. New pixel values are then inverse transformed to the original RGB space. This altered RGB image is then used to segment cells. Result: The validation of our method with a well-known state-of-the-art technique revealed a statistically significant improvement on an identical validation set. We achieved a mean F1-score of 0.901. Conclusion: Preprocessing steps to decorrelate colorspaces may improve cell segmentation performances},
keywords = {digital pathology},
pubstate = {published},
tppubtype = {inproceedings}
}