2019
Hesse, Linde S; Kuling, Grey; Veta, Mitko; Martel, Anne L
Intensity augmentation for domain transfer of whole breast segmentation in MRI Journal Article
In: arXiv preprint arXiv:1909.02642, 2019.
Abstract | BibTeX | Tags: Breast MRI
@article{Hesse2019,
title = {Intensity augmentation for domain transfer of whole breast segmentation in MRI},
author = {Linde S Hesse and Grey Kuling and Mitko Veta and Anne L Martel},
year = {2019},
date = {2019-09-05},
urldate = {2019-09-05},
journal = {arXiv preprint arXiv:1909.02642},
abstract = {The segmentation of the breast from the chest wall is an important first step in the analysis of breast magnetic resonance images. 3D U-nets have been shown to obtain high segmentation accuracy and appear to generalize well when trained on one scanner type and tested on another scanner, provided that a very similar T1-weighted MR protocol is used. There has, however, been little work addressing the problem of domain adaptation when image intensities or patient orientation differ markedly between the training set and an unseen test set. To overcome the domain shift, we propose to apply extensive intensity augmentation in addition to geometric augmentation during training. We explored both style transfer and a novel intensity remapping approach as intensity augmentation strategies. For our experiments, we trained a 3D U-net on T1-weighted scans and tested on T2-weighted scans. By applying intensity augmentation, we increased segmentation performance from a DSC of 0.71 to 0.90. This performance is very close to the baseline performance of training and testing on T2-weighted scans (0.92). Furthermore, we applied our network to an independent test set made up of publicly available scans acquired using a T1-weighted TWIST sequence and a different coil configuration. On this dataset we obtained a performance of 0.89, close to the inter-observer variability of the ground truth segmentations (0.92). Our results show that using intensity augmentation in addition to geometric augmentation is a suitable method to overcome the intensity domain shift, and we expect it to be useful for a wide range of segmentation tasks.},
keywords = {Breast MRI},
pubstate = {published},
tppubtype = {article}
}
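The intensity remapping augmentation mentioned in this abstract can be sketched in a few lines of Python. The snippet below is our own illustration, not the authors' released code: the piecewise-linear random transfer curve, the function name, and the knot count are all assumptions about how such a monotonic remapping might be implemented.

import numpy as np

def random_intensity_remap(volume, n_knots=5, rng=None):
    """Randomly remap voxel intensities through a monotonic curve.

    Hypothetical sketch of an intensity-remapping augmentation: draw a
    random increasing piecewise-linear transfer curve on [0, 1] and push
    the normalized intensities through it. Contrast changes, but the
    ordering of intensities is preserved.
    """
    rng = np.random.default_rng() if rng is None else rng
    lo, hi = float(volume.min()), float(volume.max())
    v = (volume - lo) / (hi - lo + 1e-8)           # normalize to [0, 1]
    xs = np.linspace(0.0, 1.0, n_knots)            # fixed knot positions
    ys = np.sort(rng.uniform(0.0, 1.0, n_knots))   # random increasing values
    ys[0], ys[-1] = 0.0, 1.0                       # pin the endpoints
    return np.interp(v, xs, ys)                    # elementwise, shape kept

Applied once per training sample (e.g. augmented = random_intensity_remap(scan)), each epoch would then see a differently remapped version of the same anatomy.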
Cleland, Theo; Mainprize, James G.; Alonzo-Proulx, Olivier; Harvey, Jennifer A.; Jong, Roberta A.; Martel, Anne L.; Yaffe, Martin J.
Use of convolutional neural networks to predict risk of masking by mammographic density Conference
SPIE Medical Imaging 2019: Computer-Aided Diagnosis, vol. 10950, 2019.
Abstract | BibTeX | Tags: Breast MRI, Breast-CAD
@conference{Yaffe2019,
title = {Use of convolutional neural networks to predict risk of masking by mammographic density},
author = {Theo Cleland and James G. Mainprize and Olivier Alonzo-Proulx and Jennifer A. Harvey and Roberta A. Jong and Anne L. Martel and Martin J. Yaffe},
year = {2019},
date = {2019-03-13},
booktitle = {SPIE Medical Imaging 2019: Computer-Aided Diagnosis},
volume = {10950},
abstract = {Sensitivity of screening mammography is reduced by increased mammographic density (MD). MD can obscure or “mask” developing lesions, making them harder to detect. Predicting masking risk may be an effective tool for a stratified screening program in which selected women can receive alternative screening modalities that are less susceptible to masking. Here, we investigate whether the use of artificial intelligence can accurately predict masking risk and compare its performance to that of conventional BI-RADS density classification. The analysis was based on mammograms of 214 subjects, comprising 147 women with a screen-detected (SD) or “non-masked” cancer and 67 who developed a non-screen-detected (NSD) or presumably masked cancer within 2 years following a negative screen. Prior to analysis, mammograms were pre-processed into quantitative MD maps using an in-house algorithm. A transfer learning approach was used to train a convolutional neural network (CNN) based on VGG-16, using seven-fold cross-validation, to classify masking status. A two-step transfer learning method was also used, in which the pre-trained CNN was initially trained on 5,865 mammograms to classify by BI-RADS density category and then trained for masking status. Using BI-RADS density as a masking risk predictor yielded an AUC of 0.64 [95% CI: 0.57–0.71]. The CNN-mask yielded an AUC of 0.76 [95% CI: 0.68–0.81]. Combining the CNN-mask with our previous hand-crafted masking risk predictor improved the AUC to 0.78 [95% CI: 0.70–0.83]. The combined AUC improved further, to 0.81 [95% CI: 0.72–0.90], when the analysis was restricted to NSD cancers surfacing clinically within one year after a negative screen. The two-step transfer learning approach yielded similar performance. This work suggests that a CNN masking risk predictor can be used to guide a stratified screening program to overcome the limitations of screening mammography in dense breasts.},
keywords = {Breast MRI, Breast-CAD},
pubstate = {published},
tppubtype = {conference}
}
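The abstract describes transfer learning from a pre-trained VGG-16 to a binary masking-status classifier. As a hedged illustration only (the input size, classification head, and training settings below are our assumptions, not details from the paper), a minimal Keras version of that general setup could look like this:

import tensorflow as tf

# Minimal sketch of a VGG-16 transfer-learning classifier for a binary
# label (masked vs. non-masked). Assumes density maps are resized and
# tiled to three channels; all layer sizes here are illustrative.
base = tf.keras.applications.VGG16(
    weights="imagenet", include_top=False, input_shape=(224, 224, 3))
base.trainable = False  # freeze the pretrained convolutional features

model = tf.keras.Sequential([
    base,
    tf.keras.layers.GlobalAveragePooling2D(),
    tf.keras.layers.Dense(256, activation="relu"),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(1, activation="sigmoid"),  # P(masked)
])
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
              loss="binary_crossentropy",
              metrics=[tf.keras.metrics.AUC(name="auc")])

In the paper's two-step variant, the head would first be trained on the BI-RADS density task before being re-trained for masking status; the freeze/fine-tune split above is one common way to do that, not necessarily theirs.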
Fashandi, Homa; Kuling, Grey; Lu, YingLi; Wu, Hongbo; Martel, Anne L.
An investigation of the effect of fat suppression and dimensionality on the accuracy of breast MRI segmentation using U-nets Journal Article
In: Medical Physics, 2019, (This is the pre-peer reviewed version. The definitive version is available at: https://aapm.onlinelibrary.wiley.com/doi/abs/10.1002/mp.13375).
Abstract | Links | BibTeX | Tags: _breast_segmentation, Breast MRI, deep learning, segmentation
@article{Fashandi2019,
title = {An investigation of the effect of fat suppression and dimensionality on the accuracy of breast MRI segmentation using U-nets},
author = {Homa Fashandi and Grey Kuling and YingLi Lu and Hongbo Wu and Anne L. Martel},
url = {http://hdl.handle.net/1807/93313},
doi = {10.1002/mp.13375},
year = {2019},
date = {2019-01-04},
urldate = {2019-01-04},
journal = {Medical Physics},
abstract = {Purpose
Accurate segmentation of the breast is required for breast density estimation and the assessment of background parenchymal enhancement, both of which have been shown to be related to breast cancer risk. The MRI breast segmentation task is challenging, and recent work has demonstrated that convolutional neural networks perform well for this task. In this study, we have investigated the performance of several 2D U‐Net and 3D U‐Net configurations using both fat‐suppressed and non‐fat‐suppressed images. We have also assessed the effect of changing the number and quality of the ground truth segmentations.
Materials and methods
We designed 8 studies to investigate the effect of input types and the dimensionality of the U‐Net operations for breast MRI segmentation. Our training data contained 70 whole breast volumes of T1‐weighted sequences without fat suppression (WOFS) and with fat suppression (FS). For each subject, we registered the WOFS and FS volumes together before manually segmenting the breast to generate ground truth. We compared 4 different input types to the U‐Nets: WOFS, FS, MIXED (WOFS and FS images treated as separate samples), and MULTI (WOFS and FS images combined into a single multi‐channel image). We trained 2D U‐Nets and 3D U‐Nets with this data, which resulted in our 8 studies (2D‐WOFS, 3D‐WOFS, 2D‐FS, 3D‐FS, 2D‐MIXED, 3D‐MIXED, 2D‐MULTI, and 3D‐MULTI). For each of these studies, we performed a systematic grid search to tune the hyperparameters of the U‐Nets. A separate validation set with 15 whole breast volumes was used for hyperparameter tuning. We performed a Kruskal‐Wallis test on the results of our hyperparameter tuning and did not find a statistically significant difference among the top 10 models of each study. For this reason, we chose the best model as the one with the highest mean Dice Similarity Coefficient (DSC) value on the validation set. The reported test results are the results of the top model of each study on our test set, which contained 19 whole breast volumes annotated by 3 readers and fused with the STAPLE algorithm. We also investigated the effect of the quality of the training annotations and the number of training samples for this task.
Results
The study with the highest average DSC was 3D‐MULTI with 0.96 ± 0.02. The second highest average was 2D‐WOFS (0.96 ± 0.03), and the third was 2D‐MULTI (0.96 ± 0.03). We performed the Kruskal‐Wallis 1‐way ANOVA test with Dunn's multiple comparison tests using Bonferroni p‐value correction on the results of the selected model of each study and found that 3D‐MULTI, 2D‐MULTI, 3D‐WOFS, 2D‐WOFS, 2D‐FS, and 3D‐FS were not statistically different in their distributions, which indicates that comparable results could be obtained in fat‐suppressed and non‐fat‐suppressed volumes and that there is no significant difference between the 3D and 2D approaches. Our results also suggested that networks trained on single‐sequence images, or on multiple sequences organized as multi‐channel images, perform better than models trained on a mixture of volumes from different sequences. Our investigation of the size of the training set revealed that training a U‐Net in this domain requires only a modest amount of training data, and results obtained with 49 and 70 training datasets were not significantly different.
Conclusions
To summarize, we investigated the use of 2D U‐Nets and 3D U‐Nets for breast volume segmentation in T1‐weighted fat‐suppressed and non‐fat‐suppressed volumes. Although our highest score was obtained in the 3D‐MULTI study, in which we took advantage of information in both fat‐suppressed and non‐fat‐suppressed volumes and their 3D structure, all of the methods we explored gave accurate segmentations, with an average DSC of > 0.94, demonstrating that the U‐Net is a robust segmentation method for breast MRI volumes.},
note = {This is the pre-peer reviewed version. The definitive version is available at: https://aapm.onlinelibrary.wiley.com/doi/abs/10.1002/mp.13375},
keywords = {_breast_segmentation, Breast MRI, deep learning, segmentation},
pubstate = {published},
tppubtype = {article}
}
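Two concrete pieces of this study translate directly into code: the Dice Similarity Coefficient used for evaluation, and the MULTI input type, in which the registered WOFS and FS volumes are stacked as channels of a single sample. The Python sketch below is our own illustration of both; the array shapes and variable names are assumptions, not taken from the paper.

import numpy as np

def dice_coefficient(pred, truth):
    """Dice Similarity Coefficient between two binary masks (0/1 arrays)."""
    pred = pred.astype(bool)
    truth = truth.astype(bool)
    intersection = np.logical_and(pred, truth).sum()
    denom = pred.sum() + truth.sum()
    return 2.0 * intersection / denom if denom else 1.0

# MULTI input: registered WOFS and FS volumes stacked as channels of one
# sample (shapes here are purely illustrative).
wofs = np.zeros((64, 256, 256), dtype=np.float32)
fs = np.zeros((64, 256, 256), dtype=np.float32)
multi = np.stack([wofs, fs], axis=-1)  # -> (64, 256, 256, 2)

Because the two volumes are registered first, the network sees both contrasts at every voxel, which is what distinguishes MULTI from the MIXED setup, where each contrast is a separate training sample.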
2018
Kuling, Grey; Fashandi, Homa; Lu, YingLi; Wu, Hongbo; Martel, Anne L.
Breast Volume and Fibroglandular Tissue Segmentation in MRI using a Deep Learning Unet Workshop
ISMRM Workshop on Breast MRI: Advancing the State of the Art, 2018.
Links | BibTeX | Tags: _breast_segmentation, Breast MRI, Breast-CAD
@workshop{Kuling2018,
title = {Breast Volume and Fibroglandular Tissue Segmentation in MRI using a Deep Learning Unet},
author = {Grey Kuling and Homa Fashandi and YingLi Lu and Hongbo Wu and Anne L. Martel},
url = {http://martellab.com/wp-content/uploads/2019/09/GCK_ISMRMAbstract_DLSegmatation_072018-3.pdf},
year = {2018},
date = {2018-09-10},
urldate = {2018-09-10},
booktitle = {ISMRM Workshop on Breast MRI: Advancing the State of the Art},
keywords = {_breast_segmentation, Breast MRI, Breast-CAD},
pubstate = {published},
tppubtype = {workshop}
}