|
 @string{aps = {American Physical Society}}
 
 
-@article{Chaves-de-Plaza:2024,
- abbr = {CGF},
- bibtex_show = {true},
- title = {Depth for Multi-Modal Contour Ensembles},
- author = {Chaves-de-Plaza, N.F. and Molenaar, M. and Mody, P. and Staring, M. and van Egmond, R. and Eisemann, E. and Vilanova, A. and Hildebrandt, K.},
- journal = {Computer Graphics Forum},
- volume = {43},
- number = {3},
- pages = {e15083},
- year = {2024},
- pdf = {2024_j_CGF.pdf},
- html = {https://doi.org/10.1111/cgf.15083},
- code = {https://github.com/chadepl/paper-multimodal-contour-depth},
- abstract = {The contour depth methodology enables non-parametric summarization of contour ensembles by extracting their representatives, confidence bands, and outliers for visualization (via contour boxplots) and robust downstream procedures. We address two shortcomings of these methods. Firstly, we significantly expedite the computation and recomputation of Inclusion Depth (ID), introducing a linear-time algorithm for epsilon ID, a variant used for handling ensembles with contours with multiple intersections. We also present the inclusion matrix, which contains the pairwise inclusion relationships between contours, and leverage it to accelerate the recomputation of ID. Secondly, extending beyond the single distribution assumption, we present the Relative Depth (ReD), a generalization of contour depth for ensembles with multiple modes. Building upon the linear-time eID, we introduce CDclust, a clustering algorithm that untangles ensemble modes of variation by optimizing ReD. Synthetic and real datasets from medical image segmentation and meteorological forecasting showcase the speed advantages, illustrate the use case of progressive depth computation and enable non-parametric multimodal analysis. To promote research and adoption, we offer the contour-depth Python package.},
-}
-
-
-@article{Mody:2024a,
- abbr = {PhIRO},
- bibtex_show = {true},
- title = {Large-scale dose evaluation of deep learning organ contours in head-and-neck radiotherapy by leveraging existing plans},
- author = {Mody, Prerak and Huiskes, Merle and Chaves-de-Plaza, Nicolas and Onderwater, Alice and Lamsma, Rense and Hildebrandt, Klaus and Hoekstra, Nienke and Astreinidou, Eleftheria and Staring, Marius and Dankers, Frank},
- journal = {Physics and Imaging in Radiation Oncology},
- volume = {30},
- pages = {100572},
- month = {April},
- year = {2024},
- pdf = {2024_j_PHIRO.pdf},
- html = {https://doi.org/10.1016/j.phro.2024.100572},
- code = {https://github.com/prerakmody/dose-eval-via-existing-plan-parameters},
- abstract = {<b>Background and Purpose:</b> Retrospective dose evaluation for organ-at-risk auto-contours has previously used small cohorts due to additional manual effort required for treatment planning on auto-contours. We aimed to do this at large scale, by a) proposing and assessing an automated plan optimization workflow that used existing clinical plan parameters and b) using it for head-and-neck auto-contour dose evaluation.<br><b>Materials and Methods:</b> Our automated workflow emulated our clinic's treatment planning protocol and reused existing clinical plan optimization parameters. This workflow recreated the original clinical plan (P<sub>OG</sub>) with manual contours (P<sub>MC</sub>) and evaluated the dose effect (P<sub>OG</sub> - P<sub>MC</sub>) on 70 photon and 30 proton plans of head-and-neck patients. As a use-case, the same workflow (and parameters) created a plan using auto-contours (P<sub>AC</sub>) of eight head-and-neck organs-at-risk from a commercial tool and evaluated their dose effect (P<sub>MC</sub> - P<sub>AC</sub>).<br><b>Results:</b> For plan recreation (P<sub>OG</sub> - P<sub>MC</sub>), our workflow had a median impact of 1.0% and 1.5% across dose metrics of auto-contours, for photon and proton respectively. Computer time of automated planning was 25% (photon) and 42% (proton) of manual planning time. For auto-contour evaluation (P<sub>MC</sub> - P<sub>AC</sub>), we noticed an impact of 2.0% and 2.6% for photon and proton radiotherapy. All evaluations had a median ΔNTCP (Normal Tissue Complication Probability) less than 0.3%.<br><b>Conclusions:</b> The plan replication capability of our automated program provides a blueprint for other clinics to perform auto-contour dose evaluation with large patient cohorts. Finally, despite geometric differences, auto-contours had a minimal median dose impact, hence inspiring confidence in their utility and facilitating their clinical adoption.}
-}
-
-@article{Stoel:2024,
- abbr = {Nat. Rev. Rheumatol.},
- bibtex_show = {true},
- title = {Deep Learning in Rheumatologic Image Interpretation},
- author = {Stoel, Berend C. and Staring, Marius and Reijnierse, Monique and van der Helm-van Mil, Annette H.M.},
- journal = {Nature Reviews Rheumatology},
- volume = {20},
- pages = {182 -- 195},
- month = {March},
- year = {2024},
- pdf = {2024_j_NRR.pdf},
- html = {https://doi.org/10.1038/s41584-023-01074-5},
- abstract = {Artificial intelligence techniques, specifically deep learning, have already affected daily life in a wide range of areas. Likewise, initial applications have been explored in rheumatology. Deep learning might not easily surpass the accuracy of classic techniques when performing classification or regression on low-dimensional numerical data. With images as input, however, deep learning has become so successful that it has already outperformed the majority of conventional image-processing techniques developed during the past 50 years. As with any new imaging technology, rheumatologists and radiologists need to consider adapting their arsenal of diagnostic, prognostic and monitoring tools, and even their clinical role and collaborations. This adaptation requires a basic understanding of the technical background of deep learning, to efficiently utilize its benefits but also to recognize its drawbacks and pitfalls, as blindly relying on deep learning might be at odds with its capabilities. To facilitate such an understanding, it is necessary to provide an overview of deep-learning techniques for automatic image analysis in detecting, quantifying, predicting and monitoring rheumatic diseases, and of currently published deep-learning applications in radiological imaging for rheumatology, with critical assessment of possible limitations, errors and confounders, and conceivable consequences for rheumatologists and radiologists in clinical practice.}
-}
-
-@article{Chen:2024,
- abbr = {Melba},
- bibtex_show = {true},
- author = {Chen, Yunjie and Staring, Marius and Neve, Olaf M. and Romeijn, Stephan R. and Hensen, Erik F. and Verbist, Berit M. and Wolterink, Jelmer M. and Tao, Qian},
- title = {CoNeS: Conditional neural fields with shift modulation for multi-sequence MRI translation},
- journal = {The Journal of Machine Learning for Biomedical Imaging},
- volume = {2},
- pages = {657 -- 685},
- year = {2024},
- pdf = {2024_j_MELBAa.pdf},
- html = {https://doi.org/10.59275/j.melba.2024-d61g},
- arxiv = {2309.03320},
- code = {https://github.com/cyjdswx/CoNeS.git},
- abstract = {Multi-sequence magnetic resonance imaging (MRI) has found wide applications in both modern clinical studies and deep learning research. However, in clinical practice, it frequently occurs that one or more of the MRI sequences are missing due to different image acquisition protocols or contrast agent contraindications of patients, limiting the utilization of deep learning models trained on multi-sequence data. One promising approach is to leverage generative models to synthesize the missing sequences, which can serve as a surrogate acquisition. State-of-the-art methods tackling this problem are based on convolutional neural networks (CNN) which usually suffer from spectral biases, resulting in poor reconstruction of high-frequency fine details. In this paper, we propose Conditional Neural fields with Shift modulation (CoNeS), a model that takes voxel coordinates as input and learns a representation of the target images for multi-sequence MRI translation. The proposed model uses a multi-layer perceptron (MLP) instead of a CNN as the decoder for pixel-to-pixel mapping. Hence, each target image is represented as a neural field that is conditioned on the source image via shift modulation with a learned latent code. Experiments on BraTS 2018 and an in-house clinical dataset of vestibular schwannoma patients showed that the proposed method outperformed state-of-the-art methods for multi-sequence MRI translation both visually and quantitatively. Moreover, we conducted spectral analysis, showing that CoNeS was able to overcome the spectral bias issue common in conventional CNN models. To further evaluate the usage of synthesized images in clinical downstream tasks, we tested a segmentation network using the synthesized images at inference. The results showed that CoNeS improved the segmentation performance when some MRI sequences were missing and outperformed other synthesis models. We concluded that neural fields are a promising technique for multi-sequence MRI translation.},
-}
 
-@article{Chaves-de-Plaza:2024b,
- abbr = {TVCG},
- bibtex_show = {true},
- author = {Chaves-de-Plaza, Nicolas and Mody, Prerak P. and Staring, Marius and van Egmond, Ren{\'e} and Vilanova, Anna and Hildebrandt, Klaus},
- title = {Inclusion Depth for Contour Ensembles},
- journal = {IEEE Transactions on Visualization and Computer Graphics},
- volume = {},
- number = {},
- pages = {},
- year = {2024},
- pdf = {2024_j_TVCG.pdf},
- html = {https://doi.org/10.1109/TVCG.2024.3350076},
- arxiv = {},
- code = {},
- abstract = {Ensembles of contours arise in various applications like simulation, computer-aided design, and semantic segmentation. Uncovering ensemble patterns and analyzing individual members is a challenging task that suffers from clutter. Ensemble statistical summarization can alleviate this issue by permitting analyzing ensembles' distributional components like the mean and median, confidence intervals, and outliers. Contour boxplots, powered by Contour Band Depth (CBD), are a popular nonparametric ensemble summarization method that benefits from CBD's generality, robustness, and theoretical properties. In this work, we introduce Inclusion Depth (ID), a new notion of contour depth with three defining characteristics. First, ID is a generalization of functional Half-Region Depth, which offers several theoretical guarantees. Second, ID relies on a simple principle: the inside/outside relationships between contours. This facilitates implementing ID and understanding its results. Third, the computational complexity of ID scales quadratically in the number of members of the ensemble, improving CBD's cubic complexity. This also in practice speeds up the computation enabling the use of ID for exploring large contour ensembles or in contexts requiring multiple depth evaluations like clustering. In a series of experiments on synthetic data and case studies with meteorological and segmentation data, we evaluate ID's performance and demonstrate its capabilities for the visual analysis of contour ensembles.},
-}
-
-@article{Beljaards:2024,
- abbr = {},
- bibtex_show = {true},
- author = {Beljaards, Laurens and Pezzotti, Nicola and Rao, Chinmay and Doneva, Mariya and van Osch, Matthias J.P. and Staring, Marius},
- title = {AI-Based Motion Artifact Severity Estimation in Undersampled MRI Allowing for Selection of Appropriate Reconstruction Models},
- journal = {Medical Physics},
- volume = {51},
- number = {5},
- pages = {3555 -- 3565},
- year = {2024},
- pdf = {2024_j_MP.pdf},
- html = {https://doi.org/10.1002/mp.16918},
- arxiv = {},
- code = {},
- abstract = {<b>Background:</b> MR acquisition is a time consuming process, making it susceptible to patient motion during scanning. Even motion in the order of a millimeter can introduce severe blurring and ghosting artifacts, potentially necessitating re-acquisition. MRI can be accelerated by acquiring only a fraction of k-space, combined with advanced reconstruction techniques leveraging coil sensitivity profiles and prior knowledge. AI-based reconstruction techniques have recently been popularized, but generally assume an ideal setting without intra-scan motion.<br><b>Purpose:</b> To retrospectively detect and quantify the severity of motion artifacts in undersampled MRI data. This may prove valuable as a safety mechanism for AI-based approaches, provide useful information to the reconstruction method, or prompt for re-acquisition while the patient is still in the scanner.<br><b>Methods:</b> We developed a deep learning approach that detects and quantifies motion artifacts in undersampled brain MRI. We demonstrate that synthetically motion-corrupted data can be leveraged to train the CNN-based motion artifact estimator, generalizing well to real-world data. Additionally, we leverage the motion artifact estimator by using it as a selector for a motion-robust reconstruction model in case a considerable amount of motion was detected, and a high data consistency model otherwise.<br><b>Results:</b> Training and validation were performed on 4387 and 1304 synthetically motion-corrupted images and their uncorrupted counterparts, respectively. Testing was performed on undersampled in vivo motion-corrupted data from 28 volunteers, where our model distinguished head motion from motion-free scans with 91% and 96% accuracy when trained on synthetic and on real data, respectively. It predicted a manually defined quality label (`Good', `Medium' or `Bad' quality) correctly in 76% and 85% of the time when trained on synthetic and real data, respectively. When used as a selector it selected the appropriate reconstruction network 93% of the time, achieving near optimal SSIM values.<br><b>Conclusions:</b> The proposed method quantified motion artifact severity in undersampled MRI data with high accuracy, enabling real-time motion artifact detection that can help improve the safety and quality of AI-based reconstructions.},
-}
 
 @article{Jia:2023,
  abbr = {},
|