|
 
 @string{aps = {American Physical Society,}}
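As an aside on the @string line above: a BibTeX @string macro defines a reusable abbreviation that is expanded wherever its bare identifier appears as a field value. A minimal sketch, using a purely hypothetical entry (the key, author and fields below are illustrative and not part of this bibliography):

@article{example2025,
  title     = {A hypothetical entry reusing the macro},
  author    = {Doe, Jane},
  publisher = aps,
  year      = {2025},
}

Because aps is written without braces, BibTeX substitutes the string defined above; writing {aps} in braces would be taken as the literal text instead.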
 
-@article{Malimban2024,
-  abbr = {},
-  bibtex_show = {true},
-  title = {A simulation framework for preclinical proton irradiation workflow},
-  author = {Malimban, Justin and Ludwig, Felix and Lathouwers, Danny and Staring, Marius and Verhaegen, Frank and Brandenburg, Sytze},
-  journal = {Physics in Medicine and Biology},
-  volume = {69},
-  pages = {215040},
-  month = {},
-  year = {2024},
-  pdf = {2024_j_PMB.pdf},
-  html = {https://doi.org/10.1088/1361-6560/ad897f},
-  arxiv = {},
-  code = {},
-  abstract = {<b>Objective:</b> The integration of proton beamlines with X-ray imaging/irradiation platforms has opened up possibilities for image-guided Bragg peak irradiations in small animals. Such irradiations allow selective targeting of normal tissue substructures and tumours. However, their small size and location pose challenges in designing experiments. This work presents a simulation framework useful for optimizing beamlines, imaging protocols, and the design of animal experiments. The usage of the framework is demonstrated, mainly focusing on the imaging part.<br><b>Approach:</b> The fastCAT toolkit was modified with Monte Carlo (MC)-calculated primary and scatter data of a small animal imager for the simulation of micro-CT scans. The simulated CT of a mini-calibration phantom from fastCAT was validated against a full MC TOPAS CT simulation. A realistic beam model of a preclinical proton facility was obtained from beam transport simulations to create irradiation plans in matRad. Simulated CT images of a digital mouse phantom were generated using single-energy CT (SECT) and dual-energy CT (DECT) protocols, and their accuracy in proton stopping power ratio (SPR) estimation and their impact on calculated proton dose distributions in a mouse were evaluated.<br><b>Main Results:</b> The CT numbers from fastCAT agree within 11 HU with TOPAS, except for materials at the centre of the phantom. Discrepancies for the central inserts are caused by beam hardening issues. The root mean square deviations in the SPR for the best SECT (90 kV/Cu) and DECT (50 kV/Al-90 kV/Al) protocols are 3.7% and 1.0%, respectively. Dose distributions calculated for the SECT and DECT datasets revealed range shifts <0.1 mm, gamma pass rates (3%/0.1 mm) greater than 99%, and no substantial dosimetric differences for all structures. The outcomes suggest that SECT is sufficient for proton treatment planning in animals.<br><b>Significance:</b> The framework is a useful tool for the development of an optimized experimental configuration without using animals and beam time.},
-}
-
-@article{Jia2024b,
-  abbr = {},
-  bibtex_show = {true},
-  title = {Explainable fully automated CT scoring of interstitial lung disease for patients suspected of systemic sclerosis by cascaded regression neural networks and its comparison with experts},
-  author = {Jia, Jingnan and Hern{\'a}ndez Gir{\'o}n, Irene and Schouffoer, Anne A. and De Vries-Bouwstra, Jeska K. and Ninaber, Maarten K. and Korving, Julie C. and Staring, Marius and Kroft, Lucia J.M. and Stoel, Berend C.},
-  journal = {Scientific Reports},
-  volume = {14},
-  pages = {26666},
-  month = {},
-  year = {2024},
-  pdf = {2024_j_SR.pdf},
-  html = {https://doi.org/10.1038/s41598-024-78393-4},
-  arxiv = {},
-  code = {},
-  abstract = {Visual scoring of interstitial lung disease in systemic sclerosis (SSc-ILD) from CT scans is laborious, subjective and time-consuming. This study aims to develop a deep learning framework to automate SSc-ILD scoring. The automated framework is a cascade of two neural networks. The first network selects the craniocaudal positions of the five scoring levels. Subsequently, for each level, the second network estimates the ratio of three patterns to the total lung area: the total extent of disease (TOT), ground glass (GG) and reticulation (RET). To overcome the score imbalance in the second network, we propose a method to augment the training dataset with synthetic data. To explain the network's output, a heat map method is introduced to highlight the candidate interstitial lung disease regions. The explainability of heat maps was evaluated by two human experts and a quantitative method that uses the heat map to produce the score. The results show that our framework achieved a κ of 0.66, 0.58, and 0.65, for the TOT, GG and RET scoring, respectively. Both experts agreed with the heat maps in 91%, 90% and 80% of cases, respectively. Therefore, it is feasible to develop a framework for automated SSc-ILD scoring, which performs competitively with human experts and provides high-quality explanations using heat maps. Confirming the model's generalizability is needed in future studies.},
-}
-
-@article{Jia2024a,
-  abbr = {},
-  bibtex_show = {true},
-  title = {Using 3D point cloud and graph-based neural networks to improve the estimation of pulmonary function tests from chest CT},
-  author = {Jia, Jingnan and Yu, Bo and Mody, Prerak and Ninaber, Maarten K. and Schouffoer, Anne A. and Kroft, Lucia J.M. and Staring, Marius and Stoel, Berend C.},
-  journal = {Computers in Biology and Medicine},
-  volume = {182},
-  pages = {109192},
-  month = {November},
-  year = {2024},
-  pdf = {2024_j_CMB.pdf},
-  html = {https://doi.org/10.1016/j.compbiomed.2024.109192},
-  arxiv = {},
-  code = {https://github.com/Jingnan-Jia/PFT_regression},
-  abstract = {Pulmonary function tests (PFTs) are important clinical metrics to measure the severity of interstitial lung disease in systemic sclerosis patients. However, PFTs cannot always be performed by spirometry if there is a risk of disease transmission or other contraindications. In addition, it is unclear how lung function is affected by changes in lung vessels. Convolutional neural networks (CNNs) have previously been proposed to estimate PFTs from chest CT scans (CNN-CT) and extracted vessels (CNN-Vessel). Due to GPU memory constraints, however, these networks used down-sampled images, which causes a loss of information on small vessels. Previous work based on CNNs has indicated that detailed vessel information from CT scans can be helpful for PFT estimation. Therefore, this paper proposes to use a point cloud neural network (PNN-Vessel) and a graph neural network (GNN-Vessel) to estimate PFTs from point cloud and graph-based representations of pulmonary vessel centerlines, respectively. After that, we perform a multiple variable step-wise regression analysis to explore whether vessel-based networks can contribute to PFT estimation in addition to CNN-CT. Results showed that both PNN-Vessel and GNN-Vessel outperformed CNN-Vessel, by 14% and 4%, respectively, when averaged across the ICC scores of four PFT metrics. In addition, compared to CNN-Vessel, PNN-Vessel used 30% of the training time (1.1 hours) and 7% of the parameters (2.1 M), and GNN-Vessel used only 7% of the training time (0.25 hours) and 0.7% of the parameters (0.2 M). Our multiple variable regression analysis further verified that more detailed vessel information could provide additional explanation for PFT estimation from anatomical imaging.},
-}
-
-@article{Mody:2024b,
-  abbr = {},
-  bibtex_show = {true},
-  title = {Improving Uncertainty-Error Correspondence in Deep Bayesian Medical Image Segmentation},
-  author = {Mody, Prerak and Chaves-de-Plaza, Nicolas and Rao, Chinmay and Astreinidou, Eleftheria and De Ridder, Mischa and Hoekstra, Nienke and Hildebrandt, Klaus and Staring, Marius},
-  journal = {The Journal of Machine Learning for Biomedical Imaging},
-  volume = {2},
-  pages = {1048--1082},
-  month = {August},
-  year = {2024},
-  pdf = {2024_j_MELBAb.pdf},
-  html = {https://doi.org/10.59275/j.melba.2024-5gc8},
-  arxiv = {},
-  code = {https://github.com/prerakmody/bayesuncertainty-error-correspondence},
-  abstract = {Increased usage of automated tools like deep learning in medical image segmentation has alleviated the bottleneck of manual contouring. This has shifted manual labour to quality assessment (QA) of automated contours, which involves detecting errors and correcting them. A potential solution to semi-automated QA is to use deep Bayesian uncertainty to recommend potentially erroneous regions, thus reducing time spent on error detection. Previous work has investigated the correspondence between uncertainty and error; however, no work has been done on improving the ``utility'' of Bayesian uncertainty maps such that uncertainty is present only in inaccurate regions and not in accurate ones. Our work trains the FlipOut model with the Accuracy-vs-Uncertainty (AvU) loss, which promotes uncertainty to be present only in inaccurate regions. We apply this method to datasets of two radiotherapy body sites, i.e. head-and-neck CT and prostate MR scans. Uncertainty heatmaps (i.e. predictive entropy) are evaluated against voxel inaccuracies using Receiver Operating Characteristic (ROC) and Precision-Recall (PR) curves. Numerical results show that, compared to the Bayesian baseline, the proposed method successfully suppresses uncertainty for accurate voxels, with a similar presence of uncertainty for inaccurate voxels. Code to reproduce the experiments is available at <a href="https://github.com/prerakmody/bayesuncertainty-error-correspondence">https://github.com/prerakmody/bayesuncertainty-error-correspondence</a>.}
-}
 
 @article{Chaves-de-Plaza:2024,
   abbr = {CGF},
@@ -1230,4 +1163,3 @@ @article{Staring:2007b |
   abstract = {Medical images that are to be registered for clinical application often contain both structures that deform and ones that remain rigid. Nonrigid registration algorithms that do not model properties of different tissue types may result in deformations of rigid structures. In this article a local rigidity penalty term is proposed which is included in the registration function in order to penalize the deformation of rigid objects. This term can be used for any representation of the deformation field capable of modelling locally rigid transformations. By using a B-spline representation of the deformation field, a fast algorithm can be devised. The proposed method is compared with an unconstrained nonrigid registration algorithm. It is evaluated on clinical three-dimensional CT follow-up data of the thorax and on two-dimensional DSA image sequences. The results show that nonrigid registration using the proposed rigidity penalty term is capable of nonrigidly aligning images, while keeping user-defined structures locally rigid.},
 }
 
-