<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Serious Games</journal-id><journal-id journal-id-type="publisher-id">games</journal-id><journal-id journal-id-type="index">15</journal-id><journal-title>JMIR Serious Games</journal-title><abbrev-journal-title>JMIR Serious Games</abbrev-journal-title><issn pub-type="epub">2291-9279</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v14i1e75962</article-id><article-id pub-id-type="doi">10.2196/75962</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Usability Study of Augmented Reality Visualization Modalities on Localization Accuracy in the Head and Neck: Randomized Crossover Trial</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Li</surname><given-names>Yao</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Luijten</surname><given-names>Gijs</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Gsaxner</surname><given-names>Christina</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" 
rid="aff5">5</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Grunert</surname><given-names>Kim</given-names></name><degrees>MD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Bader</surname><given-names>Alexis</given-names></name><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff6">6</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>H&#x00F6;lzle</surname><given-names>Frank</given-names></name><degrees>MD, DMD, PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>R&#x00F6;hrig</surname><given-names>Rainer</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>de la Fuente</surname><given-names>Mat&#x00ED;as</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff7">7</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Egger</surname><given-names>Jan</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Xie</surname><given-names>Kunpeng</given-names></name><degrees>MSM</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Hinrichs-Puladi</surname><given-names>Behrus</given-names></name><degrees>MD, DMD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="aff" rid="aff2">2</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Oral and Maxillofacial Surgery, 
University Hospital RWTH Aachen</institution><addr-line>Pauwelsstrasse 30</addr-line><addr-line>Aachen</addr-line><country>Germany</country></aff><aff id="aff2"><institution>Institute of Medical Informatics, University Hospital RWTH Aachen</institution><addr-line>Aachen</addr-line><country>Germany</country></aff><aff id="aff3"><institution>Institute for Artificial Intelligence in Medicine (IKIM), Essen University Hospital</institution><addr-line>Essen</addr-line><country>Germany</country></aff><aff id="aff4"><institution>Center for Virtual and Extended Reality in Medicine (ZvRM), Essen University Hospital</institution><addr-line>Essen</addr-line><country>Germany</country></aff><aff id="aff5"><institution>Institute of Computer Graphics and Vision (ICG), Graz University of Technology</institution><addr-line>Graz</addr-line><country>Austria</country></aff><aff id="aff6"><institution>Department of Systems Design Engineering, University of Waterloo</institution><addr-line>Waterloo</addr-line><addr-line>ON</addr-line><country>Canada</country></aff><aff id="aff7"><institution>Chair of Medical Engineering, RWTH Aachen University</institution><addr-line>Aachen</addr-line><country>Germany</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Coristine</surname><given-names>Andrew</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Berglund</surname><given-names>Erik</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Yang</surname><given-names>Yue</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Kunpeng Xie, MSM, Department of Oral and Maxillofacial Surgery, University Hospital RWTH Aachen, Pauwelsstrasse 30, Aachen, 52074, Germany, 49 2418088231; <email>kxie@ukaachen.de</email></corresp></author-notes><pub-date pub-type="collection"><year>2026</year></pub-date><pub-date 
pub-type="epub"><day>13</day><month>1</month><year>2026</year></pub-date><volume>14</volume><elocation-id>e75962</elocation-id><history><date date-type="received"><day>22</day><month>04</month><year>2025</year></date><date date-type="rev-recd"><day>21</day><month>10</month><year>2025</year></date><date date-type="accepted"><day>21</day><month>10</month><year>2025</year></date></history><copyright-statement>&#x00A9; Yao Li, Gijs Luijten, Christina Gsaxner, Kim Grunert, Alexis Bader, Frank H&#x00F6;lzle, Rainer R&#x00F6;hrig, Mat&#x00ED;as de la Fuente, Jan Egger, Kunpeng Xie, Behrus Hinrichs-Puladi. Originally published in JMIR Serious Games (<ext-link ext-link-type="uri" xlink:href="https://games.jmir.org">https://games.jmir.org</ext-link>), 13.1.2026. </copyright-statement><copyright-year>2026</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Serious Games, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://games.jmir.org">https://games.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://games.jmir.org/2026/1/e75962"/><abstract><sec><title>Background</title><p>Augmented reality head-mounted displays could overcome the spatial dissociation between medical imaging and the surgical field, which may be particularly important in anatomically dense regions, such as the head and neck. 
Although many head-mounted displays offer markerless inside-out tracking at a fraction of the cost of navigation systems, their overlay accuracy with superimposition (SI) modality onto the surgical field remains limited. The virtual twin (VT), displaying holography adjacent to the surgical field, may offer a viable alternative. However, its performance is still unclear.</p></sec><sec><title>Objective</title><p>This study aimed to compare the accuracy and efficiency of the two visualization modalities, SI and VT, for anatomical localization in the head and neck region.</p></sec><sec sec-type="methods"><title>Methods</title><p>In a randomized crossover trial to compare two augmented reality visualization modalities (SI and VT), 38 participants used a HoloLens 2 to localize point, line-based, and volume-based anatomical structures on head phantoms. Their performance was evaluated with respect to accuracy, workload, time, and user experience.</p></sec><sec sec-type="results"><title>Results</title><p>SI achieved significantly better point localization accuracy than VT both in absolute (mean 14.4, SD 4.2 mm vs mean 15.8, SD 5.5 mm; <italic>P</italic>=.003) and relative accuracy (mean 3.4, SD 2.2 mm vs mean 6.0, SD 5.0 mm; <italic>P</italic>&#x003C;.001). In line-based structures, accuracy was comparable between SI (average surface distance [ASD], mean 23.4, SD 4.1 mm; Hausdorff distance [HD], mean 31.5, SD 7.8 mm) and VT (ASD=mean 23.0, SD 4.5 mm; <italic>P</italic>=.51; HD=mean 31.0, SD 7.5 mm; <italic>P</italic>=.57). However, SI showed significantly higher deviation than VT in volume-based structure (ASD=mean 37.1, SD 13.8 mm vs mean 34.1, SD 14.2 mm; <italic>P</italic>=.01; HD=mean 52.0, SD 16.8 mm vs mean 49.1, SD 15.8 mm; <italic>P</italic>=.03). 
Participants were faster with SI (<italic>P</italic>=.02), while workload NASA-TLX (National Aeronautics and Space Administration Task Load Index) scores did not demonstrate a significant difference (<italic>P</italic>=.79).</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Given that SI did not clearly outperform VT under overlaid soft tissue and viewing challenges, VT remains a viable alternative in certain surgical scenarios where high accuracy is not required. Future research should focus on optimizing viewing angle guidance and the linkage between the anatomical target and the skin surface.</p></sec><sec><title>Trial Registration</title><p>German Clinical Trial Register DRKS00032835; https://drks.de/search/en/trial/DRKS00032835</p></sec></abstract><kwd-group><kwd>mixed reality</kwd><kwd>computer-assisted surgery</kwd><kwd>visualization techniques</kwd><kwd>human-machine interface</kwd><kwd>preoperative planning</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>The head and neck region contains a variety of complex anatomical structures, including numerous vital nerves, blood vessels, and organs [<xref ref-type="bibr" rid="ref1">1</xref>]. Accurate localization of these anatomical structures is crucial in surgical practice to minimize deviation and improve outcomes [<xref ref-type="bibr" rid="ref2">2</xref>]. Conventional medical imaging techniques, such as computed tomography (CT) and cone beam CT, as well as magnetic resonance imaging, are primarily used for diagnosis and preoperative planning [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>]. Medical images require surgeons to mentally map them onto the patient&#x2019;s anatomy during the operation. 
This process demands a high level of cognitive effort, especially in the anatomically dense head and neck region, where misinterpretation could compromise the surgical accuracy and outcomes [<xref ref-type="bibr" rid="ref5">5</xref>,<xref ref-type="bibr" rid="ref6">6</xref>]. Surgical navigation systems (SNS) offer solutions by integrating image data into the surgical workflow. However, the limitations of the 3D display still leave the operator reliant on spatial imagination to understand complex anatomy. Furthermore, the broader adoption of SNS has been impeded by high expenses, the inherently sophisticated configurations like optical tracking cameras and reflective markers, and the possible additional radiation exposure to patients and staff [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. As a result, there is still a lack of a cost-effective, intuitive, 3D interactive visualization approach that seamlessly displays the patient&#x2019;s medical images in the field.</p><p>Augmented reality (AR) could fill this gap by providing real-time holographic images directly within the surgical field mainly through head-mounted displays (HMDs) [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref9">9</xref>]. Moreover, many current AR HMDs can provide markerless inside-out tracking at a fraction of the cost of SNS and eliminate the need for additional markers [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>]. Unlike SNS, which typically tracks the patient and instruments, this kind of HMD-based tracking focuses on aligning virtual content with the patient&#x2019;s anatomy to enable hologram overlay. However, the overlay or registration accuracy of many HMDs is still not as accurate as traditional SNSs with external optical tracking at the millimeter level [<xref ref-type="bibr" rid="ref10">10</xref>]. 
This limitation becomes particularly critical for the superimposition (SI) visualization modality, where virtual anatomical structures need to be precisely placed on real anatomy, a process referred to as registration [<xref ref-type="bibr" rid="ref12">12</xref>-<xref ref-type="bibr" rid="ref14">14</xref>]. In addition, SI may introduce occlusion, as holograms can obstruct the surgeon&#x2019;s view of anatomy or instruments. These challenges raise concerns about the feasibility of SI as the optimal visualization modality for AR-assisted surgery, given the setup of currently available HMDs free of external tracking [<xref ref-type="bibr" rid="ref15">15</xref>].</p><p>An alternative visualization modality is the virtual twin (VT), where the holographic representation is displayed adjacent to the physical anatomy instead of directly overlaid on the anatomy [<xref ref-type="bibr" rid="ref15">15</xref>,<xref ref-type="bibr" rid="ref16">16</xref>]. By avoiding overlay, VT reduces dependence on registration accuracy and eliminates occlusion.</p><p>However, the accuracy between two modalities under markerless HMD-based tracking remains unexplored. Yet, this could be important, since if SI with intrinsic markerless tracking does not show any advantage over VT, then VT would be the favored modality for certain surgical scenarios. Therefore, the aim of this crossover randomized controlled trial (RCT) was to compare the accuracy and efficiency of the two visualization modalities, SI and VT, for anatomical localization in the head and neck region. Localization accuracy was assessed on phantom heads for clinically relevant targets, including nerve exit points, the inferior alveolar nerve, and the salivary glands. 
Task duration and subjective workload were evaluated as secondary endpoints.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Overview</title><p>In total, 38 participants with different professional backgrounds (dental and medical students, resident and specialist surgeons in oral and maxillofacial, oral, and plastic surgery) were recruited and performed drawings on polystyrene foam head phantoms (Model SAM, Friseurbedarf D. M. Rudolph) in a crossover RCT with SI and VT visualization modalities. The participants were asked to draw the structures on the head phantoms, wearing HoloLens 2 (HL2; Microsoft Corp). The primary endpoint was the localization accuracy of the anatomical points (0D), which encompass nerve exit points at the supraorbital, infraorbital, and mental foramina. Secondary endpoints included the delineation accuracy of the inferior alveolar nerve pathways (2D) and salivary glands (parotid and submandibular; 3D), cognitive workload, and user experience.</p></sec><sec id="s2-2"><title>System Description and Implementation</title><p>The AR visualization software for the HL2 was developed in-house to display anatomical 3D models in relation to the physical anatomy of patients or phantoms. Within the application, switching between the two different visualization modalities for the 3D models was possible. In addition to the HL2 software, a pipeline processed the medical image data. This pipeline converted volumetric CT scans into 3D models optimized for interventional planning and efficient rendering on the HL2.</p><p>Based on these requirements, the planning pipeline was built to segment the structures into meshes in 3D Slicer (version 5.2.2; The Slicer Community). 
The structures comprising the skull, salivary glands, and nerve exit points were manually segmented from a publicly available head and neck CT dataset [<xref ref-type="bibr" rid="ref17">17</xref>], while the inferior alveolar nerves were segmented from a nonpublic dataset from the Medical University of Graz. A head phantom mesh was scanned by the Artec Leo 3D scanner (Artec 3D) as the skin surface. Finally, all segmented anatomical structures were nonlinearly registered to the scanned skin surface.</p><p>Our AR application was developed using Unity (version 2022.3.6f1; Unity Technologies). The registration between the head phantoms and the virtual head was implemented using the Vuforia software development kit (version 10.16.5, Parametric Technology Corporation). Vuforia Engine is a cross-platform AR solution that offers a variety of tracking features, which was frequently used in research for AR registration in surgical scenarios [<xref ref-type="bibr" rid="ref18">18</xref>-<xref ref-type="bibr" rid="ref20">20</xref>]. The model targets (object tracking) were applied, which possibly used edge-based techniques (not revealed by Vuforia) to recognize and track objects in real time [<xref ref-type="bibr" rid="ref21">21</xref>]. First, the scanned head model was uploaded to the model target generator tool and configured into a model target that could be integrated into Unity. After the software was deployed to the HL2, the Vuforia engine initiated tracking for target alignment. Once the participant was satisfied with the alignment, she or he could lock the tracking to anchor the virtual model in the environment. Similarly, in VT, Vuforia would track the phantom, and then the model would appear next to it; locking the tracking again would fix the model in place. The hand menu assisted users in controlling the visibility of various anatomical structures, including the skin and target structures. 
In addition, sliders were implemented to allow real-time adjustment of the transparency and brightness of these structures.</p></sec><sec id="s2-3"><title>Trial</title><p>The participants were asked to fill out the initial questionnaire, which included demographic information (age, gender, educational stage or professional experience, professional field, and prior experience with AR and HL2). Randomization was generated by BHP using a randomized allocation rule to determine the starting modality (sequence) and the side of the face (right or left). The experiment assistant (KG) enrolled and assigned participants to the sequence of intervention. Registration was done once at the beginning of each modality by the experiment assistant, who could lock or unlock the tracking for registration as needed. Subsequently, they wore HL2, ran the eye calibration, and received a brief introduction to the device and the user interface with the 2 modalities. During this short session, they familiarized themselves with the device and its functions. The entire familiarization process was completed in less than 3 minutes, although precise timing was not recorded. Participants were then instructed to delineate target anatomical structures on the head phantom surface using Point 88 fine liner pens (Stabilo; <xref ref-type="fig" rid="figure1">Figure 1</xref>). This task was performed on the assigned half of the face using the first modality, with time recorded via a stopwatch. Upon completion, participants filled out the Likert questionnaire and NASA-TLX (National Aeronautics and Space Administration Task Load Index) for that method and an open-ended questionnaire. The same procedure was then repeated on the other side of the face using the second modality, followed by the corresponding questionnaires. 
Finally, an open-ended questionnaire for preference was answered.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Illustration of two augmented reality visualization modalities using HoloLens 2. (A) Participant drawing anatomical structures (nerve exit points, inferior alveolar nerves, and salivary glands) on the polystyrene head phantom with HoloLens 2. (B) Schematic illustration of SI showing physical and holographic alignment with potential rigid offset and occlusion. (C) Schematic illustration of virtual twin showing how holograms are displayed free of misalignment and occlusion problem. (D) SI modality in HoloLens 2, where holograms were overlaid directly into the physical head phantom. (E) Virtual twin modality in HoloLens 2, where the holograms were displayed spatially adjacent to the physical head phantom. SI: superimposition; VT: virtual twin.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="games_v14i1e75962_fig01.png"/></fig></sec><sec id="s2-4"><title>Evaluation</title><p>After the trial, all the polystyrene head phantoms were scanned with the Artec Leo 3D scanner (<xref ref-type="fig" rid="figure2">Figure 2</xref>). To enable comparison, all head phantoms with the participant&#x2019;s delineations were registered to the virtual planned head in a pipeline by a Python (version 3.10; Python Software Foundation) script. The two-stage pipeline was initiated with a global random sample consensus alignment, followed by a local refinement with point-to-plane iterative closest point, achieving &#x003C;0.4 mm root mean square error. Two independent investigators (YL and KG) evaluated the scanned heads using Blender (version 4.2; Blender Foundation). Both investigators were blinded to the applied visualization modality. 
To minimize a possible recall bias, KG, who served as the experiment assistant during data acquisition, underwent a washout period of 2 months before participating in the blinded evaluation. Nerve exit points were annotated by placing spheres at the drawn stroke points. The nerve paths and salivary glands were drawn by the grease pencil tool along the curves on the head phantom surface, and the strokes were transformed into meshes in Blender (<xref ref-type="fig" rid="figure2">Figure 2</xref>).</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>(A) Scanned polystyrene head phantom with delineation. (B) In Blender, the scanned polystyrene head phantom is shown with inferior alveolar nerve and salivary glands annotations, and spheres marking the nerve exit points (orange).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="games_v14i1e75962_fig02.png"/></fig><p>Afterward, the points and curves were automatically compared using a Python script. The analysis for nerve exit points (0D) involved calculating Euclidean distance, which is the shortest distance in 3D space between the planned and drawn points, and we referred to this as absolute accuracy. Relative accuracy, defined as landmark-to-landmark localization accuracy, was compared by the Euclidean distance between the supraorbital-infraorbital and infraorbital-mental foramina on drawn versus planned landmarks. Since all anatomical targets were located on the underlying bone, yet localization was performed on the phantom&#x2019;s external surface, the concept of soft-tissue thickness was additionally introduced to capture the distance between the target structures and the skin. It was defined as the shortest distance from each anatomical point (0D) to the surface and as the mean of the vertex-to-surface distances for 2D nerve pathways and 3D salivary glands. 
Furthermore, the Hausdorff distance (HD) and the average surface distance (ASD) were used in order to assess the alignment and accuracy of the contours of the nerve paths (line, 2D) and salivary glands (volume, 3D). HD captures the maximum of the minimum distances between the two surfaces, providing insight into the worst-case alignment error, while the ASD quantifies the mean discrepancy, reflecting the overall degree of alignment.</p><p>The Likert questionnaire and NASA-TLX were quantitatively analyzed to assess usability and perceived workload. In addition, the feedback from open-ended questions was summarized by YL and reviewed by BHP.</p></sec><sec id="s2-5"><title>Sample Size Calculation</title><p>The sample size calculation was conducted in R software (version 4.3.1; R Foundation for Statistical Computing). A minimum effect size of 5 mm was established as the threshold for an acceptable difference between the two modalities in absolute accuracy. A 5 mm difference in absolute accuracy causes a surface discrepancy exceeding 5 mm due to the geometric relationship, making it clinically relevant and detectable by oral and maxillofacial surgeons, corresponding to the widely accepted minimum margin in head and neck oncologic surgery [<xref ref-type="bibr" rid="ref22">22</xref>,<xref ref-type="bibr" rid="ref23">23</xref>]. Based on the results of a pretrial with 4 participants, the mean absolute accuracy was 10.1 (SD 4.8) mm (SI) and 12.1 (SD 5.0) mm (VT) across all nerve exit points. A normal distribution of the pretest values (Shapiro-Wilk test; <italic>P</italic>=.70) resulted in a required number of cases of 34 for the unpaired <italic>t</italic> test. An additional 4 participants were included to compensate for nonevaluable datasets and for dropout or withdrawal of consent.</p></sec><sec id="s2-6"><title>Statistical Analysis</title><p>Statistical analysis was also performed in R. 
A linear mixed-effects model (LMM) was applied using the <italic>lmerTest</italic> package [<xref ref-type="bibr" rid="ref24">24</xref>]. This LMM assessed the absolute accuracy at the point structures, modalities (SI vs VT), the sequence (starting method), the group (dental and medical students, and surgeons), subcutaneous soft tissue thickness, and side (left or right) as fixed effects and the participants as a random effect. When analyzing the ASD and HD for line and volume-based structures, the same LMM framework was applied. Subcutaneous soft tissue thickness was specifically included to account for anatomical variation across different locations. However, it was not considered in the analysis of relative accuracy for point structures, which instead relied more on spatial reference to other anatomical landmarks.</p><p>The normality of the data distribution was assessed using the Shapiro-Wilk test. Duration and each Likert question between methods were compared using the Mann-Whitney <italic>U</italic> test. The NASA-TLX scales were compared by unpaired two-tailed <italic>t</italic> test. For all tests mentioned, a <italic>P</italic> value of &#x003C;.05 was considered significant.</p></sec><sec id="s2-7"><title>Ethical Considerations</title><p>This study was approved by the local ethics committee of the University Hospital RWTH (Rheinisch-Westf&#x00E4;lische Technische Hochschule) Aachen (EK 24&#x2010;127; Chairman Prof Ralf Hausmann; April 3, 2024). The study was registered with a study protocol in advance in the German Clinical Trial Register (DRKS00032835) and followed the CONSORT (Consolidated Standards of Reporting Trials) 2010 guidelines (<xref ref-type="supplementary-material" rid="app1">Checklist 1</xref>) and its extension designed and modified specifically for crossover studies, as illustrated by the flow diagram [<xref ref-type="bibr" rid="ref25">25</xref>,<xref ref-type="bibr" rid="ref26">26</xref>]. 
Informed consent was obtained from all participants involved in the study. To protect the privacy of the participants, all participants were anonymized, and no personally identifiable information was stored with the research data. It is to be noted that no financial compensation was provided to the participants involved in the present trial. Nevertheless, as a token of appreciation, two vouchers with a total value of &#x20AC;15 (US $17.50) were distributed through a raffle.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Cohort</title><p>A total of 38 participants (16 females and 22 males) were successfully included in the study, comprising two groups, namely surgeons, and medical and dental students, following the flow (<xref ref-type="fig" rid="figure3">Figure 3</xref>). Among the 18 surgeons, there were 12 residents and 6 specialists. This group included 9 oral and maxillofacial surgeons, 5 oral surgeons, and 4 plastic surgeons. In the student group, which consisted of 20 participants, 17 were dental students and 3 were medical students. The average age of participants was 26.8 (SD 5.1; range 20&#x2010;43) years. The average clinical experience of surgeons was 4.0 (SD 4.3) years, and the average clinical experience of medical and dental students was 4.2 (SD 0.9) years; mean 8.3 (SD 1.6) semesters (<xref ref-type="table" rid="table1">Table 1</xref>).</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>CONSORT (Consolidated Standards of Reporting Trials) flow diagram illustrating the enrollment, allocation, crossover, follow-up, and analysis of participants in the study. 
SI: superimposition; VT: virtual twin.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="games_v14i1e75962_fig03.png"/></fig><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Characteristics of the cohort.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" colspan="2">Parameter</td><td align="left" valign="bottom">Surgeon (n=18)</td><td align="left" valign="bottom">Student (n=20)</td><td align="left" valign="bottom">Total (n=38)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="5">Sex, n (%)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Female</td><td align="left" valign="top">4 (22.2)</td><td align="left" valign="top">12 (60)</td><td align="left" valign="top">16 (42.1)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Male</td><td align="left" valign="top">14 (77.8)</td><td align="left" valign="top">8 (40)</td><td align="left" valign="top">22 (57.9)</td></tr><tr><td align="left" valign="top" colspan="5">Age (years)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Mean (SD)</td><td align="left" valign="top">30.3 (5.2)</td><td align="left" valign="top">23.8 (2.5)</td><td align="left" valign="top">26.8 (5.1)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Range</td><td align="left" valign="top">23-43</td><td align="left" valign="top">20-30</td><td align="left" valign="top">20-43</td></tr><tr><td align="left" valign="top" colspan="5">Profession, n (%)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content 
content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Medical</td><td align="left" valign="top">&#x2014;<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup></td><td align="left" valign="top">3 (15)</td><td align="left" valign="top">3 (7.9)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Dental student</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">17 (85)</td><td align="left" valign="top">17 (44.7)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Oral surgery</td><td align="left" valign="top">5 (27.8)</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">5 (13.2)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Oral and maxillofacial surgery</td><td align="left" valign="top">9 (50)</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">9 (23.7)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Plastic surgery</td><td align="left" valign="top">4 (22.2)</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">4 (10.5)</td></tr><tr><td align="left" valign="top" colspan="5">Clinical study/work experience (years)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Mean (SD)</td><td align="left" valign="top">4.0 (4.3)</td><td align="left" valign="top">4.2 (0.9)</td><td align="left" valign="top">4.1 (3.0)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Range</td><td align="left" valign="top">0.0-15.0</td><td align="left" 
valign="top">3.0-6.0</td><td align="left" valign="top">0.0-15.0</td></tr><tr><td align="left" valign="top" colspan="5">Previous experience with AR (Likert score, 1&#x2013;5)<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup></td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Mean (SD)</td><td align="left" valign="top">2.4 (0.8)</td><td align="left" valign="top">2.1 (1.0)</td><td align="left" valign="top">2.2 (0.9)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Range</td><td align="left" valign="top">1.0-4.0</td><td align="left" valign="top">1.0-5.0</td><td align="left" valign="top">1.0-5.0</td></tr><tr><td align="left" valign="top" colspan="5">Previous experience with HL (Likert score, 1&#x2013;5)<sup><xref ref-type="table-fn" rid="table1fn3">c</xref></sup></td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Mean (SD)</td><td align="left" valign="top">1.7 (0.8)</td><td align="left" valign="top">1.1 (0.3)</td><td align="left" valign="top">1.4 (0.6)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Range</td><td align="left" valign="top">1.0-3.0</td><td align="left" valign="top">1.0-2.0</td><td align="left" valign="top">1.0-3.0</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>Not applicable.</p></fn><fn id="table1fn2"><p><sup>b</sup>AR: augmented reality; Likert scores from 1=&#x201C;never heard of&#x201D; to 5=&#x201C;expert.&#x201D;</p></fn><fn id="table1fn3"><p><sup>c</sup>HL: HoloLens; Likert scores from 1=&#x201C;never used&#x201D; to 5=&#x201C;I use it several times a week.&#x201D;</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-2"><title>Localization 
Accuracy</title><p>In the 38 scanned head phantoms, all the required structures were successfully delineated, except for 1 pair of nerve exit points at infraorbital foramina and 1 pair at supraorbital foramina, which were missed by a single participant. The absolute accuracy of the nerve exit points (0D) was significantly higher in SI (mean 14.4, SD 4.2 mm) than VT (mean 15.8, SD 5.5 mm), with a mean difference of 1.4 (95% CI 0.5&#x2010;2.3; LMM; <italic>P</italic>=.003) mm. The absolute accuracy was correlated with the soft tissue thickness. For each 1 mm soft tissue thickness, the accuracy decreased by 1.4 mm (<italic>P</italic>&#x003C;.001), while no significant difference was found in sequence (<italic>P=</italic>.84) and group (<italic>P</italic>=.40) as fixed effects in the LMM. The average participant bias was 0.8 (SD 0.8) mm. The mean absolute error of the LMM residuals was 1.8 (SD 2.9) mm for SI and 2.5 (SD 3.5) mm for VT, respectively. The relative accuracy of the points was significantly higher for SI (mean 3.4, SD 2.2 mm) than VT (mean 6.0, SD 5.0 mm) by 2.6 (95% CI 1.3&#x2010;3.8 mm; LMM; <italic>P</italic>&#x003C;.001; <xref ref-type="fig" rid="figure4">Figure 4</xref>). In <xref ref-type="fig" rid="figure4">Figure 4</xref>, each violin plot (colored) includes a boxplot (white), with a red dot indicating the mean value. The black points represent the outliers. The dashed line marked the average subcutaneous soft tissue thickness over the nerve exit points.</p><p>The localization accuracy of the inferior alveolar nerve pathways (2D) assessed with ASD and HD was comparable between SI (ASD/HD=mean 23.4, SD 4.1 mm/mean 31.5, SD 7.8 mm) and VT (ASD/HD=mean 23.0, SD 4.5 mm/mean 31.0, SD 7.5 mm), with no significant difference (ASD/HD=mean difference 0.4 mm, 95% CI &#x2212;1.0 to 2.0 mm; LMM; <italic>P</italic>=.51/mean difference 0.6 mm, 95% CI &#x2013;1.6 to 2.9 mm; LMM; <italic>P</italic>=.57). 
Regarding the salivary glands (3D), the localization accuracy measured with ASD/HD (mean 34.1, SD 14.2 mm/mean 49.1, SD 15.8 mm) for VT was significantly more accurate than SI (ASD/HD=mean 37.1, SD 13.8 mm/mean 52.0, SD 16.8 mm) by ASD 3.0 (95% CI 0.7&#x2010;5.4 mm; LMM; <italic>P</italic>=.01) mm and HD 2.9 (95% CI 0.2&#x2010;5.8 mm; LMM; <italic>P</italic>=.03) mm (<xref ref-type="fig" rid="figure5">Figure 5</xref>). In <xref ref-type="fig" rid="figure5">Figure 5</xref>, each violin plot (colored) includes a boxplot (white), with a red dot indicating the mean value. The black points represent the outliers. The dashed line marked the average subcutaneous soft tissue thickness over the inferior alveolar nerves and salivary glands.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Comparison of localization accuracy (y-axis) at nerve exit points (0D) between superimposition (purple) and virtual twin (green; x-axis). (A) Euclidean distance for absolute accuracy. (B) Absolute residual error from the linear mixed-effects model. (C) Euclidean distance for relative accuracy. (D) Relationship between Euclidean distance for absolute accuracy (y-axis) and subcutaneous soft tissue thickness (x-axis). The solid blue line depicts the fitted linear mixed-effects model regression. ED: Euclidean distance; LMM: linear mixed-effects model; SI: superimposition; VT: virtual twin.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="games_v14i1e75962_fig04.png"/></fig><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Comparison of localization accuracy (y-axis) for inferior alveolar nerve pathways (2D) and salivary glands (3D) between superimposition (purple) and virtual twin (green; x-axis). (A) Average surface distance for inferior alveolar nerve pathways (2D). (B) Hausdorff distance for inferior alveolar nerve pathways (2D). (C) Average surface distance for salivary glands (3D). 
(D) Hausdorff distance for salivary glands (3D). ASD: average surface distance; HD: Hausdorff distance; LMM: linear mixed-effects model; SI: superimposition; VT: virtual twin.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="games_v14i1e75962_fig05.png"/></fig></sec><sec id="s3-3"><title>Workload and Time</title><p>The SI method (mean 61.3, SD 29.6 seconds) was significantly faster than the VT method (mean 77.4, SD 34.5 seconds) by 16.1 (95% CI 2.0&#x2010;29.0; Mann-Whitney <italic>U</italic> test; <italic>P</italic>=.02) seconds. The NASA-TLX score for the SI method (mean 39.8, SD 17.3) and VT method (mean 40.8, SD 15.2) was comparable, with no significant difference (mean difference 1.0, 95% CI &#x2013;4.2 to 6.2; <italic>t</italic> test; <italic>P</italic>=.79; <xref ref-type="fig" rid="figure6">Figure 6</xref>). In <xref ref-type="fig" rid="figure6">Figure 6</xref>, each violin plot (colored) includes a boxplot (white), with a red dot indicating the mean value. The black points represent the outliers.</p><fig position="float" id="figure6"><label>Figure 6.</label><caption><p>Subjective ratings and task completion time between superimposition (purple) and virtual twin (green) visualizations (x-axis). (A) Subjective workload assessed using NASA-TLX (National Aeronautics and Space Administration Task Load Index) scores (y-axis). (B) Task completion time in seconds (y-axis). NASA TLX: National Aeronautics and Space Administration Task Load Index; SI: superimposition; VT: virtual twin.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="games_v14i1e75962_fig06.png"/></fig></sec><sec id="s3-4"><title>Questionnaires</title><p>The Likert-type questions (scale 1-4; 1=strong disagreement; 4=strong agreement) showed no significant difference (Mann-Whitney <italic>U</italic> test) between the two modalities (<xref ref-type="table" rid="table2">Table 2</xref>). 
The participants perceived no clear advantage in accurate localization of target structures between SI and VT (mean 3.0, SD 0.9 vs mean 3.0, SD 0.6 points; <italic>P</italic>=.61; mean 2.9, SD 0.8 vs mean 2.8, SD 0.7 lines; <italic>P</italic>=.37; mean 2.7, SD 0.8 vs mean 2.7, SD 0.7 volume; <italic>P=</italic>.95). Participants also reported similar levels of confidence (mean 2.7, SD 0.7 vs mean 2.7, SD 0.6; <italic>P=</italic>.84), distraction (mean 2.2, SD 1.0 vs mean 1.8, SD 0.9; <italic>P=</italic>.05), provided assistance (mean 2.9, SD 0.8 vs mean 3.1, SD 0.6; <italic>P=</italic>.46), practicality (mean 2.4, SD 0.9 vs mean 2.8, SD 0.8; <italic>P=</italic>.09), perceived feasibility in interventions (mean 2.8, SD 1.1 vs mean 2.7, SD 0.9; <italic>P=</italic>.57), safety enhancement (mean 2.5, SD 0.9 vs mean 2.7, SD 0.9; <italic>P=</italic>.19), and overall satisfaction (mean 2.8, SD 0.9 vs mean 2.9, SD 0.8; <italic>P=</italic>.66). In addition, positive and negative detailed feedback was provided for both visualization modalities (<xref ref-type="table" rid="table3">Table 3</xref>). 
It is noteworthy that 19 participants expressed a preference for VT, 18 participants for SI, and 1 participant expressed equal preference for both.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Likert questionnaire.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Likert questions</td><td align="left" valign="bottom">SI<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup>, mean (SD)</td><td align="left" valign="bottom">VT<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup>, mean (SD)</td><td align="left" valign="bottom">Total, mean (SD)</td><td align="left" valign="bottom"><italic>P</italic> value</td></tr></thead><tbody><tr><td align="left" valign="top">I was able to accurately mark the nerve exit points using the (SI or VT) visualization.</td><td align="left" valign="top">3.0 (0.9)</td><td align="left" valign="top">3.0 (0.6)</td><td align="left" valign="top">3.0 (0.8)</td><td align="left" valign="top">.61</td></tr><tr><td align="left" valign="top">I was able to accurately mark the nerve pathways using the (SI or VT) visualization.</td><td align="left" valign="top">2.9 (0.8)</td><td align="left" valign="top">2.8 (0.7)</td><td align="left" valign="top">2.8 (0.7)</td><td align="left" valign="top">.37</td></tr><tr><td align="left" valign="top">I was able to accurately mark the salivary glands using the (SI or VT) visualization.</td><td align="left" valign="top">2.7 (0.8)</td><td align="left" valign="top">2.7 (0.7)</td><td align="left" valign="top">2.7 (0.7)</td><td align="left" valign="top">.95</td></tr><tr><td align="left" valign="top">I was sure where the anatomical structures were located and where to mark them.</td><td align="left" valign="top">2.7 (0.7)</td><td align="left" valign="top">2.7 (0.6)</td><td align="left" valign="top">2.7 (0.7)</td><td align="left" valign="top">.84</td></tr><tr><td align="left" valign="top">I found using the (SI or VT) visualization 
distracting while marking.</td><td align="left" valign="top">2.2 (1.0)</td><td align="left" valign="top">1.8 (0.9)</td><td align="left" valign="top">2.0 (1.0)</td><td align="left" valign="top">.05</td></tr><tr><td align="left" valign="top">Using the (SI or VT) visualization facilitated the localization of anatomical structures in the face.</td><td align="left" valign="top">2.9 (0.8)</td><td align="left" valign="top">3.1 (0.6)</td><td align="left" valign="top">3.0 (0.7)</td><td align="left" valign="top">.46</td></tr><tr><td align="left" valign="top">I found the (SI or VT) visualization to be practical for use.</td><td align="left" valign="top">2.4 (0.9)</td><td align="left" valign="top">2.8 (0.8)</td><td align="left" valign="top">2.6 (0.8)</td><td align="left" valign="top">.09</td></tr><tr><td align="left" valign="top">I could imagine performing interventions with AR support using (SI or VT) visualization.</td><td align="left" valign="top">2.8 (1.1)</td><td align="left" valign="top">2.7 (0.9)</td><td align="left" valign="top">2.7 (1.0)</td><td align="left" valign="top">.57</td></tr><tr><td align="left" valign="top">I believe that AR<sup><xref ref-type="table-fn" rid="table2fn3">c</xref></sup> support through (SI or VT) visualization enhances patient safety.</td><td align="left" valign="top">2.5 (0.9)</td><td align="left" valign="top">2.7 (0.9)</td><td align="left" valign="top">2.6 (0.9)</td><td align="left" valign="top">.19</td></tr><tr><td align="left" valign="top">I was generally satisfied with the AR support through the (SI or VT) visualization.</td><td align="left" valign="top">2.8 (0.9)</td><td align="left" valign="top">2.9 (0.8)</td><td align="left" valign="top">2.8 (0.8)</td><td align="left" valign="top">.66</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>SI: superimposition.</p></fn><fn id="table2fn2"><p><sup>b</sup>VT: virtual twin.</p></fn><fn id="table2fn3"><p><sup>c</sup>AR: augmented 
reality.</p></fn></table-wrap-foot></table-wrap><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Summarized open questions.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Visualization modalities</td><td align="left" valign="bottom">Positive</td><td align="left" valign="bottom">Negative</td></tr></thead><tbody><tr><td align="left" valign="top">Superimposition</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>3D, intuitive<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>A novelty experience<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Accurate<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Beginner-friendly<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Clear and detailed<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Contrasting colors enhance structural differentiation<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Could be observed in all directions<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Easy localization of structures<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Easy to use<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Feeling of safety<sup><xref ref-type="table-fn" 
rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Free of time delay<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Good guidance and spatial relationship<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Inner structures could be easily seen in all directions<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Potential to simplify the process<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Simple design<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Time-saving<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Depth is perceived differently in different angles<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Difficult to map 3D structures to 3D surface<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Hard to identify the position of the structures<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Hard to recognize the tip of the pen and place to draw<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Have to lock the registration and move the head phantom to fine-tune it to the hologram<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Inaccurate overlay, holograms are partially overlaid to 
the physical head phantom<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Need familiarization time<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Need to move the head phantom to overlay<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Not practical<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Relatively lacks sharpness<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Restriction of viewpoint, cannot rotate the head phantom to observe after locking the registration<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Some structures have merged<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>The guidance makes the user neglect the critical anatomical landmarks, causing imprecise localization<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>The hologram is blurred, and the double image is tiring<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>The position of the head phantom and the participant should be kept constant<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item></list></td></tr><tr><td align="left" valign="top">Virtual twin</td><td align="left" valign="top"><list list-type="bullet"><list-item><p>3D visualization, intuitive<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Accurate<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Assistive setup<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref 
ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Clear visualization of the anatomical structures&#x2019; location<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Direct views<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Easy to use<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Good guidance<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Guidance to the targets depends on the distance to the landmarks<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Head could be moved to draw<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Intuitive<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Less irritating than SI<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Like working with a textbook on the side<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>No registration problem, 3D model hardly disturbs as an aid<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Only exit points are good to paint and recognizable<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Opportunity to apply to other structures<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>With improved guidance, anatomical structures can be localized more effectively, referring to reference only when necessary<sup><xref ref-type="table-fn" 
rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Without overlapping, both the pen and the drawing position are clearly visible<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item></list></td><td align="left" valign="top"><list list-type="bullet"><list-item><p>Better able to rotate or zoom in<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Better to reposition or move the model without moving yourself<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Confusing and inaccurate<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Deficiency of necessary landmarks<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Hard to estimate where to draw<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Image lacking sharpness<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Lack of 3D guidance<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Limited transferability to the real head<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Little added value compared to drawing according to anatomical landmarks<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Localization cannot be tracked as precisely as with SI<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Longer time for eyes to adapt to<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Need to turn around the head phantom and hard to find the correct position<sup><xref ref-type="table-fn" 
rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Not practical for clinical use<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Possible spatial discrepancy, inaccurate drawing, impractical<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Required more cognitive effort compared to direct projection<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Rotation of the virtual head is restricted<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>Slower in time<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup><sup>,</sup><sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Spatial depth is hard to estimate<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item><list-item><p>Switching attention back and forth between the head and hologram is confusing<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup></p></list-item><list-item><p>The smooth white head phantom offers few points of reference, hard to transfer the anatomical structures<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></p></list-item></list></td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>Dental and medical student.</p></fn><fn id="table3fn2"><p><sup>b</sup>Oral and maxillofacial, plastic, and oral surgeons.</p></fn></table-wrap-foot></table-wrap></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>We systematically evaluated the localization accuracy between two visualization modalities: SI with markerless inside-out tracking and VT for different types of anatomical structures in the head and neck region. 
The primary endpoint (absolute accuracy of 0D structure) revealed that SI was significantly more accurate than VT by 1.4 mm (<italic>P=</italic>.003). In terms of relative accuracy of 0D point structures, SI also outperformed VT by a margin of 2.6 mm (<italic>P</italic>&#x003C;.001). VT showed comparable accuracy for 2D structures and notably superior accuracy (ASD, <italic>P=</italic>.01; HD, <italic>P</italic>=.03) for 3D structures, although it required an additional 16 seconds on average (<italic>P=</italic>.02). Likert questions revealed comparable results between two modalities. Feedback from open-ended questions (<xref ref-type="table" rid="table3">Table 3</xref>) highlighted SI for ease of understanding, intuitiveness, and time efficiency, yet noted persistent challenges with depth perception, visual occlusion, and virtual-real misalignment. Conversely, VT was perceived as simpler, clearer, and free of occlusion and misalignment issues, despite lacking direct positional cues on real head phantoms and requiring frequent attention shifts between physical and virtual models. Overall, user preferences were evenly split, reflecting comparable experiences despite each modality&#x2019;s distinct strengths and limitations.</p></sec><sec id="s4-2"><title>Respective Strengths and Weaknesses</title><p>In contrast to VT without tracking, the accuracy of SI depends on the inside-out tracking of the HMD used and can be attributed to 3 main factors, namely the registration accuracy of the tracking (Vuforia), the spatial mapping performance (HL2), and the visual occlusion [<xref ref-type="bibr" rid="ref27">27</xref>]. 
Previous studies illustrated Vuforia software development kit&#x2019;s registration in the HL2 highly depended on the richness of the shape and texture of the tracked target and ranged from less than 2 mm to more than 10 mm for translational error [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref28">28</xref>], which can propagate into an angular deviation of the task-specific cutting plane up to 14.7&#x00B0; [<xref ref-type="bibr" rid="ref20">20</xref>]. Furthermore, Vuforia tracking is sensitive to environmental light intensity, distance to the target object, and the extent of the surface covered [<xref ref-type="bibr" rid="ref29">29</xref>]. In addition, HL2 used visual inertial-simultaneous localization and mapping (VI-SLAM) to continuously map the environment and update its position and orientation within a global coordinate system, anchoring virtual content to real-world features [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>]. However, VI-SLAM&#x2019;s accuracy can be affected by factors, such as pose prediction latency, user motion, environment, and sensor fusion, such as poor integration between the red, green, blue camera and inertial measurement unit [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>]. This VI-SLAM error accumulated along the way, reaching 5 mm per 628 mm traveled in the clinical environment [<xref ref-type="bibr" rid="ref31">31</xref>,<xref ref-type="bibr" rid="ref32">32</xref>]. Moreover, the jitter latency caused by such sensor fusion could further compromise user experience, increase cognitive load, and induce fatigue [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref33">33</xref>]. Last but not least, visual occlusion, where virtual objects can obstruct or distort the view of the physical counterpart, further compromises the accuracy of SI. 
Many participants reported difficulty in identifying the position of the pen, drawn line, and occluded virtual content, which was also observed in another study [<xref ref-type="bibr" rid="ref34">34</xref>]. This occlusion problem could lead to severe damage during surgery by overlooking anatomical structures and events [<xref ref-type="bibr" rid="ref35">35</xref>,<xref ref-type="bibr" rid="ref36">36</xref>]. All these factors together may contribute to the overall accuracy achieved by SI.</p><p>On the other hand, VT showed comparable accuracy (inferior in 0D, comparable in 2D, and superior in 3D structures) to SI with markerless inside-out tracking of the HMD, but without the aforementioned problems of SI. This was largely due to VT&#x2019;s design, which bypassed the need for precise virtual-real overlay or accurate anchoring by displaying the virtual model next to the real head phantom. Nevertheless, VT as a visualization modality free of misalignment, unaffected by occlusion, and less sensitive to spatial mapping instability could substitute SI in macro localization tasks. Since VT lacked direct positional cues to guide localization, it likely depended on the surgeon&#x2019;s ability to estimate distance, where surgeons made an average error of 1.4 (SD 1.2) mm in 5 mm and 2.0 (SD 1.9) mm in 1 cm estimation in one study [<xref ref-type="bibr" rid="ref37">37</xref>]. The distances between the nerve exit points in our study were approximately 4 cm between supraorbital and infraorbital foramina and 7 cm between infraorbital and mental foramina. If we assume the estimation error scaled linearly, this corresponded to a mean error of 5.6-9.2 mm, which aligned with VT&#x2019;s average relative accuracy (mean 6.0, SD 5.0 mm), inferior to SI (mean 3.4, SD 2.2 mm). 
Therefore, one could argue that SI is only meaningful if its accuracy exceeds the limits of human distance estimation.</p></sec><sec id="s4-3"><title>Comparison to Prior Work</title><p>In scenarios where precise localization is required, such as orbital fracture reconstruction or trajectory drilling, optical tracking remains the most accurate method to date [<xref ref-type="bibr" rid="ref15">15</xref>]. Consequently, numerous studies have adopted optical tracking to optimize registration of SI. For instance, Tu et al [<xref ref-type="bibr" rid="ref38">38</xref>] achieved entry point accuracy of mean 2.8 (SD 1.3) mm and angular accuracy of mean 3.0&#x00B0; (SD 1.2&#x00B0;), optimizing registration accuracy to mean 2.0 (SD 0.7) mm through optical tracking. Similarly, Iqbal et al [<xref ref-type="bibr" rid="ref39">39</xref>] combined the HL2 built-in camera with an external optical tracking camera, further reducing translation and rotation errors to 2.1 mm and 1.5&#x00B0;, respectively. In contrast, VT with external optical tracking could also visualize both the virtual instrument and the target anatomy but adjacent to the patient in real-time. This framework achieved higher accuracy than the aforementioned SI systems and comparable accuracy to SNS, with translational deviations of mean 0.9 (SD 0.4) mm and mean 1.0 (SD 0.5) mm at entry and end points, respectively, and a rotational deviation of mean 1.1&#x00B0; (SD 0.6&#x00B0;) [<xref ref-type="bibr" rid="ref15">15</xref>], within the clinically feasible range (~2 mm) [<xref ref-type="bibr" rid="ref12">12</xref>]. The noticeable difference between VT by 0.9 (SD 0.4) mm and SI by 2.1 mm with a similar optical tracking framework likely resulted from the aforementioned factors, such as registration errors, VI-SLAM instability, jitter, and visual occlusion. 
This raises the question of whether SI with optical tracking should be considered the optimal AR visualization modality for surgical scenarios, particularly given that VT achieved similar accuracy under similar tracking conditions without encountering these limitations.</p><p>However, all these values assume that localization accuracy is measured in anatomically exposed structures, where perfect localization could theoretically reach 0 mm. Yet this ignores a crucial aspect of real-world scenarios: anatomical structures are typically covered by tissue, which prevents direct access and inherently limits localization accuracy. In our study, this was particularly relevant due to the soft and bone tissue overlying the anatomical target structures (ie, nerve exit points, inferior alveolar nerves, and salivary glands). According to the literature, the average soft tissue thickness of the head, face, and neck is 9.4 (SD 6.2; range 2.4-28.1) mm in women and 10.5 (SD 7.2; range 2.7-32.4) mm in men [<xref ref-type="bibr" rid="ref40">40</xref>]. 
Thus, the measured localization accuracies observed with SI and VT for nerve exit points cannot be directly compared to classical navigation scenarios with fully exposed anatomical targets, as they are composed of 4 main influencing factors</p><disp-formula id="E3"><mml:math id="eqn1"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mi>L</mml:mi><mml:mi>o</mml:mi><mml:mi>c</mml:mi><mml:mi>a</mml:mi><mml:mi>l</mml:mi><mml:mi>i</mml:mi><mml:mi>z</mml:mi><mml:mi>a</mml:mi><mml:mi>t</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mtext>&#x00A0;</mml:mtext><mml:mi>A</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mi>u</mml:mi><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:msub><mml:mi>y</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>=</mml:mo><mml:munder><mml:mrow><mml:munder><mml:mrow><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>1</mml:mn></mml:mrow></mml:msub><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mtext>Overlaying Tissue Thickness</mml:mtext><mml:mrow><mml:mi>i</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x23DF;</mml:mo></mml:munder></mml:mrow><mml:mrow><mml:mtext>Anatomical Constraint</mml:mtext></mml:mrow></mml:munder><mml:mo>+</mml:mo></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><disp-formula id="E4"><mml:math id="eqn2"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mstyle displaystyle="true" scriptlevel="0"><mml:munder><mml:mrow><mml:munder><mml:mrow><mml:msub><mml:mi>&#x03B2;</mml:mi><mml:mrow><mml:mn>2</mml:mn></mml:mrow></mml:msub><mml:mo>&#x22C5;</mml:mo><mml:msub><mml:mtext>Modality</mml:mtext><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub></mml:mrow><mml:mo>&#x23DF;</mml:mo></mml:munder></mml:mrow><mml:mrow><mml:mtext>Visualization Modality (SI vs. 
VT)</mml:mtext></mml:mrow></mml:munder><mml:mo>+</mml:mo><mml:munder><mml:mrow><mml:munder><mml:msub><mml:mi>u</mml:mi><mml:mrow><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x23DF;</mml:mo></mml:munder></mml:mrow><mml:mrow><mml:mtext>Subject Bias</mml:mtext></mml:mrow></mml:munder><mml:mo>+</mml:mo><mml:munder><mml:mrow><mml:munder><mml:msub><mml:mi>&#x03B5;</mml:mi><mml:mrow><mml:mi>i</mml:mi><mml:mi>j</mml:mi></mml:mrow></mml:msub><mml:mo>&#x23DF;</mml:mo></mml:munder></mml:mrow><mml:mrow><mml:mtext>Residual Error (AR Noise)</mml:mtext></mml:mrow></mml:munder></mml:mstyle></mml:mrow></mml:mstyle></mml:math></disp-formula><p>First, the tissue thickness overlaid the target structure. This resulted in a decrease in localization accuracy of 1.4 mm per 1 mm of overlying tissue thickness (LMM, unstandardized coefficient &#x03B2;<sub>1</sub>=1.4; <italic>P</italic>&#x003C;.001). Then there is the influence of the VT modality, which added an additional error of 1.4 mm compared to SI (LMM, &#x03B2;<sub>2</sub>=1.4; <italic>P=</italic>.003), and the average participant-specific bias, which was 0.8 mm (average magnitude of random intercept for individuals in the LMM). Finally, the residuals described the general pattern of localization error, with a mean absolute error of 1.8 (SD 2.9) mm for SI and 2.5 (SD 3.5) mm for VT.</p><p>For line- or volume-based structures, this correlation could not be reliably captured by the model. This was probably because localization accuracy depended not only on the viewing direction but also on dynamic changes in the perceived target margin along that direction. This contrasts with the single-point structure, which is invariably depicted as a point in all directions. As a result, the localization accuracy for line- and volume-based structures is biased by viewing direction and margin variability, in addition to the tissue thickness. 
As in Van Gestel et al [<xref ref-type="bibr" rid="ref41">41</xref>], where a brain tumor was dynamically projected onto the skin along a vector from its center to the instrument tip, the participant&#x2019;s line of sight in our study played a comparable role to the instrument tip. As the viewing angle shifted, the visible margin of the gland changed in real time, introducing variability in the drawn curves and affecting both ASD and HD. Even for targets on the skin, like in wound area estimation using photography, variation in camera angle could introduce 10% error [<xref ref-type="bibr" rid="ref42">42</xref>]. Although we could not directly quantify viewing angles and changing margins, aligning the participant&#x2019;s gaze with the vector from the structure&#x2019;s centroid to its nearest skin projection may help minimize delineation errors related to such bias.</p></sec><sec id="s4-4"><title>Clinical Implications</title><p>First, VT appears particularly advantageous for tasks requiring coarse localization and stable spatial orientation. VT provides a reliable anatomical context and could help mitigate cognitive errors, such as confusion of lateral sides or anatomical levels. These errors often arise in apparent symmetrical regions, especially in the absence of clear preoperative marking or adequate visual guidance. For example, in thoracolumbar spine surgery, reliance solely on intraoperative fluoroscopy may be insufficient to reliably distinguish vertebral levels, especially in the presence of anatomic variants, inadequate intraoperative imaging fields, and unreliable surface landmarks, with 50%&#x2010;67% of surgeons reporting such errors [<xref ref-type="bibr" rid="ref43">43</xref>,<xref ref-type="bibr" rid="ref44">44</xref>]. VT could orient the surgeon by allowing the user to align CT-based virtual models with the patient&#x2019;s posture, enabling clear visualization of the spine and reducing wrong-level or wrong-side misorientation. 
Second, in maxillofacial reconstruction, the VT technique offers significant value by displaying planned bone segments and prebent fixation plates alongside the operative field. This side-by-side visualization enables real-time comparison and intraoperative adjustment of plate bending, reducing the need for repeated fitting at the surgical site as standard techniques do [<xref ref-type="bibr" rid="ref45">45</xref>], and thereby lowering the risk of infection. Compared with preoperative 3D printing, such a technique could also minimize fabrication time and offer greater flexibility for intraoperative adjustments. In the following free flap reconstruction procedures, VT offers robustness in environments prone to bleeding, swelling, or tissue deformation, where SI overlays can drift or become unreliable. By anchoring the virtual model generated from virtual surgical planning adjacent to the surgical site, VT provides a stable frame of reference with consistent skeletal landmarks, even when soft tissues shift [<xref ref-type="bibr" rid="ref46">46</xref>]. Third, VT is well-suited to fractures and postoncologic defects of the orbit and midface requiring symmetry (eg, zygomatic arch, orbital floor, and medial wall) [<xref ref-type="bibr" rid="ref47">47</xref>]. By rendering the contralateral mirrored anatomy, target orbital volume, planned implant contour, and craniofacial buttresses adjacent to the field, the surgeon could continuously compare the intraoperative reduction with the surgical plan.</p><p>While VT may help reduce orientation errors, SI demonstrates its strength in scenarios that demand high-precision localization. For example, in mandibular reconstruction surgery using the anterolateral femoral flap, accurate localization of the perforator vessels is crucial to flap viability and surgical success. 
One study found that SI with remote-controlled overlay (mean 3.5, SD 2.8 mm) achieved significantly superior localization accuracy for anterolateral femoral perforator vessels compared with ultrasonic color Doppler (mean 9.6, SD 5.8 mm; <italic>P</italic>&#x003C;.001) [<xref ref-type="bibr" rid="ref48">48</xref>]. Our findings showed that SI had clear advantages in point-based localization tasks. This feature is particularly important in procedures such as sentinel lymph node biopsy, where accurately identifying nodes just a few millimeters beneath the surface is crucial for surgical success. Duan et al [<xref ref-type="bibr" rid="ref49">49</xref>] reported that AR SI with motion compensation achieved sub-3 mm localization error in melanoma sentinel lymph node biopsy. Moreover, SI demonstrated significantly superior relative accuracy (<italic>P</italic>&#x003C;.001). This is because, despite the offset, SI preserved the spatial relationships between landmarks. The scenario that benefits from this strength is when relative distances between anatomical points must be accurately estimated, especially when a landmark has already been explored and exposed. For example, in head and neck tumor surgeries, surgeons often use the tragal pointer as a surgical landmark to identify the facial nerve trunk and the maxillary artery during procedures, such as parotidectomy, mandibular osteotomy, and temporomandibular joint arthroplasty [<xref ref-type="bibr" rid="ref50">50</xref>]. In addition, in skull base surgery, surgeons often rely on stable bony landmarks, such as the occipital condyle or mastoid process, to sequentially locate cranial nerve exit points, including the jugular foramen and hypoglossal canal [<xref ref-type="bibr" rid="ref51">51</xref>].</p><p>SI with markerless inside-out tracking and VT could be combined across different stages of the tasks. 
First, VT provides general spatial awareness, such as adapting to specific patient positioning, orienting with comprehensive medical imaging, or selecting approximate entry points. Once a key anatomical landmark is exposed, SI could rapidly guide surgeons to adjacent structures by using relative spatial relationships, minimizing the need for repeated attention switching [<xref ref-type="bibr" rid="ref52">52</xref>,<xref ref-type="bibr" rid="ref53">53</xref>]. If SI causes visual obstruction, cognitive overload, or registration instability and inaccuracy, SI can be temporarily deactivated, allowing VT to take over as a stable spatial reference. This hybrid modality enables adaptive assistance, providing surgeons with tailored support at different procedure stages based on clinical needs.</p><p>Our findings showed that user preferences were almost evenly split between SI and VT, underscoring the limitations of relying on either visualization method in isolation. Rather than competing alternatives, SI and VT could be viewed as complementary tools that respond to different scenarios. While SI enables precise overlay of subcutaneous landmarks, VT provides more reliable orientation under deformation or registration drift. These complementary features suggest that future AR systems should integrate both approaches within a single workflow.</p></sec><sec id="s4-5"><title>Limitations</title><p>This study has some limitations. First, the polystyrene foam head phantoms used in the experiment lacked realistic features, such as skin texture, natural color, and anatomical details, which are critical for accurate identification of anatomical landmarks in real clinical scenarios. However, using these phantoms allowed for reproducible evaluation of the performance of the two modalities. Second, the homogeneous and rigid phantom surface may have favored SI by facilitating registration. 
Unlike real surgical environments, phantoms lack deformable soft tissues, surgical draping, fluids, and light reflections, all of which can substantially increase registration and tracking errors for SI [<xref ref-type="bibr" rid="ref54">54</xref>,<xref ref-type="bibr" rid="ref55">55</xref>]. In contrast, VT does not require accurate overlay; thus, it was not hindered by those problems. These considerations suggest that the relative advantage of SI observed in phantom experiments may be attenuated in vivo, whereas VT could perform more robustly in real surgical settings. Third, in real clinical scenarios, the phantom&#x2019;s components, such as the mandible, could not replicate the mobility of human anatomy. This mobility may pose a significant challenge to markerless inside-out registration and further accurate anatomical localization for SI. In contrast, mobile parts in VT may be a potential solution. To address these challenges, cadaver studies or studies with high-fidelity phantoms replicating the mobility of anatomical structures should be conducted to validate the clinical applicability and generalizability of the findings. Fourth, since the difference in absolute accuracy in the sample size calculation was less than 5 mm, the study may have been underpowered to detect the influence of some fixed effects. Subsequent studies should consider increasing the sample size to enhance statistical power and generalizability. Finally, current findings are constrained to the facial region, where underlying bone structures provide a stable spatial reference. It would be valuable to investigate the performance of two modalities in other regions of the body like the abdomen, where soft tissue may bring additional challenges.</p></sec><sec id="s4-6"><title>Future Directions</title><p>In addition to further validation with cadaveric studies or high-fidelity phantoms, future work should also address technical factors that directly influence localization accuracy. 
In particular, subcutaneous soft tissue thickness, variations in viewing perspective, and the resulting margin variability were shown to pose consistent challenges for both AR modalities. To mitigate these effects, new visualization approaches need to be developed to reduce the effects of viewing perspective and account for the effects of the overlying tissue, regardless of the visualization modality. First, the user&#x2019;s viewing angle could be guided in AR. One possible strategy would be to create a virtual cylindrical tunnel of 2 circles between the target structure and the skin surface, orienting the user to view in a planned direction. Second, the AR visualization should establish a clear connection between the overlying tissue and the target structures, for example, for nerve exit points, a line connecting the points and their planned skin projection, clearly identifying the planned margin and mitigating the inaccuracy introduced by the overlying tissue.</p><p>While these approaches address specific visualization challenges, the next step lies in advancing toward a hybrid, context-aware AR system. With advances in registration accuracy, hardware performance, and integration of AI technologies, such a system could autonomously detect procedural phases, surgical context, and anatomical exposure. Based on this contextual understanding, it could dynamically switch between VT and SI modes, providing global spatial orientation and reference by VT and precise overlays for local structure localization by SI. This intelligent modality would reduce cognitive load and enable phase-specific surgical guidance.</p></sec><sec id="s4-7"><title>Conclusion</title><p>This study systematically compared SI with markerless inside-out tracking and VT for surgical localization tasks in the head and neck region. 
SI demonstrated superior localization accuracy in 0D structures, whereas VT revealed robust spatial orientation, comparable accuracy in 2D, and superior accuracy in 3D structures. These complementary strengths suggested that VT represents a viable alternative for macro localization, while SI may be preferable for fine-grained, sequential landmark tasks. Rather than assuming SI to be universally applicable across all surgical contexts, our findings emphasize the need for context-adaptive AR strategies that can dynamically leverage the strengths of both modalities.</p></sec></sec></body><back><ack><p>We sincerely thank all participating students and surgeons for their valuable contributions to this study. Open access funding provided by the Open Access Publishing Fund of RWTH (Rheinisch-Westf&#x00E4;lische Technische Hochschule) Aachen University.</p></ack><notes><sec><title>Funding</title><p>This work was supported by the REACT-EU project KITE (grant number EFRE-0801977; Plattform f&#x00FC;r KI-Translation Essen; https://kite.ikim.nrw/), FWF enFaced 2.0 (grant number KLI-1044; https://enfaced2.ikim.nrw/), and the Clinician Scientist Program of the Faculty of Medicine RWTH Aachen University (BHP). 
CG was funded by the Advanced Research Opportunities Program (AROP) of RWTH (Rheinisch-Westf&#x00E4;lische Technische Hochschule) Aachen University.</p></sec><sec><title>Data Availability</title><p>The datasets generated or analyzed during this study, as well as the Python scripts used for data processing and analysis (stored on GitHub [<xref ref-type="bibr" rid="ref58">58</xref>]), are available from the corresponding author on reasonable request.</p></sec></notes><fn-group><fn fn-type="con"><p>Conceptualization: BHP, CG</p><p>Methodology: CG, YL</p><p>Software: CG, GL, YL</p><p>Validation: YL, KG, KX</p><p>Formal analysis: YL, BHP, KX</p><p>Investigation: YL, KG, GL, KX</p><p>Resources: BHP, JE, GL, FH, RR</p><p>Data curation: YL, KG, GL, KX</p><p>Writing&#x2013;original draft: YL</p><p>Writing&#x2013;review and editing: BHP, YL, KX, GL, CG, KG, AB, FH, RR, MdlF, JE</p><p>Visualization: YL, BHP</p><p>Supervision: BHP, JE</p><p>Project administration: BHP</p><p>Funding acquisition: CG, BHP</p></fn><fn fn-type="conflict"><p>BHP is an associate editor of the <italic>Journal of Medical Internet Research</italic>. 
All other authors declare no other conflicts of interest.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AR</term><def><p>augmented reality</p></def></def-item><def-item><term id="abb2">ASD</term><def><p>average surface distance</p></def></def-item><def-item><term id="abb3">CONSORT</term><def><p>Consolidated Standards of Reporting Trials</p></def></def-item><def-item><term id="abb4">CT</term><def><p>computed tomography</p></def></def-item><def-item><term id="abb5">HD</term><def><p>Hausdorff distance</p></def></def-item><def-item><term id="abb6">HL2</term><def><p>HoloLens 2</p></def></def-item><def-item><term id="abb7">LMM</term><def><p>linear mixed-effects model</p></def></def-item><def-item><term id="abb8">NASA-TLX</term><def><p>National Aeronautics and Space Administration Task Load Index</p></def></def-item><def-item><term id="abb9">RWTH</term><def><p>Rheinisch-Westf&#x00E4;lische Technische Hochschule</p></def></def-item><def-item><term id="abb10">SNS</term><def><p>surgical navigation systems</p></def></def-item><def-item><term id="abb11">VI-SLAM</term><def><p>visual inertial-simultaneous localization and mapping</p></def></def-item><def-item><term id="abb12">VT</term><def><p>virtual twin</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Christison-Lagay</surname><given-names>E</given-names> </name></person-group><article-title>Complications in head and neck surgery</article-title><source>Semin Pediatr Surg</source><year>2016</year><month>12</month><volume>25</volume><issue>6</issue><fpage>338</fpage><lpage>346</lpage><pub-id pub-id-type="doi">10.1053/j.sempedsurg.2016.10.007</pub-id><pub-id pub-id-type="medline">27989359</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Nadershah</surname><given-names>M</given-names> </name><name name-style="western"><surname>Salama</surname><given-names>A</given-names> </name></person-group><article-title>Removal of parotid, submandibular, and sublingual glands</article-title><source>Oral Maxillofac Surg Clin North Am</source><year>2012</year><month>05</month><volume>24</volume><issue>2</issue><fpage>295</fpage><lpage>305</lpage><pub-id pub-id-type="doi">10.1016/j.coms.2012.01.005</pub-id><pub-id pub-id-type="medline">22364886</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bachar</surname><given-names>G</given-names> </name><name name-style="western"><surname>Siewerdsen</surname><given-names>JH</given-names> </name><name name-style="western"><surname>Daly</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Jaffray</surname><given-names>DA</given-names> </name><name name-style="western"><surname>Irish</surname><given-names>JC</given-names> </name></person-group><article-title>Image quality and localization accuracy in C-arm tomosynthesis-guided head and neck surgery</article-title><source>Med Phys</source><year>2007</year><month>12</month><volume>34</volume><issue>12</issue><fpage>4664</fpage><lpage>4677</lpage><pub-id pub-id-type="doi">10.1118/1.2799492</pub-id><pub-id pub-id-type="medline">18196794</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wippold</surname><given-names>FJ</given-names> </name></person-group><article-title>Head and neck imaging: the role of CT and MRI</article-title><source>J Magn Reson Imaging</source><year>2007</year><month>03</month><volume>25</volume><issue>3</issue><fpage>453</fpage><lpage>465</lpage><pub-id 
pub-id-type="doi">10.1002/jmri.20838</pub-id><pub-id pub-id-type="medline">17279529</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ishibashi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Fujii</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kawamoto</surname><given-names>K</given-names> </name><etal/></person-group><article-title>The ability to identify the intraparotid facial nerve for locating parotid gland lesions in comparison to other indirect landmark methods: evaluation by 3.0 T MR imaging with surface coils</article-title><source>Neuroradiology</source><year>2010</year><month>11</month><volume>52</volume><issue>11</issue><fpage>1037</fpage><lpage>1045</lpage><pub-id pub-id-type="doi">10.1007/s00234-010-0718-1</pub-id><pub-id pub-id-type="medline">20505928</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Glas</surname><given-names>HH</given-names> </name><name name-style="western"><surname>Kraeima</surname><given-names>J</given-names> </name><name name-style="western"><surname>van Ooijen</surname><given-names>PMA</given-names> </name><name name-style="western"><surname>Spijkervet</surname><given-names>FKL</given-names> </name><name name-style="western"><surname>Yu</surname><given-names>L</given-names> </name><name name-style="western"><surname>Witjes</surname><given-names>MJH</given-names> </name></person-group><article-title>Augmented reality visualization for image-guided surgery: a validation study using a three-dimensional printed phantom</article-title><source>J Oral Maxillofac Surg</source><year>2021</year><month>09</month><volume>79</volume><issue>9</issue><fpage>1943</fpage><pub-id 
pub-id-type="doi">10.1016/j.joms.2021.04.001</pub-id><pub-id pub-id-type="medline">34033801</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Remschmidt</surname><given-names>B</given-names> </name><name name-style="western"><surname>Rieder</surname><given-names>M</given-names> </name><name name-style="western"><surname>Gsaxner</surname><given-names>C</given-names> </name><name name-style="western"><surname>Gaessler</surname><given-names>J</given-names> </name><name name-style="western"><surname>Payer</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wallner</surname><given-names>J</given-names> </name></person-group><article-title>Augmented reality&#x2013;guided apicoectomy based on maxillofacial CBCT scans</article-title><source>Diagnostics (Basel)</source><year>2023</year><month>09</month><day>25</day><volume>13</volume><issue>19</issue><fpage>3037</fpage><pub-id pub-id-type="doi">10.3390/diagnostics13193037</pub-id><pub-id pub-id-type="medline">37835780</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Landaeta-Quinones</surname><given-names>CG</given-names> </name><name name-style="western"><surname>Hernandez</surname><given-names>N</given-names> </name><name name-style="western"><surname>Zarroug</surname><given-names>NK</given-names> </name></person-group><article-title>Computer-assisted surgery: applications in dentistry and oral and maxillofacial surgery</article-title><source>Dent Clin North Am</source><year>2018</year><month>07</month><volume>62</volume><issue>3</issue><fpage>403</fpage><lpage>420</lpage><pub-id pub-id-type="doi">10.1016/j.cden.2018.03.009</pub-id><pub-id pub-id-type="medline">29903558</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Karnatz</surname><given-names>N</given-names> </name><name name-style="western"><surname>M&#x00F6;llmann</surname><given-names>HL</given-names> </name><name name-style="western"><surname>Wilkat</surname><given-names>M</given-names> </name><name name-style="western"><surname>Parviz</surname><given-names>A</given-names> </name><name name-style="western"><surname>Rana</surname><given-names>M</given-names> </name></person-group><article-title>Advances and innovations in ablative head and neck oncologic surgery using mixed reality technologies in personalized medicine</article-title><source>J Clin Med</source><year>2022</year><month>08</month><day>16</day><volume>11</volume><issue>16</issue><fpage>4767</fpage><pub-id pub-id-type="doi">10.3390/jcm11164767</pub-id><pub-id pub-id-type="medline">36013006</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Gsaxner</surname><given-names>C</given-names> </name><name name-style="western"><surname>Pepe</surname><given-names>A</given-names> </name><name name-style="western"><surname>Wallner</surname><given-names>J</given-names> </name><name name-style="western"><surname>Schmalstieg</surname><given-names>D</given-names> </name><name name-style="western"><surname>Egger</surname><given-names>J</given-names> </name></person-group><article-title>Markerless image-to-face registration for untethered augmented reality in head and neck surgery</article-title><year>2019</year><conf-name>Medical Image Computing and Computer Assisted Intervention (MICCAI 2019)</conf-name><conf-date>Oct 13-17, 2019</conf-date><conf-loc>Cham, Switzerland</conf-loc><publisher-name>Springer</publisher-name><fpage>236</fpage><lpage>244</lpage><pub-id pub-id-type="doi">10.1007/978-3-030-32254-0</pub-id></nlm-citation></ref><ref 
id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pepe</surname><given-names>A</given-names> </name><name name-style="western"><surname>Trotta</surname><given-names>GF</given-names> </name><name name-style="western"><surname>Mohr-Ziak</surname><given-names>P</given-names> </name><etal/></person-group><article-title>A marker-less registration approach for mixed reality&#x2013;aided maxillofacial surgery: a pilot evaluation</article-title><source>J Digit Imaging</source><year>2019</year><month>12</month><volume>32</volume><issue>6</issue><fpage>1008</fpage><lpage>1018</lpage><pub-id pub-id-type="doi">10.1007/s10278-019-00272-6</pub-id><pub-id pub-id-type="medline">31485953</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Andrews</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Henry</surname><given-names>AB</given-names> </name><name name-style="western"><surname>Soriano</surname><given-names>IM</given-names> </name><name name-style="western"><surname>Southworth</surname><given-names>MK</given-names> </name><name name-style="western"><surname>Silva</surname><given-names>JR</given-names> </name></person-group><article-title>Registration techniques for clinical applications of three-dimensional augmented reality devices</article-title><source>IEEE J Transl Eng Health Med</source><year>2021</year><volume>9</volume><fpage>4900214</fpage><pub-id pub-id-type="doi">10.1109/JTEHM.2020.3045642</pub-id><pub-id pub-id-type="medline">33489483</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>P&#x00E9;rez-Pach&#x00F3;n</surname><given-names>L</given-names> </name><name 
name-style="western"><surname>Poyade</surname><given-names>M</given-names> </name><name name-style="western"><surname>Lowe</surname><given-names>T</given-names> </name><name name-style="western"><surname>Gr&#x00F6;ning</surname><given-names>F</given-names> </name></person-group><article-title>Image overlay surgery based on augmented reality: a systematic review</article-title><source>Adv Exp Med Biol</source><year>2020</year><volume>1260</volume><fpage>175</fpage><lpage>195</lpage><pub-id pub-id-type="doi">10.1007/978-3-030-47483-6_10</pub-id><pub-id pub-id-type="medline">33211313</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rose</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>H</given-names> </name><name name-style="western"><surname>Fuchs</surname><given-names>H</given-names> </name><name name-style="western"><surname>Frahm</surname><given-names>JM</given-names> </name></person-group><article-title>Development of augmented-reality applications in otolaryngology-head and neck surgery</article-title><source>Laryngoscope</source><year>2019</year><month>10</month><volume>129</volume><issue>S3</issue><fpage>S1</fpage><lpage>S11</lpage><pub-id pub-id-type="doi">10.1002/lary.28098</pub-id><pub-id pub-id-type="medline">31260127</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Drobinsky</surname><given-names>S</given-names> </name><name name-style="western"><surname>Becker</surname><given-names>P</given-names> </name><etal/></person-group><article-title>Accuracy and efficiency of drilling trajectories with augmented reality versus conventional navigation randomized crossover 
trial</article-title><source>NPJ Digit Med</source><year>2024</year><month>11</month><day>10</day><volume>7</volume><issue>1</issue><fpage>316</fpage><pub-id pub-id-type="doi">10.1038/s41746-024-01314-2</pub-id><pub-id pub-id-type="medline">39523443</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wolf</surname><given-names>J</given-names> </name><name name-style="western"><surname>Luchmann</surname><given-names>D</given-names> </name><name name-style="western"><surname>Lohmeyer</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Farshad</surname><given-names>M</given-names> </name><name name-style="western"><surname>F&#x00FC;rnstahl</surname><given-names>P</given-names> </name><name name-style="western"><surname>Meboldt</surname><given-names>M</given-names> </name></person-group><article-title>How different augmented reality visualizations for drilling affect trajectory deviation, visual attention, and user experience</article-title><source>Int J Comput Assist Radiol Surg</source><year>2023</year><month>08</month><volume>18</volume><issue>8</issue><fpage>1363</fpage><lpage>1371</lpage><pub-id pub-id-type="doi">10.1007/s11548-022-02819-5</pub-id><pub-id pub-id-type="medline">36808552</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Grossberg</surname><given-names>A</given-names> </name><name name-style="western"><surname>Mohamed</surname><given-names>A</given-names> </name><name name-style="western"><surname>Elhalawani</surname><given-names>H</given-names> </name><etal/></person-group><article-title>HNSCC</article-title><source>The Cancer Imaging Archive</source><year>2020</year><access-date>2026-02-02</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://www.cancerimagingarchive.net/collection/hnscc/">https://www.cancerimagingarchive.net/collection/hnscc/</ext-link></comment></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Thabit</surname><given-names>A</given-names> </name><name name-style="western"><surname>Benmahdjoub</surname><given-names>M</given-names> </name><name name-style="western"><surname>van Veelen</surname><given-names>MLC</given-names> </name><name name-style="western"><surname>Niessen</surname><given-names>WJ</given-names> </name><name name-style="western"><surname>Wolvius</surname><given-names>EB</given-names> </name><name name-style="western"><surname>van Walsum</surname><given-names>T</given-names> </name></person-group><article-title>Augmented reality navigation for minimally invasive craniosynostosis surgery: a phantom study</article-title><source>Int J CARS</source><year>2022</year><month>08</month><volume>17</volume><issue>8</issue><fpage>1453</fpage><lpage>1460</lpage><pub-id pub-id-type="doi">10.1007/s11548-022-02634-y</pub-id><pub-id pub-id-type="medline">35507209</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Frantz</surname><given-names>T</given-names> </name><name name-style="western"><surname>Jansen</surname><given-names>B</given-names> </name><name name-style="western"><surname>Duerinck</surname><given-names>J</given-names> </name><name name-style="western"><surname>Vandemeulebroucke</surname><given-names>J</given-names> </name></person-group><article-title>Augmenting Microsoft&#x2019;s HoloLens with vuforia tracking for neuronavigation</article-title><source>Healthc Technol Lett</source><year>2018</year><month>10</month><volume>5</volume><issue>5</issue><fpage>221</fpage><lpage>225</lpage><pub-id 
pub-id-type="doi">10.1049/htl.2018.5079</pub-id><pub-id pub-id-type="medline">30464854</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chui</surname><given-names>ECS</given-names> </name><name name-style="western"><surname>Mak</surname><given-names>KKK</given-names> </name><name name-style="western"><surname>Ng</surname><given-names>RHT</given-names> </name><etal/></person-group><article-title>Application of image recognition-based tracker-less augmented reality navigation system in a series of sawbone trials</article-title><source>Arthroplasty</source><year>2024</year><month>08</month><day>2</day><volume>6</volume><issue>1</issue><fpage>39</fpage><pub-id pub-id-type="doi">10.1186/s42836-024-00263-1</pub-id><pub-id pub-id-type="medline">39090719</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="thesis"><person-group person-group-type="author"><name name-style="western"><surname>Vorobiov</surname><given-names>V</given-names> </name></person-group><article-title>On 3D pose estimation for XR. 
classic approaches vs neural networks [Bachelor&#x2019;s thesis]</article-title><year>2021</year><access-date>2025-12-03</access-date><publisher-name>Ukrainian Catholic University</publisher-name><comment><ext-link ext-link-type="uri" xlink:href="https://er.ucu.edu.ua/items/3696ed7e-49d4-45c4-bba4-be06fd4320eb">https://er.ucu.edu.ua/items/3696ed7e-49d4-45c4-bba4-be06fd4320eb</ext-link></comment></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Palla</surname><given-names>B</given-names> </name><name name-style="western"><surname>Callahan</surname><given-names>N</given-names> </name></person-group><article-title>Does the use of computer-assisted surgery affect the margin status in resections of ameloblastoma?</article-title><source>J Oral Maxillofac Surg</source><year>2021</year><month>07</month><volume>79</volume><issue>7</issue><fpage>1467</fpage><lpage>1473</lpage><pub-id pub-id-type="doi">10.1016/j.joms.2020.09.007</pub-id><pub-id pub-id-type="medline">33031772</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Fowler</surname><given-names>J</given-names> </name><name name-style="western"><surname>Campanile</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Warner</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Surgical margins of the oral cavity: is 5 mm really necessary?</article-title><source>J Otolaryngol Head Neck Surg</source><year>2022</year><month>10</month><day>4</day><volume>51</volume><issue>1</issue><fpage>38</fpage><pub-id pub-id-type="doi">10.1186/s40463-022-00584-8</pub-id><pub-id pub-id-type="medline">36195903</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Kuznetsova</surname><given-names>A</given-names> </name><name name-style="western"><surname>Brockhoff</surname><given-names>PB</given-names> </name><name name-style="western"><surname>Christensen</surname><given-names>RHB</given-names> </name></person-group><article-title>lmerTest package: tests in linear mixed effects models</article-title><source>J Stat Soft</source><year>2017</year><volume>82</volume><issue>13</issue><fpage>1</fpage><lpage>26</lpage><pub-id pub-id-type="doi">10.18637/jss.v082.i13</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dwan</surname><given-names>K</given-names> </name><name name-style="western"><surname>Li</surname><given-names>T</given-names> </name><name name-style="western"><surname>Altman</surname><given-names>DG</given-names> </name><name name-style="western"><surname>Elbourne</surname><given-names>D</given-names> </name></person-group><article-title>CONSORT 2010 statement: extension to randomised crossover trials</article-title><source>BMJ</source><year>2019</year><month>07</month><day>31</day><volume>366</volume><fpage>l4378</fpage><pub-id pub-id-type="doi">10.1136/bmj.l4378</pub-id><pub-id pub-id-type="medline">31366597</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schulz</surname><given-names>KF</given-names> </name><name name-style="western"><surname>Altman</surname><given-names>DG</given-names> </name><name name-style="western"><surname>Moher</surname><given-names>D</given-names> </name><collab>CONSORT Group</collab></person-group><article-title>CONSORT 2010 statement: updated guidelines for reporting parallel group randomised 
trials</article-title><source>BMJ</source><year>2010</year><month>03</month><day>23</day><volume>340</volume><fpage>c332</fpage><pub-id pub-id-type="doi">10.1136/bmj.c332</pub-id><pub-id pub-id-type="medline">20332509</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Doughty</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ghugre</surname><given-names>NR</given-names> </name><name name-style="western"><surname>Wright</surname><given-names>GA</given-names> </name></person-group><article-title>Augmenting performance: a systematic review of optical see-through head-mounted displays in surgery</article-title><source>J Imaging</source><year>2022</year><month>07</month><day>20</day><volume>8</volume><issue>7</issue><fpage>203</fpage><pub-id pub-id-type="doi">10.3390/jimaging8070203</pub-id><pub-id pub-id-type="medline">35877647</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Allen</surname><given-names>DR</given-names> </name><name name-style="western"><surname>Peters</surname><given-names>TM</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>ECS</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Linte</surname><given-names>CA</given-names> </name><name name-style="western"><surname>Siewerdsen</surname><given-names>JH</given-names> </name></person-group><article-title>A co-calibration framework for the accuracy assessment of vision-based tracking systems</article-title><conf-name>SPIE Medical Imaging 2022: Image-Guided Procedures, Robotic Interventions, and Modeling</conf-name><conf-date>Feb 20-24, 2022</conf-date><conf-loc>San Diego, United 
States</conf-loc><fpage>598</fpage><lpage>608</lpage><pub-id pub-id-type="doi">10.1117/12.2606815</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Sendari</surname><given-names>S</given-names> </name><name name-style="western"><surname>Firmansah</surname><given-names>A</given-names> </name></person-group><article-title>Performance analysis of augmented reality based on Vuforia using 3D marker detection</article-title><access-date>2025-12-03</access-date><conf-name>2020 4th International Conference on Vocational Education and Training (ICOVET)</conf-name><conf-date>Sep 19, 2020</conf-date><conf-loc>Malang, Indonesia</conf-loc><fpage>294</fpage><lpage>298</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=9229480">https://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=9229480</ext-link></comment><pub-id pub-id-type="doi">10.1109/ICOVET50258.2020.9230276</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Wilmott</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Erkelens</surname><given-names>IM</given-names> </name><name name-style="western"><surname>Murdison</surname><given-names>TS</given-names> </name><name name-style="western"><surname>Rio</surname><given-names>KW</given-names> </name></person-group><article-title>Perceptibility of jitter in augmented reality head-mounted displays</article-title><access-date>2025-12-03</access-date><conf-name>2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)</conf-name><conf-date>Oct 17-21, 2022</conf-date><conf-loc>Singapore, Singapore</conf-loc><fpage>470</fpage><lpage>478</lpage><comment><ext-link ext-link-type="uri" 
xlink:href="https://ieeexplore.ieee.org/document/9995228">https://ieeexplore.ieee.org/document/9995228</ext-link></comment><pub-id pub-id-type="doi">10.1109/ISMAR55827.2022.00063</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Feigl</surname><given-names>T</given-names> </name><name name-style="western"><surname>Porada</surname><given-names>A</given-names> </name><name name-style="western"><surname>Steiner</surname><given-names>S</given-names> </name><name name-style="western"><surname>L&#x00F6;ffler</surname><given-names>C</given-names> </name><name name-style="western"><surname>Mutschler</surname><given-names>C</given-names> </name><name name-style="western"><surname>Philippsen</surname><given-names>M</given-names> </name></person-group><article-title>Localization limitations of ARCore, ARKit, and Hololens in dynamic large-scale industry environments</article-title><conf-name>15th International Conference on Computer Graphics Theory and Applications</conf-name><conf-date>Feb 27-29, 2020</conf-date><conf-loc>Valletta, Malta</conf-loc><fpage>307</fpage><lpage>318</lpage><pub-id pub-id-type="doi">10.5220/0008989903070318</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Matyash</surname><given-names>I</given-names> </name><name name-style="western"><surname>Kutzner</surname><given-names>R</given-names> </name><name name-style="western"><surname>Neumuth</surname><given-names>T</given-names> </name><name name-style="western"><surname>Rockstroh</surname><given-names>M</given-names> </name></person-group><article-title>Accuracy measurement of HoloLens2 IMUs in medical environments</article-title><source>Curr Dir Biomed 
Eng</source><year>2021</year><month>10</month><day>1</day><volume>7</volume><issue>2</issue><fpage>633</fpage><lpage>636</lpage><pub-id pub-id-type="doi">10.1515/cdbme-2021-2161</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Gsaxner</surname><given-names>C</given-names> </name><name name-style="western"><surname>Li</surname><given-names>J</given-names> </name><name name-style="western"><surname>Pepe</surname><given-names>A</given-names> </name><name name-style="western"><surname>Schmalstieg</surname><given-names>D</given-names> </name><name name-style="western"><surname>Egger</surname><given-names>J</given-names> </name></person-group><article-title>Inside-out instrument tracking for surgical navigation in augmented reality</article-title><access-date>2025-12-03</access-date><conf-name>VRST &#x2019;21</conf-name><conf-date>Dec 8-10, 2021</conf-date><conf-loc>Osaka, Japan</conf-loc><fpage>1</fpage><lpage>11</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://dl.acm.org/doi/proceedings/10.1145/3489849">https://dl.acm.org/doi/proceedings/10.1145/3489849</ext-link></comment><pub-id pub-id-type="doi">10.1145/3489849.3489863</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Koyachi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Sugahara</surname><given-names>K</given-names> </name><name name-style="western"><surname>Tachizawa</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Mixed-reality and computer-aided design/computer-aided manufacturing technology for mandibular reconstruction: a case description</article-title><source>Quant Imaging Med 
Surg</source><year>2023</year><month>06</month><day>1</day><volume>13</volume><issue>6</issue><fpage>4050</fpage><lpage>4056</lpage><pub-id pub-id-type="doi">10.21037/qims-22-1118</pub-id><pub-id pub-id-type="medline">37284085</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dixon</surname><given-names>BJ</given-names> </name><name name-style="western"><surname>Daly</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Chan</surname><given-names>H</given-names> </name><name name-style="western"><surname>Vescan</surname><given-names>AD</given-names> </name><name name-style="western"><surname>Witterick</surname><given-names>IJ</given-names> </name><name name-style="western"><surname>Irish</surname><given-names>JC</given-names> </name></person-group><article-title>Surgeons blinded by enhanced navigation: the effect of augmented reality on attention</article-title><source>Surg Endosc</source><year>2013</year><month>02</month><volume>27</volume><issue>2</issue><fpage>454</fpage><lpage>461</lpage><pub-id pub-id-type="doi">10.1007/s00464-012-2457-3</pub-id><pub-id pub-id-type="medline">22833264</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Dixon</surname><given-names>BJ</given-names> </name><name name-style="western"><surname>Daly</surname><given-names>MJ</given-names> </name><name name-style="western"><surname>Chan</surname><given-names>HHL</given-names> </name><name name-style="western"><surname>Vescan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Witterick</surname><given-names>IJ</given-names> </name><name name-style="western"><surname>Irish</surname><given-names>JC</given-names> </name></person-group><article-title>Inattentional blindness increased 
with augmented reality surgical navigation</article-title><source>Am J Rhinol Allergy</source><year>2014</year><volume>28</volume><issue>5</issue><fpage>433</fpage><lpage>437</lpage><pub-id pub-id-type="doi">10.2500/ajra.2014.28.4067</pub-id><pub-id pub-id-type="medline">25198032</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Conway</surname><given-names>RG</given-names> </name><name name-style="western"><surname>O&#x2019;Neill</surname><given-names>N</given-names> </name><name name-style="western"><surname>Brown</surname><given-names>J</given-names> </name><name name-style="western"><surname>Kavic</surname><given-names>S</given-names> </name></person-group><article-title>An educated guess - distance estimation by surgeons</article-title><source>Surg Open Sci</source><year>2020</year><month>07</month><volume>2</volume><issue>3</issue><fpage>113</fpage><lpage>116</lpage><pub-id pub-id-type="doi">10.1016/j.sopen.2020.04.001</pub-id><pub-id pub-id-type="medline">33981983</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tu</surname><given-names>P</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Joskowicz</surname><given-names>L</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>X</given-names> </name></person-group><article-title>A multi-view interactive virtual-physical registration method for mixed reality based surgical navigation in pelvic and acetabular fracture fixation</article-title><source>Int J Comput Assist Radiol Surg</source><year>2023</year><month>09</month><volume>18</volume><issue>9</issue><fpage>1715</fpage><lpage>1724</lpage><pub-id 
pub-id-type="doi">10.1007/s11548-023-02884-4</pub-id><pub-id pub-id-type="medline">37031310</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Iqbal</surname><given-names>H</given-names> </name><name name-style="western"><surname>Rodriguez y Baena</surname><given-names>F</given-names> </name></person-group><article-title>Semi-automatic infrared calibration for augmented reality systems in surgery</article-title><conf-name>Proceedings of the 2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)</conf-name><conf-date>Oct 23-27, 2022</conf-date><conf-loc>Kyoto, Japan</conf-loc><fpage>4957</fpage><lpage>4964</lpage><pub-id pub-id-type="doi">10.1109/IROS47612.2022.9982215</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Yang</surname><given-names>W</given-names> </name><name name-style="western"><surname>He</surname><given-names>RK</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>T</given-names> </name><name name-style="western"><surname>Haining</surname><given-names>W</given-names> </name><name name-style="western"><surname>Goossens</surname><given-names>R</given-names> </name><name name-style="western"><surname>Huysmans</surname><given-names>T</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Rebelo</surname><given-names>F</given-names> </name></person-group><article-title>Soft tissue thickness estimation for head, face, and neck from CT data for product design purposes</article-title><source>Ergonomics In Design</source><year>2022</year><publisher-name>AHFE International</publisher-name><fpage>558</fpage><lpage>565</lpage><pub-id 
pub-id-type="doi">10.54941/ahfe1001983</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Van Gestel</surname><given-names>F</given-names> </name><name name-style="western"><surname>Frantz</surname><given-names>T</given-names> </name><name name-style="western"><surname>Buyck</surname><given-names>F</given-names> </name><etal/></person-group><article-title>Neuro-oncological augmented reality planning for intracranial tumor resection</article-title><source>Front Neurol</source><year>2023</year><volume>14</volume><fpage>1104571</fpage><pub-id pub-id-type="doi">10.3389/fneur.2023.1104571</pub-id><pub-id pub-id-type="medline">36998774</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chang</surname><given-names>AC</given-names> </name><name name-style="western"><surname>Dearman</surname><given-names>B</given-names> </name><name name-style="western"><surname>Greenwood</surname><given-names>JE</given-names> </name></person-group><article-title>A comparison of wound area measurement techniques: visitrak versus photography</article-title><source>Eplasty</source><year>2011</year><month>04</month><day>18</day><volume>11</volume><fpage>e18</fpage><pub-id pub-id-type="medline">21559060</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Epstein</surname><given-names>N</given-names> </name></person-group><article-title>A perspective on wrong level, wrong side, and wrong site spine surgery</article-title><source>Surg Neurol Int</source><year>2021</year><volume>12</volume><fpage>286</fpage><pub-id pub-id-type="doi">10.25259/SNI_402_2021</pub-id><pub-id 
pub-id-type="medline">34221617</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shah</surname><given-names>M</given-names> </name><name name-style="western"><surname>Halalmeh</surname><given-names>DR</given-names> </name><name name-style="western"><surname>Sandio</surname><given-names>A</given-names> </name><name name-style="western"><surname>Tubbs</surname><given-names>RS</given-names> </name><name name-style="western"><surname>Moisi</surname><given-names>MD</given-names> </name></person-group><article-title>Anatomical variations that can lead to spine surgery at the wrong level: part III lumbosacral</article-title><source>Cureus</source><year>2020</year><month>07</month><day>28</day><volume>12</volume><issue>7</issue><fpage>e9433</fpage><pub-id pub-id-type="doi">10.7759/cureus.9433</pub-id><pub-id pub-id-type="medline">32864257</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wilde</surname><given-names>F</given-names> </name><name name-style="western"><surname>Winter</surname><given-names>K</given-names> </name><name name-style="western"><surname>Kletsch</surname><given-names>K</given-names> </name><name name-style="western"><surname>Lorenz</surname><given-names>K</given-names> </name><name name-style="western"><surname>Schramm</surname><given-names>A</given-names> </name></person-group><article-title>Mandible reconstruction using patient-specific pre-bent reconstruction plates: comparison of standard and transfer key methods</article-title><source>Int J Comput Assist Radiol Surg</source><year>2015</year><month>02</month><volume>10</volume><issue>2</issue><fpage>129</fpage><lpage>140</lpage><pub-id pub-id-type="doi">10.1007/s11548-014-1065-1</pub-id><pub-id 
pub-id-type="medline">24810111</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Metzler</surname><given-names>P</given-names> </name><name name-style="western"><surname>Geiger</surname><given-names>EJ</given-names> </name><name name-style="western"><surname>Alcon</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>X</given-names> </name><name name-style="western"><surname>Steinbacher</surname><given-names>DM</given-names> </name></person-group><article-title>Three-dimensional virtual surgery accuracy for free fibula mandibular reconstruction: planned versus actual results</article-title><source>J Oral Maxillofac Surg</source><year>2014</year><month>12</month><volume>72</volume><issue>12</issue><fpage>2601</fpage><lpage>2612</lpage><pub-id pub-id-type="doi">10.1016/j.joms.2014.07.024</pub-id><pub-id pub-id-type="medline">25315311</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Khaqani</surname><given-names>MS</given-names> </name><name name-style="western"><surname>Tavosi</surname><given-names>F</given-names> </name><name name-style="western"><surname>Gholami</surname><given-names>M</given-names> </name><name name-style="western"><surname>Eftekharian</surname><given-names>HR</given-names> </name><name name-style="western"><surname>Khojastepour</surname><given-names>L</given-names> </name></person-group><article-title>Analysis of facial symmetry after zygomatic bone fracture management</article-title><source>J Oral Maxillofac Surg</source><year>2018</year><month>03</month><volume>76</volume><issue>3</issue><fpage>595</fpage><lpage>604</lpage><pub-id pub-id-type="doi">10.1016/j.joms.2017.10.005</pub-id><pub-id 
pub-id-type="medline">29121488</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wei</surname><given-names>B</given-names> </name><name name-style="western"><surname>Lu</surname><given-names>G</given-names> </name><name name-style="western"><surname>Bai</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>Augmented reality in preoperative anterolateral thigh flap perforators positioning: a pilot diagnostic study</article-title><source>Oral Oncol</source><year>2025</year><month>03</month><volume>162</volume><fpage>107189</fpage><pub-id pub-id-type="doi">10.1016/j.oraloncology.2025.107189</pub-id><pub-id pub-id-type="medline">39954606</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Duan</surname><given-names>H</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Niu</surname><given-names>WL</given-names> </name><etal/></person-group><article-title>Localization of sentinel lymph nodes using augmented-reality system: a cadaveric feasibility study</article-title><source>Eur J Nucl Med Mol Imaging</source><year>2025</year><month>08</month><volume>52</volume><issue>10</issue><fpage>3643</fpage><lpage>3652</lpage><pub-id pub-id-type="doi">10.1007/s00259-025-07216-z</pub-id><pub-id pub-id-type="medline">40123008</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Muhleman</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Wartmann</surname><given-names>CT</given-names> </name><name name-style="western"><surname>Hage</surname><given-names>R</given-names> 
</name><etal/></person-group><article-title>A review of the tragal pointer: anatomy and its importance as a landmark in surgical procedures</article-title><source>Folia Morphol (Warsz)</source><year>2012</year><month>05</month><volume>71</volume><issue>2</issue><fpage>59</fpage><lpage>64</lpage><pub-id pub-id-type="medline">22648581</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Thintharua</surname><given-names>P</given-names> </name><name name-style="western"><surname>Chentanez</surname><given-names>V</given-names> </name></person-group><article-title>Morphological analysis and morphometry of the occipital condyle and its relationship to the foramen magnum, jugular foramen, and hypoglossal canal: implications for craniovertebral junction surgery</article-title><source>Anat Cell Biol</source><year>2023</year><month>03</month><day>31</day><volume>56</volume><issue>1</issue><fpage>61</fpage><lpage>68</lpage><pub-id pub-id-type="doi">10.5115/acb.22.105</pub-id><pub-id pub-id-type="medline">36635090</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liebmann</surname><given-names>F</given-names> </name><name name-style="western"><surname>Roner</surname><given-names>S</given-names> </name><name name-style="western"><surname>von Atzigen</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Pedicle screw navigation using surface digitization on the Microsoft HoloLens</article-title><source>Int J Comput Assist Radiol Surg</source><year>2019</year><month>07</month><volume>14</volume><issue>7</issue><fpage>1157</fpage><lpage>1165</lpage><pub-id pub-id-type="doi">10.1007/s11548-019-01973-7</pub-id><pub-id pub-id-type="medline">30993519</pub-id></nlm-citation></ref><ref 
id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pietruski</surname><given-names>P</given-names> </name><name name-style="western"><surname>Majak</surname><given-names>M</given-names> </name><name name-style="western"><surname>&#x015A;wiatek-Najwer</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Supporting mandibular resection with intraoperative navigation utilizing augmented reality technology - a proof of concept study</article-title><source>J Craniomaxillofac Surg</source><year>2019</year><month>06</month><volume>47</volume><issue>6</issue><fpage>854</fpage><lpage>859</lpage><pub-id pub-id-type="doi">10.1016/j.jcms.2019.03.004</pub-id><pub-id pub-id-type="medline">30914226</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Schlueter-Brust</surname><given-names>K</given-names> </name><name name-style="western"><surname>Henckel</surname><given-names>J</given-names> </name><name name-style="western"><surname>Katinakis</surname><given-names>F</given-names> </name><etal/></person-group><article-title>Augmented-reality-assisted K-wire placement for glenoid component positioning in reversed shoulder arthroplasty: a proof-of-concept study</article-title><source>J Pers Med</source><year>2021</year><month>08</month><day>10</day><volume>11</volume><issue>8</issue><fpage>777</fpage><pub-id pub-id-type="doi">10.3390/jpm11080777</pub-id><pub-id pub-id-type="medline">34442421</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Suter</surname><given-names>D</given-names> </name><name name-style="western"><surname>Hodel</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Liebmann</surname><given-names>F</given-names> </name><name name-style="western"><surname>F&#x00FC;rnstahl</surname><given-names>P</given-names> </name><name name-style="western"><surname>Farshad</surname><given-names>M</given-names> </name></person-group><article-title>Factors affecting augmented reality head-mounted device performance in real OR</article-title><source>Eur Spine J</source><year>2023</year><month>10</month><volume>32</volume><issue>10</issue><fpage>3425</fpage><lpage>3433</lpage><pub-id pub-id-type="doi">10.1007/s00586-023-07826-x</pub-id><pub-id pub-id-type="medline">37552327</pub-id></nlm-citation></ref><ref id="ref58"><label>56</label><nlm-citation citation-type="web"><article-title>AR-VIZ [accessible by request]</article-title><source>GitHub</source><access-date>2025-12-24</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://github.com/OMFSdigital/AR-VIZ">https://github.com/OMFSdigital/AR-VIZ</ext-link></comment></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Checklist 1</label><p>CONSORT checklist.</p><media xlink:href="games_v14i1e75962_app1.pdf" xlink:title="PDF File, 68 KB"/></supplementary-material></app-group></back></article>