<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article xmlns:xlink="http://www.w3.org/1999/xlink" article-type="research-article" dtd-version="2.0">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JSG</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Serious Games</journal-id>
      <journal-title>JMIR Serious Games</journal-title>
      <issn pub-type="epub">2291-9279</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v10i3e38669</article-id>
      <article-id pub-id-type="pmid">35793129</article-id>
      <article-id pub-id-type="doi">10.2196/38669</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Designing Virtual Reality–Based Conversational Agents to Train Clinicians in Verbal De-escalation Skills: Exploratory Usability Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Zary</surname>
            <given-names>Nabil</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Alvarez-Lopez</surname>
            <given-names>Fernando</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Bustillo</surname>
            <given-names>Andres</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Lerner</surname>
            <given-names>Dieter</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Moore</surname>
            <given-names>Nathan</given-names>
          </name>
          <degrees>BN, MEd</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <address>
            <institution>Digital Health Solutions</institution>
            <institution>Western Sydney Local Health District</institution>
            <addr-line>Cumberland Hospital East Campus</addr-line>
            <addr-line>Building 106</addr-line>
            <addr-line>North Parramatta, 2151</addr-line>
            <country>Australia</country>
            <phone>61 0427850889</phone>
            <email>nathan.moore@health.nsw.gov.au</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-2056-7955</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Ahmadpour</surname>
            <given-names>Naseem</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-1818-1930</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Brown</surname>
            <given-names>Martin</given-names>
          </name>
          <degrees>MH</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6225-3770</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Poronnik</surname>
            <given-names>Philip</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-6246-528X</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author">
          <name name-style="western">
            <surname>Davids</surname>
            <given-names>Jennifer</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-0861-9994</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Digital Health Solutions</institution>
        <institution>Western Sydney Local Health District</institution>
        <addr-line>North Parramatta</addr-line>
        <country>Australia</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Design Lab</institution>
        <institution>Sydney School of Architecture, Design and Planning</institution>
        <institution>The University of Sydney</institution>
        <addr-line>Sydney</addr-line>
        <country>Australia</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Faculty of Medicine and Health Media Lab, Education Innovation, School of Medical Sciences</institution>
        <institution>Faculty of Medicine and Health</institution>
        <institution>The University of Sydney</institution>
        <addr-line>Sydney</addr-line>
        <country>Australia</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Research and Education Network</institution>
        <institution>Western Sydney Local Health District</institution>
        <addr-line>Westmead</addr-line>
        <country>Australia</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Nathan Moore <email>nathan.moore@health.nsw.gov.au</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <season>Jul-Sep</season>
        <year>2022</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>6</day>
        <month>7</month>
        <year>2022</year>
      </pub-date>
      <volume>10</volume>
      <issue>3</issue>
      <elocation-id>e38669</elocation-id>
      <history>
        <date date-type="received">
          <day>12</day>
          <month>4</month>
          <year>2022</year>
        </date>
        <date date-type="rev-request">
          <day>5</day>
          <month>5</month>
          <year>2022</year>
        </date>
        <date date-type="rev-recd">
          <day>17</day>
          <month>5</month>
          <year>2022</year>
        </date>
        <date date-type="accepted">
          <day>12</day>
          <month>6</month>
          <year>2022</year>
        </date>
      </history>
      <copyright-statement>©Nathan Moore, Naseem Ahmadpour, Martin Brown, Philip Poronnik, Jennifer Davids. Originally published in JMIR Serious Games (https://games.jmir.org), 06.07.2022.</copyright-statement>
      <copyright-year>2022</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Serious Games, is properly cited. The complete bibliographic information, a link to the original publication on https://games.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://games.jmir.org/2022/3/e38669" xlink:type="simple"/>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>Violence and aggression are significant workplace challenges faced by clinicians worldwide. Traditional methods of training consist of “on-the-job learning” and role-play simulations. Although both approaches can result in improved skill levels, they are not without limitation. Interactive simulations using virtual reality (VR) can complement traditional training processes as a cost-effective, engaging, easily accessible, and flexible training tool.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>In this exploratory study, we aimed to determine the feasibility of and barriers to verbal engagement with a virtual agent in the context of the Code Black VR application. Code Black VR is a new interactive VR-based verbal de-escalation trainer that we developed based on the Clinical Training Through VR Design Framework.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>In total, 28 participants with varying clinical expertise from 4 local hospitals enrolled in the Western Sydney Local Health District Clinical Initiative Nurse program and Transition to Emergency Nursing Programs and participated in 1 of 5 workshops. They completed multiple playthroughs of the Code Black VR verbal de-escalation trainer application and verbally interacted with a virtual agent. We documented observations and poststudy reflection notes. After the playthroughs, the users completed the System Usability Scale and provided written comments on their experience. A thematic analysis was conducted on the results. Data were also obtained through the application itself, which also recorded the total interactions and successfully completed interactions.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>The Code Black VR verbal de-escalation training application was well received. The findings reinforced the factors in the existing design framework and identified 3 new factors—motion sickness, perceived value, and privacy—to be considered for future application development.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Verbal interaction with a virtual agent is feasible for training staff in verbal de-escalation skills. It is an effective medium to supplement clinician training in verbal de-escalation skills. We provide broader design considerations to guide further developments in this area.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>virtual reality</kwd>
        <kwd>code black</kwd>
        <kwd>verbal de-escalation</kwd>
        <kwd>violence and aggression</kwd>
        <kwd>education</kwd>
        <kwd>clinical training</kwd>
        <kwd>conversational agent</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <sec>
        <title>Background</title>
        <p>Violence and aggression in health care settings is an international problem, with one-fifth of health care professionals experiencing violence perpetrated by patients or family members every year [<xref ref-type="bibr" rid="ref1">1</xref>]. The implementation of effective training in high-quality verbal de-escalation skills has been shown to be vital for the well-being of staff and patients. When used early, these skills can prevent escalation of the situation to violence [<xref ref-type="bibr" rid="ref2">2</xref>].</p>
        <p>“A Code Black is any incident where hospital staff are threatened with or experience verbal, physical or psychological abuse or attack during the course of their employment and a staff response is required” [<xref ref-type="bibr" rid="ref3">3</xref>]. The code black response involves a variety of interventions that may be required preincident occurrence, during the incident, or postincident occurrence [<xref ref-type="bibr" rid="ref3">3</xref>]. There is evidence that the delivery of high-quality violence and aggression minimization training has a positive impact on staff members’ perceived confidence in managing these situations [<xref ref-type="bibr" rid="ref4">4</xref>]. In addition, this style of training correlates with a reduction in the number of incidents of violence and aggression occurring in health care [<xref ref-type="bibr" rid="ref5">5</xref>].</p>
        <p>Current practice for Violence Prevention and Management training within our Local Health District (LHD) is conducted using a traditional face-to-face didactic simulation and skills station format. This training allows clinicians to collaborate and practice these skills in a group setting with expert support from course instructors. Simulation has been clearly established as an effective way to train clinicians in the application of skills and to reduce anxiety before the application of these skills [<xref ref-type="bibr" rid="ref6">6</xref>]. However, a significant challenge posed by this type of training is that it requires significant resources and clinicians to be present in a fixed location and time [<xref ref-type="bibr" rid="ref7">7</xref>]. Given the resource limitations, the complexity of staffing requirements, and the high number of staff members requiring training, the current demand for this training far exceeds the capacity for the program’s delivery. In addition, there is a need for refresher training for staff, and this is not factored into the current program’s face-to-face delivery schedule. e-Learning training modules and videos are available; however, there are questions about the efficacy of e-learning to impact health care professionals’ behaviors, skills, or knowledge [<xref ref-type="bibr" rid="ref8">8</xref>].</p>
        <p>Owing to these challenges and the increasing occurrence of code black, we developed a pilot supplemental virtual reality (VR)–based application to support clinician education in code black management. The initial phase of this project scoped the breadth of the problem and involved staff interviews and incident review. Our initial findings highlighted that staff required training on how to recognize behaviors of concern in an individual and practice in verbal de-escalation skills to minimize or prevent aggression or violence toward a staff member [<xref ref-type="bibr" rid="ref2">2</xref>]. Therefore, we reasoned that there is a need for readily accessible and frequently repeatable experiential training in managing code black events [<xref ref-type="bibr" rid="ref3">3</xref>].</p>
        <p>To address the barriers of access, cost, and availability of training, VR is being adopted as a supplemental modality to support clinician education in several scenarios [<xref ref-type="bibr" rid="ref9">9</xref>]. For example, VR has been explored to train students in conflict management as part of the research being conducted by the University of Newcastle [<xref ref-type="bibr" rid="ref10">10</xref>]. While still in trial, the approach is being well received by students and is showing potential for providing education in this challenging area. Previously, we used VR as a tool within the health district to train clinicians in the management of advanced life support (ALS) [<xref ref-type="bibr" rid="ref11">11</xref>]. Building upon our experience in this area and our findings in previous VR user needs analysis [<xref ref-type="bibr" rid="ref12">12</xref>], we developed a VR-based application to support code black training.</p>
      </sec>
      <sec>
        <title>VR Technology for Verbal De-escalation Training</title>
        <p>The novel capabilities of VR provide significant potential for educators to use this approach to supplement and, in some cases, replace traditional learning modalities [<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref14">14</xref>]. The interaction with 3D representations of people, items, and environments in real time can allow users to practice the application of skills. Deploying a VR app on portable and standalone VR head-mounted displays (HMDs) enables users to engage with training in a time and setting of their choosing, which may not have been possible otherwise [<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]. In addition, the level of <italic>blocking out</italic> of the physical world made possible by the VR HMDs can further increase the immersion of the user and increase presence; that is, the feeling of being “present” in the scenario [<xref ref-type="bibr" rid="ref16">16</xref>]. This results in the suspension of disbelief and the generation of authentic reactions in users due to increased engagement with the experience. The ability to record and assess user interactions within a VR app allows for completely objective, structured assessment and feedback on the platform, which can be difficult with human assessors [<xref ref-type="bibr" rid="ref17">17</xref>]. Voice recognition within the application can add an element of increased engagement with the technology but also brings with it challenges and limitations with regard to misinterpretation or misunderstanding errors, which must be overcome [<xref ref-type="bibr" rid="ref18">18</xref>].</p>
      </sec>
      <sec>
        <title>Conversational Agents for Training</title>
        <p>Increasing processor power and technological advancements have resulted in the emergence of virtual conversational agents as a potential training modality. BodySwaps is a company developing VR-based “soft skills” training where users can observe interactions between virtual agents and make structured observations and interact with the virtual agent to provide comment and feedback [<xref ref-type="bibr" rid="ref19">19</xref>]. The responses are recorded and provided as feedback to the user for development. The content provided to the users focuses on human resources and management-style scenarios. Other studies using virtual conversational agents have been conducted by the University of Newcastle in an application built to train student nurses called “Angry Stan.” The application uses biofeedback to train clinicians to remain calm in confronting situations. The user responses are chosen from a multiple-choice list of scripted responses, and the agent responds to both the choices made by the user and their chosen responses [<xref ref-type="bibr" rid="ref20">20</xref>].</p>
      </sec>
      <sec>
        <title>Objectives</title>
        <p>It is clearly identified that a structured and coordinated response by well-trained staff using a shared organizational approach is vital to minimize the risk to both staff and patients [<xref ref-type="bibr" rid="ref21">21</xref>]. Given the identified barriers to the accessibility of existing training approaches, we aimed to develop a VR-based supplemental code black training application. We have previously developed VR-based applications to support clinical education in areas such as ALS management, leadership, clinical handover, and dignity in the workplace [<xref ref-type="bibr" rid="ref11">11</xref>]. However, verbal interaction with a virtual agent that can respond to user input is a far more complex challenge, and there is little research that can guide the design of such interactions.</p>
        <p>The aim of this study was 2-fold: (1) to determine the validity of using the existing “Clinical Training Through VR Design Framework” to assess the feasibility of VR-based education modules [<xref ref-type="bibr" rid="ref12">12</xref>] and (2) to identify specific design requirements for a VR-based agent to train clinicians in verbal de-escalation skills in the context of code black management.</p>
        <p>The Clinical Training Through VR Design Framework involves 8 factors that define the clinician training needs that must be supported within the VR environment and through the interactions [<xref ref-type="bibr" rid="ref12">12</xref>]. These factors are realistic tasks, visibility, completion, accessibility, agency, diverse input modalities, mental models, and advanced roles [<xref ref-type="bibr" rid="ref12">12</xref>]. In this original iteration of the framework, a subgrouping of some commonly associated factors was provided.</p>
      </sec>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Overview</title>
        <p>This was a mixed methods study. Opportunistic recruitment occurred during the Western Sydney LHD (WSLHD) Transition to Emergency Nursing and Clinical Initiative Nurse programs. Five workshops were conducted during these programs, each lasting approximately 20 minutes at the Westmead Hospital Simulated Learning Environment for Clinical Training simulation center. Data were collected through qualitative observation notes by researchers and a postuse survey was completed by participants, which were put into the context of analytic data obtained from the Code Black VR system.</p>
        <p>At the start of the study, participants were given an orientation that consisted of a brief overview of the study and instructions regarding the use of the Oculus Quest 2 headset. Following the orientation, the participants undertook multiple unguided playthroughs in VR with instructors (NM, MB, and JD) observing the group and answering any questions. Each individual playthrough lasted approximately 10 minutes. Upon completion, the participants completed a questionnaire based on the System Usability Scale (SUS) [<xref ref-type="bibr" rid="ref22">22</xref>] and had the opportunity to add open-ended comments at the end. The SUS was used for this study because it is a widely adopted tool for the evaluation of user interfaces with a focus on usability [<xref ref-type="bibr" rid="ref23">23</xref>]. The SUS consists of 10 distinct questions designed to evaluate the user interface. Each question was rated on a 5-point scale ranging from strongly agree to strongly disagree. The final calculation provides a score of 0 to 100 with a SUS score of more than 68 being deemed “above average” [<xref ref-type="bibr" rid="ref22">22</xref>]:</p>
        <list list-type="order">
          <list-item>
            <p>I think that I would like to use this system frequently.</p>
          </list-item>
          <list-item>
            <p>I found the system unnecessarily complex.</p>
          </list-item>
          <list-item>
            <p>I thought the system was easy to use.</p>
          </list-item>
          <list-item>
            <p>I think that I would need the support of a technical person to be able to use this system.</p>
          </list-item>
          <list-item>
            <p>I found the various functions of this system were well integrated.</p>
          </list-item>
          <list-item>
            <p>I thought there was too much inconsistency in this system.</p>
          </list-item>
          <list-item>
            <p>I would imagine that most people would learn to use this system very quickly.</p>
          </list-item>
          <list-item>
            <p>I found the system very cumbersome to use.</p>
          </list-item>
          <list-item>
            <p>I felt very confident using the system.</p>
          </list-item>
          <list-item>
            <p>I needed to learn a lot of things before I could get going with the system.</p>
          </list-item>
        </list>
        <p>The researchers present during the session also documented their reflections on the overall session after the workshops. The data obtained from the SUS were consolidated, and a final score was calculated to determine the overall usability of the system. A thematic analysis was conducted on participants’ open-ended comments to identify common themes and better understand the user experience of the tool. Researcher observations were summarized separately, offering insights into the identified themes. In addition, the back-end application records the success and failure of speech-to-text (STT) interpretations. These analytics were collated to provide objective data on in-app use.</p>
      </sec>
      <sec>
        <title>Code Black VR</title>
        <sec>
          <title>Design Rationale</title>
          <p>On the basis of prior experience with developing simulation-based educational experiences in both traditional and virtual settings, we identified that any solution to support this training needed to be flexible and must address the needs of both clinicians and educators alike. In our view, to be truly flexible, the VR solution had to run on portable and untethered HMDs. This allows “free range” deployment into any environment as there is no need for any additional external computers.</p>
          <p>To address the requirement of verbal de-escalation skill training, a realistic dialogue between the user and the virtual agent is vital. In collaboration with Frameless Interactive, we used microphone access within the applications for the Oculus Quest 2 HMD. Microphone access allows speech-to-text transcription to occur within the VR application. Keyword analysis and categorization of the resulting text file enables the app linguistic program to produce text-based responses that are then converted by text-to-speech software using the Google text-to-speech application programming interface into credible verbal interaction with a virtual agent within the HMD. Using the 8 preidentified affordances is vital for VR design for clinician education, and this guided the initial prototype build of the Code Black VR verbal de-escalation trainer that was tested in this study.</p>
          <p>The application was created to work on the Oculus Quest 2 HMD [<xref ref-type="bibr" rid="ref24">24</xref>]. The portable nature, processing power, and microphone capability of the Quest 2 makes it an ideal choice for this application. The Code Black VR verbal de-escalation training application was built on the Unreal game engine [<xref ref-type="bibr" rid="ref25">25</xref>]. Unreal was used because it has high-level textures and editing capabilities and provides a finished product of a high visual standard. In addition, we were able to leverage developments and learnings from the ALS-SimVR [<xref ref-type="bibr" rid="ref11">11</xref>] app to improve the cost-effectiveness of the development process.</p>
        </sec>
        <sec>
          <title>Verbal De-escalation Trainer Walkthrough</title>
          <p>The interactive simulation positions the user in an emergency department (ED) waiting room in front of a visibly distressed digital conversational agent named Louis. The man identifies himself as the son of a patient who is awaiting review by a physician. The man is distressed by how long they have been waiting. The user, who was an ED nurse, was instructed during the orientation to press the “Y” or “B” buttons on the Quest 2 controller using a push-to-talk approach to verbally respond to the patient. Following the user response, the agent responds in either a positive or negative fashion, depending on the input. Responses categorized as compassionate resulted in a decrease in the agent’s level of aggression; responses categorized as confrontational resulted in an increase in the agent’s level of aggression. The responses also drive an increase or decrease in the background “frustration level,” which controls the agent’s animations and subsequent responses. In the prototype build, the user can also see information about the progress of the scenario, such as the current “frustration level,” recorded STT results, and agent responses (<xref rid="figure1" ref-type="fig">Figure 1</xref>).</p>
          <p>Following a series of verbal interactions or a set period, the user will have either increased or decreased the agent’s frustration level, and the scenario will conclude depending on what the user said and how they responded. Following the end of the scenario, the user is presented with their performance data (<xref rid="figure2" ref-type="fig">Figure 2</xref>). These data are also recorded on the SimDash Learning Management System created by Frameless Interactive to support all of our VR-based applications. This learning management system allows for more in-depth feedback. The user can later log in to review their performance over time.</p>
          <fig id="figure1" position="float">
            <label>Figure 1</label>
            <caption>
              <p>Virtual conversational agent with status, frustration, response, and speech-to-text result.</p>
            </caption>
            <graphic xlink:href="games_v10i3e38669_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
          <fig id="figure2" position="float">
            <label>Figure 2</label>
            <caption>
              <p>User feedback after the scenario.</p>
            </caption>
            <graphic xlink:href="games_v10i3e38669_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
          </fig>
        </sec>
      </sec>
      <sec>
        <title>Overview of Code Black VR: Verbal De-escalation Training Application Architecture</title>
        <p>The application places the user in a medical setting with proprietary emotional, conversational artificial intelligence (AI) called DriftAI, which drives an agent’s animation and voice. At the start of the scenario, the “frustration level” of the AI, as represented by the digital agent, approaches an angry state, and the user must attempt to de-escalate the agent. The emotional state of the agent shifts depending on what the user says. The AI captures the user’s speech input and analyzes the content of the conversation to determine the user’s intent. It then adjusts the mood of the agent and selects an appropriate response based on the agent’s emotional state for that intent (<xref rid="figure3" ref-type="fig">Figure 3</xref>).</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Artificial intelligence (AI) conversation flow.</p>
          </caption>
          <graphic xlink:href="games_v10i3e38669_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
        <p>The AI response is generated in several ways, from an audio source with associated facial motion capture, AI-generated voice with lip sync, or only text. This choice depends on whether the generated response is already “known” by the system. The dialogue tree is structured in a “sandbox” format. This provides the advantage of allowing any intent to be matched at any time, reducing the need for a large number of predetermined intents to be in place. The scenario runs on a strict input and response system. The story will only advance each time the user inputs something and the agent responds to it. This is repeated until an end point is reached, such as the agent is too angry or the user has successfully de-escalated the agent.</p>
      </sec>
      <sec>
        <title>Ethics Approval</title>
        <p>This study was approved by the WSLHD Human Research Ethics Committee (2019/ETH00598). All the participants provided written informed consent.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Overview</title>
        <p>In total, 28 (19 female and 9 male) participants involved in the WSLHD Clinical Initiative Nurse program from 4 local hospitals participated in this study. Some participants were clinicians and educators (n=6) involved in the delivery of the program while others were junior to intermediate clinicians who were course participants (n=22). All participants had varied levels of experience working in LHD EDs, and the prerequisite of participation in the Clinical Initiative Nurse or Transition to Emergency Nursing Programs is some level of ED experience and exposure to aggression.</p>
      </sec>
      <sec>
        <title>SUS Scoring</title>
        <p>All collected SUS data were analyzed according to the SUS scoring procedure of adding all odd-numbered questions and subtracting 5 to obtain X, adding all even-numbered questions and subtracting the total from 25 to obtain Y, then adding X to Y and multiplying by 2.5 [<xref ref-type="bibr" rid="ref22">22</xref>]. Where there was a missing response, the average of the other responses was used in the calculations. The clinician and education groups were reported separately to aid in identifying any differences in perspective regarding perceived usability. The scores are listed in <xref ref-type="table" rid="table1">Table 1</xref>.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>System Usability Scale (SUS) scoring of the participants (N=28).</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="180"/>
            <col width="70"/>
            <col width="70"/>
            <col width="70"/>
            <col width="70"/>
            <col width="70"/>
            <col width="70"/>
            <col width="70"/>
            <col width="70"/>
            <col width="70"/>
            <col width="70"/>
            <col width="120"/>
            <thead>
              <tr valign="bottom">
                <td>Participant</td>
                <td>Q<sup>a</sup>1</td>
                <td>Q2</td>
                <td>Q3</td>
                <td>Q4</td>
                <td>Q5</td>
                <td>Q6</td>
                <td>Q7</td>
                <td>Q8</td>
                <td>Q9</td>
                <td>Q10</td>
                <td>SUS (×2.5)<sup>b</sup></td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td>Clinician 1</td>
                <td>2</td>
                <td>4</td>
                <td>2</td>
                <td>4</td>
                <td>1</td>
                <td>0</td>
                <td>2</td>
                <td>4</td>
                <td>2</td>
                <td>4</td>
                <td>62.5</td>
              </tr>
              <tr valign="top">
                <td>Clinician 2</td>
                <td>2</td>
                <td>1</td>
                <td>2</td>
                <td>2</td>
                <td>1</td>
                <td>2</td>
                <td>2</td>
                <td>2</td>
                <td>1</td>
                <td>2</td>
                <td>42.5</td>
              </tr>
              <tr valign="top">
                <td>Clinician 3</td>
                <td>3</td>
                <td>4</td>
                <td>3</td>
                <td>4</td>
                <td>3</td>
                <td>4</td>
                <td>3</td>
                <td>1</td>
                <td>3</td>
                <td>4</td>
                <td>42.5</td>
              </tr>
              <tr valign="top">
                <td>Clinician 4</td>
                <td>2</td>
                <td>4</td>
                <td>4</td>
                <td>1</td>
                <td>4</td>
                <td>2</td>
                <td>4</td>
                <td>4</td>
                <td>4</td>
                <td>4</td>
                <td>82.5</td>
              </tr>
              <tr valign="top">
                <td>Clinician 5</td>
                <td>3</td>
                <td>4</td>
                <td>4</td>
                <td>1</td>
                <td>3</td>
                <td>1</td>
                <td>3</td>
                <td>1</td>
                <td>3</td>
                <td>3</td>
                <td>65</td>
              </tr>
              <tr valign="top">
                <td>Clinician 6</td>
                <td>2</td>
                <td>3</td>
                <td>2</td>
                <td>1</td>
                <td>1</td>
                <td>0</td>
                <td>1</td>
                <td>2</td>
                <td>1</td>
                <td>3</td>
                <td>40</td>
              </tr>
              <tr valign="top">
                <td>Clinician 7</td>
                <td>2</td>
                <td>2</td>
                <td>3</td>
                <td>1</td>
                <td>2</td>
                <td>3</td>
                <td>3</td>
                <td>1</td>
                <td>3</td>
                <td>3</td>
                <td>57.5</td>
              </tr>
              <tr valign="top">
                <td>Clinician 8</td>
                <td>3</td>
                <td>4</td>
                <td>4</td>
                <td>1</td>
                <td>2</td>
                <td>2</td>
                <td>4</td>
                <td>1</td>
                <td>2</td>
                <td>3</td>
                <td>65</td>
              </tr>
              <tr valign="top">
                <td>Clinician 9</td>
                <td>3</td>
                <td>4</td>
                <td>0</td>
                <td>4</td>
                <td>3</td>
                <td>3</td>
                <td>4</td>
                <td>4</td>
                <td>3</td>
                <td>4</td>
                <td>80</td>
              </tr>
              <tr valign="top">
                <td>Clinician 10</td>
                <td>3</td>
                <td>3</td>
                <td>2</td>
                <td>4</td>
                <td>4</td>
                <td>2</td>
                <td>3</td>
                <td>2</td>
                <td>1</td>
                <td>2</td>
                <td>65</td>
              </tr>
              <tr valign="top">
                <td>Clinician 11</td>
                <td>2</td>
                <td>2</td>
                <td>1</td>
                <td>0</td>
                <td>3</td>
                <td>DNC<sup>c</sup></td>
                <td>3</td>
                <td>1</td>
                <td>1</td>
                <td>1</td>
                <td>67<sup>d</sup></td>
              </tr>
              <tr valign="top">
                <td>Clinician 12</td>
                <td>3</td>
                <td>1</td>
                <td>3</td>
                <td>1</td>
                <td>3</td>
                <td>3</td>
                <td>3</td>
                <td>2</td>
                <td>1</td>
                <td>1</td>
                <td>52.5</td>
              </tr>
              <tr valign="top">
                <td>Clinician 13</td>
                <td>1</td>
                <td>4</td>
                <td>DNC</td>
                <td>4</td>
                <td>2</td>
                <td>3</td>
                <td>3</td>
                <td>3</td>
                <td>2</td>
                <td>4</td>
                <td>72<sup>d</sup></td>
              </tr>
              <tr valign="top">
                <td>Clinician 14</td>
                <td>4</td>
                <td>4</td>
                <td>4</td>
                <td>0</td>
                <td>3</td>
                <td>3</td>
                <td>4</td>
                <td>0</td>
                <td>4</td>
                <td>4</td>
                <td>75</td>
              </tr>
              <tr valign="top">
                <td>Clinician 15</td>
                <td>4</td>
                <td>1</td>
                <td>3</td>
                <td>0</td>
                <td>2</td>
                <td>2</td>
                <td>4</td>
                <td>3</td>
                <td>2</td>
                <td>2</td>
                <td>57.5</td>
              </tr>
              <tr valign="top">
                <td>Clinician 16</td>
                <td>3</td>
                <td>3</td>
                <td>4</td>
                <td>1</td>
                <td>2</td>
                <td>3</td>
                <td>4</td>
                <td>3</td>
                <td>2</td>
                <td>3</td>
                <td>65</td>
              </tr>
              <tr valign="top">
                <td>Clinician 17</td>
                <td>3</td>
                <td>2</td>
                <td>3</td>
                <td>3</td>
                <td>2</td>
                <td>1</td>
                <td>3</td>
                <td>4</td>
                <td>4</td>
                <td>3</td>
                <td>70</td>
              </tr>
              <tr valign="top">
                <td>Clinician 18</td>
                <td>3</td>
                <td>4</td>
                <td>3</td>
                <td>3</td>
                <td>2</td>
                <td>2</td>
                <td>3</td>
                <td>3</td>
                <td>3</td>
                <td>3</td>
                <td>72.5</td>
              </tr>
              <tr valign="top">
                <td>Clinician 19</td>
                <td>3</td>
                <td>3</td>
                <td>3</td>
                <td>2</td>
                <td>4</td>
                <td>1</td>
                <td>4</td>
                <td>3</td>
                <td>4</td>
                <td>3</td>
                <td>75</td>
              </tr>
              <tr valign="top">
                <td>Clinician 20</td>
                <td>4</td>
                <td>2</td>
                <td>3</td>
                <td>0</td>
                <td>4</td>
                <td>2</td>
                <td>4</td>
                <td>1</td>
                <td>4</td>
                <td>2</td>
                <td>65</td>
              </tr>
              <tr valign="top">
                <td>Clinician 21</td>
                <td>3</td>
                <td>3</td>
                <td>2</td>
                <td>0</td>
                <td>3</td>
                <td>2</td>
                <td>2</td>
                <td>1</td>
                <td>2</td>
                <td>1</td>
                <td>47.5</td>
              </tr>
              <tr valign="top">
                <td>Clinician 22</td>
                <td>2</td>
                <td>1</td>
                <td>2</td>
                <td>2</td>
                <td>2</td>
                <td>2</td>
                <td>3</td>
                <td>2</td>
                <td>1</td>
                <td>1</td>
                <td>45</td>
              </tr>
              <tr valign="top">
                <td>Education 1</td>
                <td>2</td>
                <td>3</td>
                <td>2</td>
                <td>0</td>
                <td>1</td>
                <td>2</td>
                <td>2</td>
                <td>2</td>
                <td>1</td>
                <td>1</td>
                <td>40</td>
              </tr>
              <tr valign="top">
                <td>Education 2</td>
                <td>1</td>
                <td>3</td>
                <td>2</td>
                <td>2</td>
                <td>1</td>
                <td>1</td>
                <td>2</td>
                <td>3</td>
                <td>1</td>
                <td>3</td>
                <td>47.5</td>
              </tr>
              <tr valign="top">
                <td>Education 3</td>
                <td>1</td>
                <td>3</td>
                <td>3</td>
                <td>2</td>
                <td>2</td>
                <td>3</td>
                <td>3</td>
                <td>3</td>
                <td>2</td>
                <td>4</td>
                <td>65</td>
              </tr>
              <tr valign="top">
                <td>Education 4</td>
                <td>1</td>
                <td>3</td>
                <td>1</td>
                <td>2</td>
                <td>2</td>
                <td>0</td>
                <td>1</td>
                <td>3</td>
                <td>DNC</td>
                <td>3</td>
                <td>44<sup>d</sup></td>
              </tr>
              <tr valign="top">
                <td>Education 5</td>
                <td>3</td>
                <td>3</td>
                <td>3</td>
                <td>0</td>
                <td>2</td>
                <td>3</td>
                <td>3</td>
                <td>4</td>
                <td>3</td>
                <td>2</td>
                <td>65</td>
              </tr>
              <tr valign="top">
                <td>Education 6</td>
                <td>3</td>
                <td>2</td>
                <td>2</td>
                <td>1</td>
                <td>2</td>
                <td>1</td>
                <td>3</td>
                <td>2</td>
                <td>1</td>
                <td>0</td>
                <td>42.5</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>Q: question.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>Average usability: 59.5.</p>
            </fn>
            <fn id="table1fn3">
              <p><sup>c</sup>DNC: did not complete.</p>
            </fn>
            <fn id="table1fn4">
              <p><sup>d</sup>Average of results used to calculate total.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Thematic Analysis</title>
        <p>Three researchers (NM, MB, and JD; all experienced in educational VR development) performed a thematic analysis [<xref ref-type="bibr" rid="ref26">26</xref>]. Owing to the clinical skill overlap between the clinical and educational participants, we decided to conduct the thematic analysis on the combined data. After coding, the comments were grouped based on the factors defined in the design framework reported in our previous study [<xref ref-type="bibr" rid="ref12">12</xref>]. The factors identified in the previous study helped inform the development of this application and as such were relevant to the analysis of the newly gathered data [<xref ref-type="bibr" rid="ref12">12</xref>]. The data were then reviewed using an inductive (bottom-up) approach following the process of reflexive thematic analysis (Braun and Clarke [<xref ref-type="bibr" rid="ref26">26</xref>]) to identify any potential codes and themes that may fall outside the initial design framework.</p>
        <p>This process helped determine whether design considerations made more broadly for VR clinical training applications correlate with participant experiences in Code Black VR given that this app uses verbal interactions with a conversational agent. Participants provided insights and comments that specifically corresponded to the 8 factors defined in our framework. However, we also identified 3 other factors that were salient in relation to Code Black VR specifically, namely motion sickness, perceived value, and privacy. For clarity, we have removed the subcategory of affordances. All factors and exemplar descriptions are summarized in <xref ref-type="boxed-text" rid="box1">Textbox 1</xref>.</p>
        <boxed-text id="box1" position="float">
          <title>The revised Clinical Training Through Virtual Reality Design Framework with 11 factors guiding the experience design and exemplar statements.</title>
          <p>
            <bold>Factors and exemplar statements</bold>
          </p>
          <list list-type="bullet">
            <list-item>
              <p>Advanced roles</p>
              <list>
                <list-item>
                  <p>The ability to manage tasks at an acceptable standard</p>
                </list-item>
              </list>
            </list-item>
            <list-item>
              <p>Accessibility</p>
              <list>
                <list-item>
                  <p>Clarity as to how commands are given and accessed</p>
                </list-item>
              </list>
            </list-item>
            <list-item>
              <p>Agency</p>
              <list>
                <list-item>
                  <p>The environment providing opportunity to control workflows autonomously and make choices that align with prior experiences, such as multitasking</p>
                </list-item>
              </list>
            </list-item>
            <list-item>
              <p>Completion</p>
              <list>
                <list-item>
                  <p>Clear commencement and completion prompt to task</p>
                </list-item>
              </list>
            </list-item>
            <list-item>
              <p>Diverse input modalities</p>
              <list>
                <list-item>
                  <p>The environment replicates natural input modalities such as issuing commands verbally</p>
                </list-item>
              </list>
            </list-item>
            <list-item>
              <p>Mental models</p>
              <list>
                <list-item>
                  <p>The environment design and prompts align with how the clinical environment operates (eg, 2 minutes of cardiopulmonary resuscitation completion)</p>
                </list-item>
              </list>
            </list-item>
            <list-item>
              <p>Motion sickness</p>
              <list>
                <list-item>
                  <p>That all efforts are made to reduce the experience of motion sickness for the user so they can engage with the experience</p>
                </list-item>
              </list>
            </list-item>
            <list-item>
              <p>Perceived value</p>
              <list>
                <list-item>
                  <p>The application provides an experience perceived as valuable by the user</p>
                </list-item>
              </list>
            </list-item>
            <list-item>
              <p>Privacy</p>
              <list>
                <list-item>
                  <p>The application and deployment experience should maintain the user’s privacy</p>
                </list-item>
              </list>
            </list-item>
            <list-item>
              <p>Realistic tasks</p>
              <list>
                <list-item>
                  <p>Common clinical tasks should be available for completion in a realistic manner</p>
                </list-item>
              </list>
            </list-item>
            <list-item>
              <p>Visibility</p>
              <list>
                <list-item>
                  <p>Clear visible assets aligned with environmental orientation</p>
                </list-item>
              </list>
            </list-item>
          </list>
        </boxed-text>
      </sec>
      <sec>
        <title>Factors</title>
        <sec>
          <title>Advanced Roles</title>
          <p>Many participants, particularly from the educator group, identified limitations in the way the application handled some of the “higher order” or “advanced skills” they would bring to a de-escalation situation. These included considerations of physical proximity, long-form verbal responses (which were often not interpreted accurately by the application), and the ability to bring the agent’s father into the discussion.</p>
        </sec>
        <sec>
          <title>Accessibility</title>
          <p>Participants highlighted the need for a more comprehensive orientation as to “what to do” and that it was “a bit overwhelming for a first timer.” One participant provided feedback, “I would need more education on how to work it.” Participants expressed preferences for the verbal interaction approach, but the “push-to-talk” method of interacting seemed to be counterintuitive or was forgotten by some participants during scenario playthroughs.</p>
        </sec>
        <sec>
          <title>Agency</title>
          <p>Participants wanted the ability to control and perform the required actions based on their personal experience. In the context of verbal de-escalation in health care, this poses a technological challenge. The often-lengthy verbal responses provided by clinicians were challenging for the AI to understand. This resulted in situations where participants stated, “the AI didn’t respond appropriately to a lot of my responses.” This often stemmed from a failure both to capture what was said accurately and to interpret it correctly.</p>
        </sec>
        <sec>
          <title>Completion</title>
          <p>Lessons learned from our previous work in VR, in which it was at times unclear when a given experience was completed, prompted the researchers to ensure a clear distinction for the participants as to when the verbal de-escalation scenario had finished. At the completion of the scenario, the participants were presented with feedback on their performance. They were then given the opportunity to restart the scenario and exit the application.</p>
        </sec>
        <sec>
          <title>Diverse Input Modalities</title>
          <p>Given the nature of verbal de-escalation skills requiring verbal interactions, participants found verbal interactions with the agent to be an important aspect of the application. Challenges were noted at times with inaccurate interpretations of what the participants had said by the application. Other input modalities, such as reducing proximity to the patient as a de-escalation tool, were also highlighted by the participants.</p>
        </sec>
        <sec>
          <title>Mental Models</title>
          <p>Several participants, particularly from the educator group, commented that the approach they were required to take for success within the application did not align with the approach they would take in the clinical environment. As noted in the <italic>Advanced Roles</italic> section, clinicians will use a variety of strategies to de-escalate a situation. The requests for response to “proximity” and being able to “utilise the dad to de-escalate the situation,” all spoke to the need for greater representation of participants’ mental models, which can be addressed in future iterations of the application.</p>
        </sec>
        <sec>
          <title>Motion Sickness</title>
          <p>Of the 28 participants, 3 (11%) had their experience either cut short or impacted by motion sickness. One responder stated, “made me feel dizzy and I could not continue,” and another stated, “I felt uneasy standing.”</p>
        </sec>
        <sec>
          <title>Perceived Value</title>
          <p>As adult learners, for clinicians to engage with any educational approach, they must perceive the experience to have value and provide motivation to learn [<xref ref-type="bibr" rid="ref27">27</xref>]. The perceived value of the verbal de-escalation experience was mixed within the group, from positive sentiments, “I think it is a very clever concept and could definitely help to improve aggression in a waiting room and to learn how to de-escalate the situation before it gets worse” and “I think and can see a system like this could help train staff with de-escalation,” to those who questioned its value over simulation or its value for money, “it seems like a very expensive educational tool.”</p>
        </sec>
        <sec>
          <title>Privacy</title>
          <p>Some participants expressed concerns around “privacy.” For some participants, being aware of the presence of colleagues around them participating in the same experience was confronting and challenging at times. One participant reported, “I freaked out not knowing who was near me and where,” and another stated, “It felt very disconcerting having other people in the room I could hear but not see.”</p>
        </sec>
        <sec>
          <title>Realistic Tasks</title>
          <p>The participants in our study identified a clear need to be able to complete verbal de-escalation in the virtual environment in a manner that replicated the clinical environment. These include moving further from or closer to the patient and using broad and complex language constructs. In addition, the use of emotional and facial cues from the agent were requested as they are key skills used in de-escalation.</p>
        </sec>
        <sec>
          <title>Visibility</title>
          <p>Participants highlighted, on multiple occasions, the need for increased visibility and realism of the agent they were de-escalating and the surrounding environments. For example, the facial expressions of the agent did not adequately reflect participants’ expectations of a verbally aggressive person. One clinician stated, “his expression and body language didn’t change.” This is largely a technological and budgetary issue.</p>
        </sec>
      </sec>
      <sec>
        <title>Observations</title>
        <p>Observations made by the research team highlighted several barriers that inhibited the participants’ ability to reach the desired objectives within the application.</p>
        <p>Despite initial concerns about the learning curve required to access the application, based on previous VR deployments, we noted that the participants seemed to orientate to the interaction within a few minutes and engage quite quickly and naturally with the virtual agent. We suggest that this was due to the simplicity of the design; that is, there were only 2 buttons on the controller required to engage with the application (front trigger for selection and the top Y or B button for push to talk) and then interact verbally, which is an inherent skill of all participants.</p>
        <p>We noted comments from the participants requesting training in the skills required for de-escalation before using the application. We plan to implement a training mode within the application itself that would be completed before experiencing the simulation scenario. We initially assumed that participants would feel adequately prepared to respond to the situation in this setting. However, our observations have further reinforced the need to provide, before the virtual simulation within the application, both tutorials on how to use the application and, if required, training in the specific skills to be used. This lack of appropriate orientation affected the accessibility of the application.</p>
        <p>As this is a beta pilot version, the application presents the participant with minimal preparation for the scenario. However, this setting represents a common and realistic situation faced by clinicians working in the ED. Nevertheless, it was clear that this was an issue for some participants who were seeking further information, such as the agent’s father’s condition or more background information normally available in the medical records.</p>
      </sec>
      <sec>
        <title>Analytics Data</title>
        <p>A total of 79 scenarios were captured and uploaded to the database during the workshops. In some instances, some scenarios may not have been captured because of participant error or early disconnection. In the 79 scenarios, a total of 416 “interactions” with the AI occurred. Of these 416 interactions, 96 had an empty STT input, which indicates nonrecognition of what the participant had said. This may have occurred because of a misalignment between when the participant pressed the button to start recording and when they started verbally responding to the agent.</p>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <sec>
        <title>Principal Findings</title>
        <p>The results of this study provide novel insights into the potential benefits of using verbal interactions with a VR-based conversational agent to supplement traditional verbal de-escalation training. It has been shown that immersive high-fidelity simulation can be beneficial over participant-to-participant role play due to a higher level of authenticity presented to enable the learning of verbal skills [<xref ref-type="bibr" rid="ref28">28</xref>]. However, to date, there is little evidence of how such simulations can be translated into virtual settings with an elevated level of accuracy. This is principally due to the technology still being in relative infancy as well as the core functionality of speech recognition having identified challenges for more specialized use [<xref ref-type="bibr" rid="ref29">29</xref>].</p>
        <p>We conducted usability testing with feedback collated from the SUS form and free-text comments, as well as detailed observations taken throughout the study. The findings indicated the feasibility of the 8 factors identified to be key to the experience of users in VR training applications [<xref ref-type="bibr" rid="ref12">12</xref>]. This study can provide a useful design direction for Code Black VR and future VR applications for clinical training. The feedback also highlighted several factors that impacted the experience, specifically within the Code Black VR, which were not part of the existing framework: motion sickness, perceived value, and privacy. We believe that these new factors were identified in this study because they had not been observed in our previous work. The larger number of participants increased the likelihood of having participants who experienced certain elements of motion sickness, which is a well-documented side effect for some users [<xref ref-type="bibr" rid="ref30">30</xref>]. We believe the “perceived value” factor arose in this study as it speaks to the highly specialized context of this application. Verbal de-escalation skills are perceived as a vital skill by and for clinicians; however, there is no real standardized way in which the content is delivered. Traditionally, these are skills established over years of service with significant complexity and nuance in their execution [<xref ref-type="bibr" rid="ref3">3</xref>]. The factor of privacy was an unexpected but valuable outcome from this study. In previous studies, authors had conducted VR-based trials with small groups or individual interventions. The nature of this study and the logistics of completing it during the available time slots meant that larger groups were undertaking the experiences in closer proximity. This resulted in an innate awareness of the proximity of others and highlighted the need for this to be considered in future deployment strategies.</p>
        <p>The average usability score of 59.5 placed Code Black VR’s usability into the “poor” ranking on the usability scale. An SUS score below 68 indicates issues with design that require research and resolution [<xref ref-type="bibr" rid="ref23">23</xref>]. This was not surprising, as the aim of this study was to test the feasibility of a newly developed prototype with significant scope for growth. The SUS tool helped the team identify areas of improvement to focus on in the next phase. One example was the need for improved orientation to the application; for example, through an interactive tutorial. We had acknowledged this need previously but underestimated its importance, given the perceived “natural verbal interactions” that the application was incorporating. This provides useful insight for future development and research in this area.</p>
        <p>One surprising observation we made was the difference in perceived usability between the educator group (average score 50.6) and the clinician group (average score 61.6; <xref ref-type="table" rid="table1">Table 1</xref>). We suggest that this disparity arises from the different frames of reference of the more skilled and experienced clinicians and the perceived requirements of experience to train high-level verbal de-escalation skills. This was compared with the understanding of the requirements of the more junior group of clinicians in the study group. This finding aligns with our targeting of the application toward novice or intermediate entrants to the profession to develop basic levels of skills. The ultimate outcome is for high-level dialogue with a virtual agent; however, this requires significant further research and development of the AI system to reach that level of maturity, which is not achievable in the short term for these projects. Understanding the differences between the experience of the 2 groups provides a useful distinction for future studies on VR apps for clinical training and one that has not been addressed in the literature.</p>
        <p>The analytics data allowed us to determine the number of completed scenarios and interactions and confirmed that the technology was functional. The empty STT inputs further reinforced the need to revise not only the way the user is orientated to the experience but also the way the interactions are captured within the device. When clinicians were engaged in dialogue, we witnessed several occasions where the clinicians “forgot” to use the push-to-talk function before responding to the agent. We are currently exploring the use of continual recording of user responses to minimize these dropouts; however, this brings different challenges, such as unintended responses, background noise, and misalignment with overlap occurring between the user and agent response cycles.</p>
      </sec>
      <sec>
        <title>Challenges and Opportunities in Verbal De-escalation Training in VR</title>
        <p>The thematic analysis of the participant responses highlights potential opportunities and limitations for using a combination of VR and conversational agents to address the challenges of developing verbal de-escalation skills. We predict a rapid growth in the application of VR to supplement clinical education in the future, as evidenced by the number of innovations occurring in our LHD. Despite our previous work identifying “user needs” in the design of VR-based applications [<xref ref-type="bibr" rid="ref11">11</xref>,<xref ref-type="bibr" rid="ref12">12</xref>], we identified several challenges in applying our knowledge in the context of the Code Black VR application, because this application features a conversational agent. The complexities of human language with variations in sentence structure, accents, and training are challenging to accommodate in an AI setting [<xref ref-type="bibr" rid="ref31">31</xref>]. Therefore, while we are attempting to enhance user agency and allowing for advanced roles to align with the clinician’s mental models, the technical limitations faced by the current build undermine both usability and user experience. We address some of the ways in which these factors could be addressed in the <italic>Code Black VR—Verbal De-escalation Skills Trainer: Next Steps</italic> section.</p>
      </sec>
      <sec>
        <title>Code Black VR—Verbal De-escalation Skills Trainer: Next Steps</title>
        <p>This study identified several areas where Code Black VR can be improved to enhance the usability and user experience. The improvements and next steps are addressed through language recognition and generation, supplemental education, intra-application efficiencies, and broader application of core content.</p>
      </sec>
      <sec>
        <title>Language Recognition and Generation</title>
        <p>We note that a next step will involve identifying how to interpret, more accurately, the long-form answers that clinicians were providing within their dialogue. This could be achieved by improving the conversation structure by using a staged diatribe approach. This approach will connect user intents together, tagging intents within contextual sections and bringing a more structured approach to the dialogue. This will allow for greater context and improved accuracy in the response system. This approach also coincides with the implementation of a keyword search through the multiple intents to further increase interpretation accuracy.</p>
        <p>We also suggest implementing a process in which the user’s responses are recorded continuously, and the few seconds of speech captured before the interaction button press are included in the submission to the AI. This implementation will assist the experience in 2 important ways. First, it provides a solution for late button presses when the user responds before pressing the interaction button. Second, the DriftAI system will interpret what the user is saying and compare this to previous responses to increase the accuracy of the STT interpretation and subsequent response.</p>
        <p>Another next step will be to improve the way DriftAI handles repetition. Currently, the same response was observed to be triggered multiple times in a row. Even with broader dialogue options, this occurrence is still a possibility. The developers are working on a way to reduce the repetition by tagging some responses as “unrepeatable” to ensure it is only matched once.</p>
        <p>Following the completion of the study reported in this paper, we have also begun trialing a more sophisticated “sandbox” mode to provide an even larger training library to the DriftAI system. This allows for a more realistic and open dialogue with the agent; however, more development is required to control the AI, so it is aware of the context and stays “on track” to meet the desired learning outcomes.</p>
      </sec>
      <sec>
        <title>Supplemental Education</title>
        <p>The simulator, in its current state, requires clinicians to understand the basic verbal de-escalation skills to be successful. As such, we suggest implementing a module for novices to train them in these skills before using the simulator elements.</p>
      </sec>
      <sec>
        <title>Intra-Application Efficiencies</title>
        <p>In conjunction with the verbal de-escalation trainer, we are also deploying several other VR-based applications to support code black training for clinicians. These applications use other VR-based methodologies, such as 360° video of immersive clinical scenarios, auto stitching of the different views to allow the randomization of the 360° videos, and interactive hot spots to use newly adopted observation charts. The objectives of these applications are to support other vital code black skills, such as situational awareness, team planning, the detection of early signs of escalation, role allocation, and observations using the Behaviours of Concern observation chart. The goal is to embed all these modules within a single interconnected training package to better support the needs of the target clinicians. When this occurs, further research of the educational outcomes and effectiveness of the complete package will be conducted.</p>
      </sec>
      <sec>
        <title>Broader Application of Core Content</title>
        <p>We identified broader use cases for the underlying DriftAI technology that enables verbal interactions to occur. We will strive to use the findings of this study to adapt this verbal interaction approach to other clinical training situations using VR, flat-screen television, and mobile deployments.</p>
      </sec>
      <sec>
        <title>Limitations</title>
        <p>The breadth of the participant group in our study was indicative of emergency staff experience and skill sets in our LHD. However, the Code Black VR app is planned to be used in future iterations outside the ED setting. As such, further testing with users not from an ED background will be essential for further development and deployment.</p>
        <p>In addition, the build of the application tested was still in the preliminary stages of development. As such, there were limitations on the available responses of the agent and the ability of the STT system to interpret some of the longer responses that clinicians provided to the agent. It was an intentional decision to conduct the study on such an early build of the application to explore the feasibility of the approach, but future studies on the developed application will be required to better understand the user experience and potential educational outcomes.</p>
        <p>Another limitation of this study was in the method of limiting data collection to written forms and observations. A greater depth of understanding would likely have been established with the inclusion of user interviews during the data collection phase. The reason this was not included is related to the availability of participants who were enrolled in the study on the condition that the study be accommodated into part of their training program, which did not leave adequate time for the interview. We plan to implement user interviews in later studies on further developed iterations of the application; it was unfortunately not feasible for this study.</p>
        <p>We acknowledge that the novel nature of VR can result in the “novelty effect” [<xref ref-type="bibr" rid="ref32">32</xref>], which could account for some of the findings. In addition, further research and comparative studies should be conducted to explore the sustained adoption and knowledge retention of such novel approaches.</p>
      </sec>
      <sec>
        <title>Conclusions</title>
        <p>The implementation of effective verbal de-escalation training is essential to ensure the ongoing safety of both patients and health care providers. Simulation and face-to-face training are established approaches to deliver this type of training and increase staff confidence in these confronting situations. The challenge with the traditional approach of face-to-face simulation training is that it is synchronous and resource intensive. The use of VR to supplement the existing training approaches is emerging as a feasible option. Our novel approach to using verbal interactions within VR was well received by clinicians. Our proposed framework with 11 factors could provide a much-needed direction for additional training assets to support clinicians faced with these challenging situations. The findings from this research contribute to the feasibility of our framework to support future research and the development of VR for clinical training. We have also contributed a set of requirements to guide the design of verbal interactions in the VR de-escalation training environment.</p>
      </sec>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">AI</term>
          <def>
            <p>artificial intelligence</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">ALS</term>
          <def>
            <p>advanced life support</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">ED</term>
          <def>
            <p>emergency department</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">HMD</term>
          <def>
            <p>head-mounted display</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">LHD</term>
          <def>
            <p>Local Health District</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">STT</term>
          <def>
            <p>speech to text</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">SUS</term>
          <def>
            <p>System Usability Scale</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">VR</term>
          <def>
            <p>virtual reality</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">WSLHD</term>
          <def>
            <p>Western Sydney Local Health District</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Li</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Qiu</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Xiao</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Prevalence of workplace physical violence against health care professionals by patients and visitors: a systematic review and meta-analysis</article-title>
          <source>Int J Environ Res Public Health</source>
          <year>2020</year>
          <month>01</month>
          <day>01</day>
          <volume>17</volume>
          <issue>1</issue>
          <fpage>299</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.mdpi.com/resolver?pii=ijerph17010299"/>
          </comment>
          <pub-id pub-id-type="doi">10.3390/ijerph17010299</pub-id>
          <pub-id pub-id-type="medline">31906306</pub-id>
          <pub-id pub-id-type="pii">ijerph17010299</pub-id>
          <pub-id pub-id-type="pmcid">PMC6982349</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McKnight</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <source>De-Escalating Violence in Healthcare</source>
          <year>2019</year>
          <publisher-loc>Indianapolis, IN</publisher-loc>
          <publisher-name>Sigma Theta Tau Intl</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Davids</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Murphy</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Moore</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Wand</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Exploring staff experiences: a case for redesigning the response to aggression and violence in the emergency department</article-title>
          <source>Int Emerg Nurs</source>
          <year>2021</year>
          <month>07</month>
          <volume>57</volume>
          <fpage>101017</fpage>
          <pub-id pub-id-type="doi">10.1016/j.ienj.2021.101017</pub-id>
          <pub-id pub-id-type="medline">34174545</pub-id>
          <pub-id pub-id-type="pii">S1755-599X(21)00055-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Adams</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Assessing the effectiveness of clinical education to reduce the frequency and recurrence of workplace violence</article-title>
          <source>Aus J Advanced Nursing</source>
          <year>2017</year>
          <volume>34</volume>
          <issue>3</issue>
          <fpage>6</fpage>
          <lpage>15</lpage>
          <pub-id pub-id-type="doi">10.1097/01.nurse.0000282706.14770.f7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Morphet</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Griffiths</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Beattie</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Velasquez Reyes</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Innes</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>Prevention and management of occupational violence and aggression in healthcare: a scoping review</article-title>
          <source>Collegian</source>
          <year>2018</year>
          <month>12</month>
          <volume>25</volume>
          <issue>6</issue>
          <fpage>621</fpage>
          <lpage>32</lpage>
          <pub-id pub-id-type="doi">10.1016/j.colegn.2018.04.003</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Aebersold</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Simulation-based learning: no longer a novelty in undergraduate education</article-title>
          <source>Online J Issues Nurs</source>
          <year>2018</year>
          <month>04</month>
          <day>03</day>
          <volume>23</volume>
          <issue>2</issue>
          <pub-id pub-id-type="doi">10.3912/ojin.vol23no02ppt39</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bingham</surname>
              <given-names>AL</given-names>
            </name>
            <name name-style="western">
              <surname>Sen</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Finn</surname>
              <given-names>LA</given-names>
            </name>
            <name name-style="western">
              <surname>Cawley</surname>
              <given-names>MJ</given-names>
            </name>
          </person-group>
          <article-title>Retention of advanced cardiac life support knowledge and skills following high-fidelity mannequin simulation training</article-title>
          <source>Am J Pharm Educ</source>
          <year>2015</year>
          <month>03</month>
          <day>17</day>
          <volume>79</volume>
          <issue>1</issue>
          <fpage>12</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/25741028"/>
          </comment>
          <pub-id pub-id-type="doi">10.5688/ajpe79112</pub-id>
          <pub-id pub-id-type="medline">25741028</pub-id>
          <pub-id pub-id-type="pii">ajpe12</pub-id>
          <pub-id pub-id-type="pmcid">PMC4346824</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="web">
          <article-title>E‐learning for health professionals</article-title>
          <source>Cochrane Database of Systematic Reviews</source>
          <year>2018</year>
          <access-date>2022-05-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.cochranelibrary.com/cdsr/doi/10.1002/14651858.CD011736.pub2/full">https://www.cochranelibrary.com/cdsr/doi/10.1002/14651858.CD011736.pub2/full</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Barteit</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lanfermann</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Bärnighausen</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Neuhann</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Beiersmann</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Augmented, mixed, and virtual reality-based head-mounted devices for medical education: systematic review</article-title>
          <source>JMIR Serious Games</source>
          <year>2021</year>
          <month>07</month>
          <day>08</day>
          <volume>9</volume>
          <issue>3</issue>
          <fpage>e29080</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://games.jmir.org/2021/3/e29080/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/29080</pub-id>
          <pub-id pub-id-type="medline">34255668</pub-id>
          <pub-id pub-id-type="pii">v9i3e29080</pub-id>
          <pub-id pub-id-type="pmcid">PMC8299342</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="web">
          <article-title>Behind Angry Stan, the virtual reality simulator training nursing students to deal with conflict</article-title>
          <source>ANMJ</source>
          <access-date>2022-05-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://anmj.org.au/behind-angry-stan-the-virtual-reality-simulator-training-nursing-students-to-deal-with-conflict/">https://anmj.org.au/behind-angry-stan-the-virtual-reality-simulator-training-nursing-students-to-deal-with-conflict/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Moore</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Yoo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmadpour</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Tommy</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Poronnik</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>ALS-SimVR: advanced life support virtual reality training application</article-title>
          <source>Proceedings of the 25th ACM Symposium on Virtual Reality Software and Technology</source>
          <year>2019</year>
          <conf-name>VRST '19: 25th ACM Symposium on Virtual Reality Software and Technology</conf-name>
          <conf-date>Nov 12 - 15, 2019</conf-date>
          <conf-loc>Parramatta NSW Australia</conf-loc>
          <pub-id pub-id-type="doi">10.1145/3359996.3365051</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Moore</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Yoo</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Poronnik</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Ahmadpour</surname>
              <given-names>N</given-names>
            </name>
          </person-group>
          <article-title>Exploring user needs in the development of a virtual reality-based advanced life support training platform: exploratory usability study</article-title>
          <source>JMIR Serious Games</source>
          <year>2020</year>
          <month>08</month>
          <day>07</day>
          <volume>8</volume>
          <issue>3</issue>
          <fpage>e20797</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://games.jmir.org/2020/3/e20797/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/20797</pub-id>
          <pub-id pub-id-type="medline">32763877</pub-id>
          <pub-id pub-id-type="pii">v8i3e20797</pub-id>
          <pub-id pub-id-type="pmcid">PMC7442950</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>King</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Tee</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Falconer</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Angell</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Holley</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Mills</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Virtual health education: scaling practice to transform student learning: using virtual reality learning environments in healthcare education to bridge the theory/practice gap and improve patient safety</article-title>
          <source>Nurse Educ Today</source>
          <year>2018</year>
          <month>12</month>
          <volume>71</volume>
          <fpage>7</fpage>
          <lpage>9</lpage>
          <pub-id pub-id-type="doi">10.1016/j.nedt.2018.08.002</pub-id>
          <pub-id pub-id-type="medline">30205259</pub-id>
          <pub-id pub-id-type="pii">S0260-6917(18)30378-2</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Garrett</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Taverner</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Gromala</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Tao</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Cordingley</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Sun</surname>
              <given-names>C</given-names>
            </name>
          </person-group>
          <article-title>Virtual reality clinical research: promises and challenges</article-title>
          <source>JMIR Serious Games</source>
          <year>2018</year>
          <month>10</month>
          <day>17</day>
          <volume>6</volume>
          <issue>4</issue>
          <fpage>e10839</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://games.jmir.org/2018/4/e10839/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/10839</pub-id>
          <pub-id pub-id-type="medline">30333096</pub-id>
          <pub-id pub-id-type="pii">v6i4e10839</pub-id>
          <pub-id pub-id-type="pmcid">PMC6231864</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kyaw</surname>
              <given-names>BM</given-names>
            </name>
            <name name-style="western">
              <surname>Saxena</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Posadzki</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Vseteckova</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Nikolaou</surname>
              <given-names>CK</given-names>
            </name>
            <name name-style="western">
              <surname>George</surname>
              <given-names>PP</given-names>
            </name>
            <name name-style="western">
              <surname>Divakar</surname>
              <given-names>U</given-names>
            </name>
            <name name-style="western">
              <surname>Masiello</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Kononowicz</surname>
              <given-names>AA</given-names>
            </name>
            <name name-style="western">
              <surname>Zary</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Tudor Car</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Virtual reality for health professions education: systematic review and meta-analysis by the digital health education collaboration</article-title>
          <source>J Med Internet Res</source>
          <year>2019</year>
          <month>01</month>
          <day>22</day>
          <volume>21</volume>
          <issue>1</issue>
          <fpage>e12959</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.jmir.org/2019/1/e12959/"/>
          </comment>
          <pub-id pub-id-type="doi">10.2196/12959</pub-id>
          <pub-id pub-id-type="medline">30668519</pub-id>
          <pub-id pub-id-type="pii">v21i1e12959</pub-id>
          <pub-id pub-id-type="pmcid">PMC6362387</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="book">
          <article-title>Immersive virtual reality and the developing child</article-title>
          <source>Cognitive Development in Digital Contexts</source>
          <year>2017</year>
          <publisher-loc>Cambridge, Massachusetts, United States</publisher-loc>
          <publisher-name>Academic Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gallagher</surname>
              <given-names>AG</given-names>
            </name>
            <name name-style="western">
              <surname>Richie</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>McClure</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>McGuigan</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Objective psychomotor skills assessment of experienced, junior, and novice laparoscopists with virtual reality</article-title>
          <source>World J Surg</source>
          <year>2001</year>
          <month>11</month>
          <day>1</day>
          <volume>25</volume>
          <issue>11</issue>
          <fpage>1478</fpage>
          <lpage>83</lpage>
          <pub-id pub-id-type="doi">10.1007/s00268-001-0133-1</pub-id>
          <pub-id pub-id-type="medline">11760752</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="book">
          <article-title>Redefining home automation through voice recognition system</article-title>
          <source>Emerging Technologies in Data Mining and Information Security</source>
          <year>2021</year>
          <publisher-loc>Singapore</publisher-loc>
          <publisher-name>Springer</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="web">
          <article-title>Soft skills training - reinvented</article-title>
          <source>BodySwaps</source>
          <access-date>2022-03-29</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://bodyswaps.co/">https://bodyswaps.co/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Johnson</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Newcastle Uni taps biofeedback in VR conflict training</article-title>
          <source>IT News</source>
          <access-date>2022-05-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.itnews.com.au/news/newcastle-uni-taps-biofeedback-in-vr-conflict-training-534860">https://www.itnews.com.au/news/newcastle-uni-taps-biofeedback-in-vr-conflict-training-534860</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Heckemann</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Hahn</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Halfens</surname>
              <given-names>RJ</given-names>
            </name>
            <name name-style="western">
              <surname>Richter</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Schols</surname>
              <given-names>JM</given-names>
            </name>
          </person-group>
          <article-title>Patient and visitor aggression in healthcare: a survey exploring organisational safety culture and team efficacy</article-title>
          <source>J Nurs Manag</source>
          <year>2019</year>
          <month>07</month>
          <day>29</day>
          <volume>27</volume>
          <issue>5</issue>
          <fpage>1039</fpage>
          <lpage>46</lpage>
          <pub-id pub-id-type="doi">10.1111/jonm.12772</pub-id>
          <pub-id pub-id-type="medline">30888740</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Brooke</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>SUS-A quick and dirty usability scale</article-title>
          <source>Usability Evaluation in Industry</source>
          <year>1996</year>
          <publisher-loc>Boca Raton, Florida, United States</publisher-loc>
          <publisher-name>CRC Press</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bangor</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Kortum</surname>
              <given-names>PT</given-names>
            </name>
            <name name-style="western">
              <surname>Miller</surname>
              <given-names>JT</given-names>
            </name>
          </person-group>
          <article-title>An empirical evaluation of the system usability scale</article-title>
          <source>Int J Human Comput Interact</source>
          <year>2008</year>
          <month>07</month>
          <day>30</day>
          <volume>24</volume>
          <issue>6</issue>
          <fpage>574</fpage>
          <lpage>94</lpage>
          <pub-id pub-id-type="doi">10.1080/10447310802205776</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="web">
          <article-title>Oculus Quest 2</article-title>
          <source>Meta</source>
          <access-date>2022-05-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.oculus.com/quest-2/">https://www.oculus.com/quest-2/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="web">
          <article-title>Unreal Engine homepage</article-title>
          <source>Unreal Engine</source>
          <access-date>2022-05-18</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.unrealengine.com/en-US/">https://www.unrealengine.com/en-US/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Braun</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Clarke</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Using thematic analysis in psychology</article-title>
          <source>Qual Res Psychol</source>
          <year>2006</year>
          <month>01</month>
          <volume>3</volume>
          <issue>2</issue>
          <fpage>77</fpage>
          <lpage>101</lpage>
          <pub-id pub-id-type="doi">10.1191/1478088706qp063oa</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Holton</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Swanson</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Naquin</surname>
              <given-names>SS</given-names>
            </name>
          </person-group>
          <article-title>Andragogy in practice: clarifying the andragogical model of adult learning</article-title>
          <source>Performance Improvement Q</source>
          <year>2001</year>
          <volume>14</volume>
          <issue>1</issue>
          <fpage>118</fpage>
          <lpage>43</lpage>
          <pub-id pub-id-type="doi">10.1111/j.1937-8327.2001.tb00204.x</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Tufford</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Asakura</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Bogo</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Simulation versus role-play: perceptions of prepracticum BSW students</article-title>
          <source>J Baccalaureate Social Work</source>
          <year>2018</year>
          <volume>23</volume>
          <issue>1</issue>
          <fpage>249</fpage>
          <lpage>67</lpage>
          <pub-id pub-id-type="doi">10.18084/1084-7219.23.1.249</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Latif</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Rana</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Khalifa</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Jurdak</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Qadir</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Deep representation learning in speech processing: challenges, recent advances, and future trends</article-title>
          <source>arXiv</source>
          <year>2020</year>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Munafo</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Diedrick</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Stoffregen</surname>
              <given-names>TA</given-names>
            </name>
          </person-group>
          <article-title>The virtual reality head-mounted display Oculus Rift induces motion sickness and is sexist in its effects</article-title>
          <source>Exp Brain Res</source>
          <year>2017</year>
          <month>03</month>
          <day>3</day>
          <volume>235</volume>
          <issue>3</issue>
          <fpage>889</fpage>
          <lpage>901</lpage>
          <pub-id pub-id-type="doi">10.1007/s00221-016-4846-7</pub-id>
          <pub-id pub-id-type="medline">27915367</pub-id>
          <pub-id pub-id-type="pii">10.1007/s00221-016-4846-7</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Latif</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Qadir</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Qayyum</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Usama</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Younis</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Speech technology for healthcare: opportunities, challenges, and state of the art</article-title>
          <source>IEEE Rev Biomed Eng</source>
          <year>2021</year>
          <volume>14</volume>
          <fpage>342</fpage>
          <lpage>56</lpage>
          <pub-id pub-id-type="doi">10.1109/rbme.2020.3006860</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Checa</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Miguel-Alonso</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Bustillo</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Immersive virtual-reality computer-assembly serious game to enhance autonomous learning</article-title>
          <source>Virtual Real</source>
          <year>2021</year>
          <month>12</month>
          <day>23</day>
          <fpage>1</fpage>
          <lpage>18</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/34961808"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10055-021-00607-1</pub-id>
          <pub-id pub-id-type="medline">34961808</pub-id>
          <pub-id pub-id-type="pii">607</pub-id>
          <pub-id pub-id-type="pmcid">PMC8695959</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
