<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "http://dtd.nlm.nih.gov/publishing/2.0/journalpublishing.dtd">
<article article-type="research-article" dtd-version="2.0" xmlns:xlink="http://www.w3.org/1999/xlink">
  <front>
    <journal-meta>
      <journal-id journal-id-type="publisher-id">JSG</journal-id>
      <journal-id journal-id-type="nlm-ta">JMIR Serious Games</journal-id>
      <journal-title>JMIR Serious Games</journal-title>
      <issn pub-type="epub">2291-9279</issn>
      <publisher>
        <publisher-name>JMIR Publications</publisher-name>
        <publisher-loc>Toronto, Canada</publisher-loc>
      </publisher>
    </journal-meta>
    <article-meta>
      <article-id pub-id-type="publisher-id">v9i3e26976</article-id>
      <article-id pub-id-type="pmid">34463624</article-id>
      <article-id pub-id-type="doi">10.2196/26976</article-id>
      <article-categories>
        <subj-group subj-group-type="heading">
          <subject>Original Paper</subject>
        </subj-group>
        <subj-group subj-group-type="article-type">
          <subject>Original Paper</subject>
        </subj-group>
      </article-categories>
      <title-group>
        <article-title>Feasibility of Virtual Reality Audiological Testing: Prospective Study</article-title>
      </title-group>
      <contrib-group>
        <contrib contrib-type="editor">
          <name>
            <surname>Zary</surname>
            <given-names>Nabil</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Manchaiah</surname>
            <given-names>Vinaya</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Hallewell</surname>
            <given-names>Madeline</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Chu</surname>
            <given-names>Yuanchia</given-names>
          </name>
        </contrib>
        <contrib contrib-type="reviewer">
          <name>
            <surname>Mahomed-Asmail</surname>
            <given-names>Faheema</given-names>
          </name>
        </contrib>
      </contrib-group>
      <contrib-group>
        <contrib id="contrib1" contrib-type="author">
          <name name-style="western">
            <surname>Seol</surname>
            <given-names>Hye Yoon</given-names>
          </name>
          <degrees>AuD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-7040-1884</ext-link>
        </contrib>
        <contrib id="contrib2" contrib-type="author">
          <name name-style="western">
            <surname>Kang</surname>
            <given-names>Soojin</given-names>
          </name>
          <degrees>PhD</degrees>
          <xref rid="aff1" ref-type="aff">1</xref>
          <xref rid="aff2" ref-type="aff">2</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0003-3070-8125</ext-link>
        </contrib>
        <contrib id="contrib3" contrib-type="author">
          <name name-style="western">
            <surname>Lim</surname>
            <given-names>Jihyun</given-names>
          </name>
          <degrees>BS</degrees>
          <xref rid="aff3" ref-type="aff">3</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-0318-9334</ext-link>
        </contrib>
        <contrib id="contrib4" contrib-type="author">
          <name name-style="western">
            <surname>Hong</surname>
            <given-names>Sung Hwa</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff4" ref-type="aff">4</xref>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0001-6906-8925</ext-link>
        </contrib>
        <contrib id="contrib5" contrib-type="author" corresp="yes">
          <name name-style="western">
            <surname>Moon</surname>
            <given-names>Il Joon</given-names>
          </name>
          <degrees>MD, PhD</degrees>
          <xref rid="aff2" ref-type="aff">2</xref>
          <xref rid="aff5" ref-type="aff">5</xref>
          <address>
            <institution>Department of Otolaryngology-Head &amp; Neck Surgery</institution>
            <institution>Sungkyunkwan University School of Medicine</institution>
            <institution>Samsung Medical Center</institution>
            <addr-line>81 Irwon-ro, Gangnam-gu</addr-line>
            <addr-line>Seoul, 06351</addr-line>
            <country>Republic of Korea</country>
            <phone>82 2 3410 3579</phone>
            <email>moonij@skku.edu</email>
          </address>
          <ext-link ext-link-type="orcid">https://orcid.org/0000-0002-3613-0734</ext-link>
        </contrib>
      </contrib-group>
      <aff id="aff1">
        <label>1</label>
        <institution>Medical Research Institute</institution>
        <institution>Sungkyunkwan University School of Medicine</institution>
        <addr-line>Suwon</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff2">
        <label>2</label>
        <institution>Hearing Research Laboratory</institution>
        <institution>Samsung Medical Center</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff3">
        <label>3</label>
        <institution>Center for Clinical Epidemiology</institution>
        <institution>Samsung Medical Center</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff4">
        <label>4</label>
        <institution>Department of Otolaryngology-Head &amp; Neck Surgery</institution>
        <institution>Samsung Changwon Hospital</institution>
        <addr-line>Changwon</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <aff id="aff5">
        <label>5</label>
        <institution>Department of Otolaryngology-Head &amp; Neck Surgery</institution>
        <institution>Sungkyunkwan University School of Medicine</institution>
        <institution>Samsung Medical Center</institution>
        <addr-line>Seoul</addr-line>
        <country>Republic of Korea</country>
      </aff>
      <author-notes>
        <corresp>Corresponding Author: Il Joon Moon <email>moonij@skku.edu</email></corresp>
      </author-notes>
      <pub-date pub-type="collection">
        <season>Jul-Sep</season>
        <year>2021</year>
      </pub-date>
      <pub-date pub-type="epub">
        <day>31</day>
        <month>8</month>
        <year>2021</year>
      </pub-date>
      <volume>9</volume>
      <issue>3</issue>
      <elocation-id>e26976</elocation-id>
      <history>
        <date date-type="received">
          <day>6</day>
          <month>1</month>
          <year>2021</year>
        </date>
        <date date-type="rev-request">
          <day>19</day>
          <month>3</month>
          <year>2021</year>
        </date>
        <date date-type="rev-recd">
          <day>13</day>
          <month>5</month>
          <year>2021</year>
        </date>
        <date date-type="accepted">
          <day>29</day>
          <month>5</month>
          <year>2021</year>
        </date>
      </history>
      <copyright-statement>©Hye Yoon Seol, Soojin Kang, Jihyun Lim, Sung Hwa Hong, Il Joon Moon. Originally published in JMIR Serious Games (https://games.jmir.org), 31.08.2021.</copyright-statement>
      <copyright-year>2021</copyright-year>
      <license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/">
        <p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (https://creativecommons.org/licenses/by/4.0/), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Serious Games, is properly cited. The complete bibliographic information, a link to the original publication on https://games.jmir.org, as well as this copyright and license information must be included.</p>
      </license>
      <self-uri xlink:href="https://games.jmir.org/2021/3/e26976" xlink:type="simple"/>
      <related-article related-article-type="correction-forward" xlink:title="This is a corrected version. See correction statement in:" xlink:href="https://games.jmir.org/2021/4/e34994" vol="9" page="e34994"> </related-article>
      <abstract>
        <sec sec-type="background">
          <title>Background</title>
          <p>It has been noted in the literature that there is a gap between clinical assessment and real-world performance. Real-world conversations entail visual and audio information, yet there are not any audiological assessment tools that include visual information. Virtual reality (VR) technology has been applied to various areas, including audiology. However, the use of VR in speech-in-noise perception has not yet been investigated.</p>
        </sec>
        <sec sec-type="objective">
          <title>Objective</title>
          <p>The purpose of this study was to investigate the impact of virtual space (VS) on speech performance and its feasibility to be used as a speech test instrument. We hypothesized that individuals’ ability to recognize speech would improve when visual cues were provided.</p>
        </sec>
        <sec sec-type="methods">
          <title>Methods</title>
          <p>A total of 30 individuals with normal hearing and 25 individuals with hearing loss completed pure-tone audiometry and the Korean version of the Hearing in Noise Test (K-HINT) under three conditions—conventional K-HINT (cK-HINT), VS on PC (VSPC), and VS head-mounted display (VSHMD)—at –10 dB, –5 dB, 0 dB, and +5 dB signal-to-noise ratios (SNRs). Participants listened to target speech and repeated it back to the tester for all conditions. Hearing aid users in the hearing loss group completed testing under unaided and aided conditions. A questionnaire was administered after testing to gather subjective opinions on the headset, the VSHMD condition, and test preference.</p>
        </sec>
        <sec sec-type="results">
          <title>Results</title>
          <p>Provision of visual information had a significant impact on speech performance between the normal hearing and hearing impaired groups. The Mann-Whitney <italic>U</italic> test showed statistical significance (<italic>P</italic>&lt;.05) between the two groups under all test conditions. Hearing aid use led to better integration of audio and visual cues. Statistical significance through the Mann-Whitney <italic>U</italic> test was observed for –5 dB (<italic>P</italic>=.04) and 0 dB (<italic>P</italic>=.02) SNRs under the cK-HINT condition, as well as for –10 dB (<italic>P</italic>=.007) and 0 dB (<italic>P</italic>=.04) SNRs under the VSPC condition, between hearing aid and non–hearing aid users. Participants reported positive responses across almost all items on the questionnaire except for the weight of the headset. Participants preferred a test method with visual imagery, but found the headset to be heavy.</p>
        </sec>
        <sec sec-type="conclusions">
          <title>Conclusions</title>
          <p>Findings are in line with previous literature that showed that visual cues were beneficial for communication. This is the first study to include hearing aid users with a more naturalistic stimulus and a relatively simple test environment, suggesting the feasibility of VR audiological testing in clinical practice.</p>
        </sec>
      </abstract>
      <kwd-group>
        <kwd>hearing loss</kwd>
        <kwd>virtual reality</kwd>
        <kwd>speech performance</kwd>
        <kwd>real-world performance</kwd>
        <kwd>hearing</kwd>
        <kwd>audiology</kwd>
      </kwd-group>
    </article-meta>
  </front>
  <body>
    <sec sec-type="introduction">
      <title>Introduction</title>
      <p>Hearing loss is a major health concern for the global society due to its negative consequences on individuals’ lives. These consequences include, but are not limited to, communication, employment, cognitive decline, social participation, and quality of life [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref5">5</xref>]. Hearing loss primarily affects communication, and for those who are diagnosed with sensorineural hearing loss, a prescription of hearing aids is typically the first step of the aural rehabilitation process [<xref ref-type="bibr" rid="ref6">6</xref>]. Hearing aids amplify sounds and provide various features (ie, noise reduction) to substantially mitigate the negative consequences of hearing loss by improving audibility. However, even with these advancements, there is a gap between clinical assessment and real-world performance [<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref13">13</xref>], such as the wearer’s complaint of persistent hearing difficulties in noisy situations [<xref ref-type="bibr" rid="ref14">14</xref>-<xref ref-type="bibr" rid="ref17">17</xref>].</p>
      <p>One contributing factor for this issue could be limitations of current measurement tools. Taylor [<xref ref-type="bibr" rid="ref18">18</xref>] mentioned difficulties in constructing a laboratory environment that closely replicates real-world settings and in measuring individuals’ unique auditory environments. In clinical practice, aided threshold and speech perception testing is often performed to assess the benefits provided by hearing aids. Aided threshold testing involves presenting warble tones (250 Hz to 8000 Hz) through a loudspeaker in the sound field [<xref ref-type="bibr" rid="ref19">19</xref>]. The patient is asked to respond (ie, “say <italic>yes</italic> or press the button”) when he or she hears the tone, even if the tone is soft. Speech testing is also performed in the sound field with one or more loudspeakers [<xref ref-type="bibr" rid="ref19">19</xref>]. Words and sentences can be used as test materials and the patient is asked to listen and repeat words and sentences back to the tester. Some outcome measures include noise and multi-talker conditions to simulate real-world auditory environments, but they lack an important piece of information that people use for communication: visual cues. In real-world conversations, nonverbal cues, such as lip movements, are readily available and their significant influence on speech perception has been demonstrated in previous studies [<xref ref-type="bibr" rid="ref20">20</xref>-<xref ref-type="bibr" rid="ref25">25</xref>]. Summerfield [<xref ref-type="bibr" rid="ref21">21</xref>] examined changes in the accuracy of phonetic perception in noise depending on the amount of visual information given to 10 listeners with normal hearing (NH). The participants heard a total of 125 sentences with 100 keywords and repeated the keywords under various conditions. 
Participants’ overall speech performance was the best when they were able to see the whole face of the speaker (65.3%), followed by the lips (54.0%), the four points (30.7%), nothing (22.7%), and a circle (20.8%). A more recent study investigated the speech perception performance of 77 NH participants who were divided into five age groups under three conditions: auditory only, visual only, and audiovisual. The highest accuracy rate was observed for the audiovisual condition, followed by the auditory-only and the visual-only conditions [<xref ref-type="bibr" rid="ref26">26</xref>]. Benefits of audiovisual integration are well noted in the literature and, yet, there are no audiological evaluation tools that use visual cues. Thus, even with well-programmed devices and good test results, hearing aid wearers often do not perceive this benefit in the real world. This mismatch reduces device satisfaction and can ultimately result in discontinuance of hearing aid use [<xref ref-type="bibr" rid="ref7">7</xref>-<xref ref-type="bibr" rid="ref9">9</xref>,<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref13">13</xref>,<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. The MarkeTrak survey conducted in 2000 by Sergei Kochkin reported poor benefit as one of the top 10 reasons for not using hearing aids [<xref ref-type="bibr" rid="ref27">27</xref>]. Results from previous studies emphasize the need for closing the gap between clinical assessment and real-world performance.</p>
      <p>With the emergence of the fourth industrial revolution, researchers and industries have been putting in efforts to fuse technologies and health care. Among these technologies, virtual reality (VR) has been applied not only to gaming but to education and health care. There are five key components of VR systems: virtual space (VS), immersion, interactivity, creators, and users [<xref ref-type="bibr" rid="ref29">29</xref>]. To be more specific, there is an imaginary place, VS, and through interaction and immersion, individuals feel more present, or connected, to the VS. VR’s biggest strength is that auditory and visual information are provided simultaneously to generate realistic environments. There are studies showing the efficacy of VR in certain areas, such as pain management, stroke rehabilitation, and chronic subjective tinnitus [<xref ref-type="bibr" rid="ref30">30</xref>-<xref ref-type="bibr" rid="ref34">34</xref>]. For audiology, VR has been researched for sound localization, but research into the effect of VS on speech performance has been sparse [<xref ref-type="bibr" rid="ref35">35</xref>-<xref ref-type="bibr" rid="ref37">37</xref>].</p>
      <p>Ahrens et al [<xref ref-type="bibr" rid="ref37">37</xref>] tested the sound localization ability of 10 NH listeners under eight conditions involving blindfolding, a head-mounted display (HMD) headset, virtual and real environments, loudspeakers, acoustic or visual stimuli, and a simulated laser pointer. The results revealed that the headset had a negative impact on individuals’ sound localization ability, as differences in interaural time and levels, which are important cues for sound localization, were larger when wearing the headset. Azimuth and elevation errors decreased when the source locations were visible to the participants in both the virtual and real environments. Sechler et al [<xref ref-type="bibr" rid="ref38">38</xref>] explored the potential use of VR in sound localization testing among bilateral cochlear implant users. A total of 12 NH listeners and four bilateral cochlear implant users performed sound localization testing in a VR environment that was created for the study. A total of 13 sound cues were presented and the participants selected where they heard the sound cues in the VR environment. Bilateral cochlear implant users completed testing under first implant–only, second implant–only, and both implants conditions. Comparing the localization performance, individuals with NH showed better performance than bilateral cochlear implant users in terms of response time, left or right discrimination, percent correct, and root mean square error. Better sound localization performance was observed under the both implants condition and the first implant–only condition. Overall, both studies suggest VR’s potential to be employed in clinical audiology.</p>
      <p>The purpose of this study was to investigate the impact of VS on speech-in-noise performance and its feasibility as a viable instrument for speech testing in clinical practice. Findings from this study will shed light on VR’s potential to overcome the limitations of current assessment tools and, ultimately, to be utilized in clinical practice, which is an unexplored territory in the field of audiology.</p>
    </sec>
    <sec sec-type="methods">
      <title>Methods</title>
      <sec>
        <title>Participants</title>
        <p>The sample size was determined based on previous research examining reaction time and accuracy differences under auditory-only and visual-only conditions among individuals with and without autism [<xref ref-type="bibr" rid="ref39">39</xref>]. The resulting sample size was 45, using Stata version 14 (StataCorp LP) for power set at 0.9 and α set at .0167 (corrected for multiple comparison). A total of 30 individuals with NH and 25 hearing impaired (HI) individuals were enrolled in the study (<xref rid="figure1" ref-type="fig">Figure 1</xref>). The NH group had average pure-tone thresholds below 25 dB hearing level (HL), with an asymmetry in hearing thresholds below 10 dB across 250 Hz, 500 Hz, 1000 Hz, 2000 Hz, 4000 Hz, and 8000 Hz. The HI group had average pure-tone thresholds above 25 dB HL, with an asymmetry in hearing thresholds below 10 dB across the testing frequencies. For the HI group, 10 individuals were hearing aid users. Individuals who were unable to communicate and understand television at a distance of 1 meter and those with neurological and mental disorders were excluded from the study. All experimental procedures were approved by Samsung Medical Center’s Institutional Review Board and were carried out in accordance with approved guidelines. An informed consent document was obtained prior to testing from the participants. Informed consent was also obtained from actors to publish the images in an online publication.</p>
        <fig id="figure1" position="float">
          <label>Figure 1</label>
          <caption>
            <p>CONSORT (Consolidated Standards of Reporting Trials) diagram of study participation.</p>
          </caption>
          <graphic xlink:href="games_v9i3e26976_fig1.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Conventional Pure-Tone Audiometry</title>
        <p>Following the 2005 American Speech-Language-Hearing Association guidelines [<xref ref-type="bibr" rid="ref40">40</xref>], conventional pure-tone audiometry was performed in a sound booth using a GSI 61 audiometer (Grason-Stadler) and TDH-39 headphones (Telephonics).</p>
      </sec>
      <sec>
        <title>Virtual Space</title>
        <p>A café was created as a VS with the assistance of the Samsung Changwon Hospital VR Lab using the Samsung 360 Round VR camera (Samsung Electronics Co). The film was then edited using commercial editing tools from Adobe Systems: Adobe Premiere Pro, Adobe After Effects, and Adobe Audition (2018-2019 versions). A café was selected as an environment as it is one of the most common places within which individuals have trouble hearing [<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. A scenario for the VS where the user is having a one-on-one conversation with a conversational partner who speaks sentences from the Korean version of the Hearing in Noise Test (K-HINT), while other “customers” are talking in the background, was designed (<xref rid="figure2" ref-type="fig">Figure 2</xref>). The conversational partner recorded the K-HINT sentences.</p>
        <fig id="figure2" position="float">
          <label>Figure 2</label>
          <caption>
            <p>A screenshot of the virtual space. The “conversational partner” is speaking sentences from the Korean version of the Hearing in Noise Test (K-HINT), while the “customers” are talking in the background.</p>
          </caption>
          <graphic xlink:href="games_v9i3e26976_fig2.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>K-HINT</title>
        <p>The K-HINT, developed by Sung Kyun Moon and his colleagues at Ajou University and the House Ear Institute, is widely used in Korea as a speech-in-noise test [<xref ref-type="bibr" rid="ref43">43</xref>], with a listen-and-repeat task. The K-HINT consists of 12 lists with 20 sentences per list. The K-HINT sentences were used for the study with a presentation level of 65 dBA (A-weighted dB). Each list was broken down into two sets in order for the participants to complete all test conditions: (1) conventional K-HINT (cK-HINT); (2) VS on PC (VSPC), where the VS was displayed on a monitor; and (3) VS head-mounted display (VSHMD), where the VS was displayed on the HMD at –10, –5, 0, and +5 dB signal-to-noise ratios (SNRs). For VSPC and VSHMD conditions, the same VS was displayed and all participants had 10 seconds to familiarize themselves with the virtual environment. The test conditions were randomized for each participant. Percent-correct scores were calculated based on the number of sentences that were repeated back to the tester correctly among 10 sentences. The hearing aid wearers used their own hearing aids to complete testing under unaided and aided conditions. No adjustments were made to the participants’ hearing aid settings, as the authors wanted to simulate as natural an environment as possible. Testing was performed in a semianechoic chamber with sentences being presented through a loudspeaker in the front. To make the testing more realistic, café noise was obtained from YouTube and normalized to the average level of the sound file using Cool Edit Pro 2.1 (Syntrillium Software Corporation). Then, sound levels of speech as well as the café noise were measured using a sound level meter for the four SNRs. The noise was presented from speakers located at 45, 135, 225, and 315 degrees for all test conditions. A Samsung Notebook Odyssey laptop and a Samsung Odyssey VR headset with controllers (Samsung Electronics Co) were used to display the VS. 
The Samsung Odyssey laptop was used, as testers can see the screen that participants are seeing during the VSHMD condition. This allows individuals who are unfamiliar with VR technology to easily complete the task with assistance from the tester. A practice test was run before the experiment to familiarize the participants with the listen-and-repeat task in the VS.</p>
      </sec>
      <sec>
        <title>Questionnaire</title>
        <p>A questionnaire was administered after testing to evaluate various aspects of testing (<xref ref-type="table" rid="table1">Table 1</xref>). The questionnaire contained four domains: HMD headset, VSHMD condition, tests, and cK-HINT versus VSHMD. Items regarding the headset consisted of physical comfort and weight of the device, audiovisual synchronization, and sound quality of the recorded K-HINT sentences. In terms of the VSHMD condition, immersiveness, listening effort, degree of reality reflection, need for VR to be incorporated into audiological testing, adequacy of the VS, structure of the test, and interestingness were evaluated. For questions about immersiveness and listening effort, hearing aid users completed these questions twice for unaided and aided conditions. Participants also chose the most preferred test method and the test that required the greatest amount of listening effort, encouraged hearing aid use, and assessed communication difficulties better in the <italic>tests</italic> domain. The last domain compared the cK-HINT and VSHMD conditions. Participants were asked to write down strengths and weaknesses of the two conditions. In terms of differences between the two conditions, participants had an option to choose multiple responses among <italic>immersiveness</italic>, <italic>reality reflection</italic>, <italic>convenience</italic>, and <italic>others</italic>. If they chose <italic>others</italic>, they were asked to provide specific responses. Questions in the <italic>HMD headset</italic> and <italic>VSHMD condition</italic> domains were answered using the 10-point Visual Analogue Scale (VAS) with the following responses: 0 (poor, strongly disagree, or extremely heavy), 5 (neutral), and 10 (excellent, strongly agree, or extremely light). The <italic>tests</italic> domain contained multiple-choice questions. 
Participants had to choose from the following response options: <italic>cK-HINT</italic>, <italic>VSPC</italic>, and <italic>VSHMD</italic>.</p>
        <table-wrap position="float" id="table1">
          <label>Table 1</label>
          <caption>
            <p>Questionnaire items.</p>
          </caption>
          <table width="1000" cellpadding="5" cellspacing="0" border="1" rules="groups" frame="hsides">
            <col width="30"/>
            <col width="220"/>
            <col width="750"/>
            <thead>
              <tr valign="top">
                <td colspan="2">Domain and question No.</td>
                <td>Question</td>
              </tr>
            </thead>
            <tbody>
              <tr valign="top">
                <td colspan="3">
                  <bold>HMD<sup>a</sup> headset</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>1</td>
                <td>How is the physical comfort of the device?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>2</td>
                <td>How heavy is the device?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>3</td>
                <td>Does the visualization (café) match well with the audio?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>4</td>
                <td>How is the sound quality?</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>VSHMD<sup>b</sup> condition</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>5</td>
                <td>Without hearing aids: How immersive was the virtual space? Did you feel like you were in a real café?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>5-1</td>
                <td>With hearing aids: How immersive was the virtual space? Did you feel like you were in a real café?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>6</td>
                <td>Without hearing aids: How much effort did you have to spend to understand speech?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>6-1</td>
                <td>With hearing aids: How much effort did you have to spend to understand speech?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>7</td>
                <td>How much did the virtual space (café) reflect reality?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>8</td>
                <td>Does the virtual reality technology need to be used for clinical testing?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>9</td>
                <td>Was the café an appropriate place to use as the virtual space?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>10</td>
                <td>Was the test structured to be easily understood?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>11</td>
                <td>Was the test interesting?</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>Tests</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>12</td>
                <td>Which test do you prefer the most?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>13</td>
                <td>Which test required the most amount of effort for listening?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>14</td>
                <td>Which test would encourage hearing aid use?</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>15</td>
                <td>Which test would assess communication difficulties better?</td>
              </tr>
              <tr valign="top">
                <td colspan="3">
                  <bold>cK-HINT<sup>c</sup> versus VSHMD condition</bold>
                </td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>16</td>
                <td>Describe any differences between conventional testing (without visual cues) and the VSHMD (visualization through the headset).</td>
              </tr>
              <tr valign="top">
                <td>
                  <break/>
                </td>
                <td>17</td>
                <td>Describe strengths and weaknesses of conventional testing (without visual cues) and the VSHMD (visualization through the headset).</td>
              </tr>
            </tbody>
          </table>
          <table-wrap-foot>
            <fn id="table1fn1">
              <p><sup>a</sup>HMD: head-mounted display.</p>
            </fn>
            <fn id="table1fn2">
              <p><sup>b</sup>VSHMD: virtual space head-mounted display.</p>
            </fn>
            <fn id="table1fn3">
              <p><sup>c</sup>cK-HINT: conventional Korean version of the Hearing in Noise Test.</p>
            </fn>
          </table-wrap-foot>
        </table-wrap>
      </sec>
      <sec>
        <title>Statistical Analysis</title>
        <p>Statistical analysis was performed using SAS version 9.4 (SAS Institute Inc). Nonparametric tests were used, as our results did not pass the normality test. To compare K-HINT performance based on test conditions in each group and SNRs, the Friedman test was performed. The primary outcome was individuals’ K-HINT performance, and the availability of visual cues was the exposure of interest in this study. To compare K-HINT performance between the groups, the Mann-Whitney <italic>U</italic> test was used. A <italic>P</italic> value of less than .05 was considered to be statistically significant.</p>
      </sec>
    </sec>
    <sec sec-type="results">
      <title>Results</title>
      <sec>
        <title>Participant Characteristics</title>
        <p>The age range of the participants was 18 to 75 years old. The mean age of the NH group was 29.7 years (SD 10.4), while the mean age of the HI group was 53.0 years (SD 14.0). The NH group’s pure-tone averages were 6.3 dB in the right ear and 5.5 dB in the left ear. The HI group had pure-tone averages of 49.2 dB in the right ear and 47.2 dB in the left ear. A total of 10 participants in the HI group were hearing aid users, with pure-tone averages of 56.5 dB and 54.2 dB in the right and left ears, respectively. Non–hearing aid users in the HI group had pure-tone averages of 37.0 dB and 34.9 dB in the right and left ears, respectively.</p>
      </sec>
      <sec>
        <title>K-HINT Performance Between the NH and HI Groups</title>
        <p>Comparison of the K-HINT performance of both groups under all test conditions is illustrated in <xref rid="figure3" ref-type="fig">Figure 3</xref>. For hearing aid users, their percent-correct scores for the aided conditions were used for comparison. Both groups performed better when visual cues were available. The Friedman test was performed for each group to examine whether provision of visual signals was beneficial. Statistical significance was observed for –10 dB SNR in the NH group (<italic>P</italic>=.004) and for –10 dB (<italic>P</italic>=.01), –5 dB (<italic>P</italic>=.01), and 0 dB (<italic>P</italic>=.045) SNRs in the HI group. Group comparison using the Mann-Whitney <italic>U</italic> test showed statistical significance (<italic>P</italic>&lt;.05) between the two groups under all test conditions, with <italic>P</italic> values ranging from .001 to .004. Overall, NH listeners showed better speech-in-noise performance than the HI group.</p>
        <fig id="figure3" position="float">
          <label>Figure 3</label>
          <caption>
            <p>Statistical analysis of the groups’ average speech performance. Pink bars (A) indicate normal hearing group’s performance. Blue bars (B) indicate hearing impaired group’s performance. The horizontal lines within the shaded bars represent the median values, the shaded bars represent the IQRs, the error whiskers represent the highest and lowest points, and the circles and stars represent outliers and extreme outliers, respectively. cK-HINT: conventional Korean version of the Hearing in Noise Test; SNR: signal-to-noise ratio; VSHMD: virtual space head-mounted display; VSPC: virtual space on PC.</p>
          </caption>
          <graphic xlink:href="games_v9i3e26976_fig3.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>VR K-HINT Performance Between Hearing Aid Users and Non–Hearing Aid Users in the HI Group</title>
        <p>The K-HINT performance of hearing aid and non–hearing aid users is shown in <xref rid="figure4" ref-type="fig">Figure 4</xref>. Hearing aid users’ aided scores were used for performance comparison. Higher average percent-correct scores for non–hearing aid users indicate that they understood speech better in noise than hearing aid users. This is consistent with non–hearing aid users’ and hearing aid users’ pure-tone audiometry data: non–hearing aid users had better audiometric thresholds across the testing frequencies, except at 4000 Hz and 8000 Hz in the left ear. The Mann-Whitney <italic>U</italic> test revealed statistical significance at –5 dB (<italic>P</italic>=.04) and 0 dB (<italic>P</italic>=.02) SNRs under the cK-HINT condition and at –10 dB (<italic>P</italic>=.007) and 0 dB (<italic>P</italic>=.04) SNRs under the VSPC condition between the non–hearing aid users and hearing aid users.</p>
        <fig id="figure4" position="float">
          <label>Figure 4</label>
          <caption>
            <p>Statistical analysis of the groups’ average speech performance. Gray bars (A) indicate non–hearing aid users’ performance. Purple bars (B) indicate hearing aid users’ performance. The horizontal lines within the shaded bars represent the median values, the shaded bars represent IQRs, the error whiskers represent the highest and lowest points, and the circles and stars represent outliers and extreme outliers, respectively. cK-HINT: conventional Korean version of the Hearing in Noise Test; SNR: signal-to-noise ratio; VSHMD: virtual space head-mounted display; VSPC: virtual space on PC.</p>
          </caption>
          <graphic xlink:href="games_v9i3e26976_fig4.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Hearing Aid Users’ Unaided and Aided VR K-HINT Performance</title>
        <p><xref rid="figure5" ref-type="fig">Figure 5</xref> displays hearing aid users’ K-HINT performance with and without their hearing aids. The results are in line with previous studies showing that speech-understanding-in-noise performance is better with hearing aids. Statistical significance was also observed for +5 dB SNR under the cK-HINT condition (<italic>P</italic>=.02); –10 dB (<italic>P</italic>=.04), –5 dB (<italic>P</italic>=.02), and +5 dB (<italic>P</italic>=.02) SNRs under the VSPC condition; and –10 dB (<italic>P</italic>=.04) and –5 dB (<italic>P</italic>=.002) SNRs under the VSHMD condition through the Wilcoxon signed-rank test.</p>
        <fig id="figure5" position="float">
          <label>Figure 5</label>
          <caption>
            <p>Statistical analysis of hearing aid users’ average performance of the Korean version of the Hearing in Noise Test (K-HINT) in unaided (A) and aided (B) conditions. The horizontal lines within the shaded bars represent the median values, the shaded bars represent the IQRs, the error whiskers represent the highest and lowest points, and the circles and stars represent outliers and extreme outliers, respectively. cK-HINT: conventional Korean version of the Hearing in Noise Test; SNR: signal-to-noise ratio; VSHMD: virtual space head-mounted display; VSPC: virtual space on PC.</p>
          </caption>
          <graphic xlink:href="games_v9i3e26976_fig5.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Questionnaire</title>
        <p>The groups’ subjective opinions on the headset, the VSHMD condition, listening effort, and presence in the VS were gathered through a questionnaire (<xref rid="figure6" ref-type="fig">Figure 6</xref>). For the HMD headset, the following items were evaluated: physical comfort when wearing the device, weight of the device, synchronization between audio and visual information, and sound quality of the recorded sentences. The VAS was used to rate the items, with 10 being <italic>strongly agree</italic> or <italic>excellent</italic>. Responses for all items, except for the weight of the device, were positive toward the system; the headset was heavy, but was comfortable to wear and had excellent sound quality and audiovisual synchronization. The degree of reality reflection, need for VR to be used in clinical testing, adequacy of VS, test structure, and interestingness regarding the VSHMD condition were also evaluated. The VAS was used to rate the items, with 10 being <italic>strongly agree</italic> or <italic>excellent</italic>. The results revealed that reality simulation through the VS was excellent, and participants felt that the testing was interesting. The café was an appropriate place to use as the VS. The test structure, in which participants completed practice runs and then experimental tests, was considered good as well. The necessity of VR in audiology was high. Lastly, immersion and listening effort were investigated. Since the hearing aid users completed testing under unaided and aided conditions, the amount of listening effort required for these conditions was evaluated twice. Responses from NH listeners and non–hearing aid users were similar to each other across all items; the Wilcoxon signed-rank test showed no statistical differences for immersion (<italic>P</italic>=.36) and listening effort (<italic>P</italic>=.49) for the NH and HI groups. 
For hearing aid users, scores were higher for immersion and lower for listening effort with hearing aids, implying that integration of auditory and visual information through hearing aids and visual cues have a positive impact on speech understanding in the presence of noise. Significant differences for immersion (<italic>P</italic>=.047) and listening effort (<italic>P</italic>=.04) between the unaided and aided conditions were also observed through the Wilcoxon signed-rank test.</p>
        <p>Both groups preferred tests that contained visual cues; 50% (15/30) and 32% (8/25) of the participants in the NH and HI groups, respectively, selected VSHMD. VSHMD was also selected the most by the groups as a test that better assessed communication difficulties (67% [20/30] of the NH group and 52% [13/25] of the HI group) and encouraged hearing aid use (50% [15/30] of the NH group and 44% [11/25] of the HI group). The cK-HINT, which did not provide any visual information, required the greatest amount of listening effort, as reported by the NH (22/30, 73%) and HI (20/25, 80%) groups. A total of 97% (29/30) of participants in the NH group and 88% (22/25) of participants in the HI group showed willingness to complete the test if available in clinical practice.</p>
        <fig id="figure6" position="float">
          <label>Figure 6</label>
          <caption>
            <p>(A) Questionnaire results regarding the head-mounted display (HMD) headset. Pink bars represent average responses from the normal hearing (NH) group and blue bars represent average responses from the hearing impaired (HI) group. A value of zero (0) on the Visual Analogue Scale (VAS) indicates <italic>extremely heavy</italic> or <italic>poor</italic>, while 5 indicates <italic>neutral</italic> and 10 indicates <italic>extremely light</italic> or <italic>excellent</italic>. (B) Questionnaire results for the virtual space head-mounted display (VSHMD) condition. Pink bars represent average responses from the NH group and blue bars represent average responses from the HI group. (C) Questionnaire results for the VSHMD condition regarding the amount of perceptual presentation in the virtual space (VS) and effort exerted to understand speech in noise. Pink bars indicate average responses from the NH group, while blue, sky blue, and purple bars indicate average responses from non–hearing aid (nonHA) users, hearing aid (HA) users in the unaided condition, and hearing aid users in the aided condition, respectively. VR: virtual reality.</p>
          </caption>
          <graphic xlink:href="games_v9i3e26976_fig6.png" alt-version="no" mimetype="image" position="float" xlink:type="simple"/>
        </fig>
      </sec>
      <sec>
        <title>Differences Between the cK-HINT and VSHMD Conditions</title>
        <p>Simulation of reality and immersion were the main differences reported by the groups. For this question, individuals were able to select more than one option. A total of 63% (19/30) and 83% (25/30) of participants in the NH group selected immersion and simulation of a real-world environment, respectively, as differences between the conditions. For the HI group, 72% (18/25) and 52% (13/25) of participants selected immersion and reality simulation, respectively, as differences between the conditions. Other responses included “test is interesting,” “being able to concentrate during testing,” and “visual cues (ie, lip movements) were available.” This adds value to VR’s strengths and participants’ subjective responses regarding test preference and tests that better promote hearing aid use and assess hearing problems.</p>
      </sec>
      <sec>
        <title>Strengths and Weaknesses of the cK-HINT and VSHMD Conditions</title>
        <p>A substantial majority of the participants reported that the cK-HINT would be a better assessment tool for measuring auditory performance because it did not provide any visual cues: they only had auditory information to understand speech. Convenience of testing was another strength of the condition, as it did not require additional devices for the participants to wear. Weaknesses of the condition, on the other hand, included boredom, no provision of visual information, unrealistic testing environment, and noise. Since no visual cues were presented as part of this condition, participants thought that the test would not be able to accurately assess their real-world speech-in-noise performance. For the VSHMD condition, both groups reported the following strengths: less effort to hear, excellent audio and visual quality, excellent reality reflection, feeling present in the environment, and increased concentration during testing. Utilizing visual cues during testing helped the participants exert less effort to understand speech in the presence of noise. Excellent audio and visual quality and reality reflection allowed them to feel present in the VS during testing. However, the headset was heavy to wear, which the participants thought could possibly affect the test results. In terms of weakness, they mentioned that individuals who are not familiar with the HMD system might have difficulty performing the test (ie, wearing the device and navigating through the test) and that visualization provided as part of this condition might distract individuals.</p>
      </sec>
    </sec>
    <sec sec-type="discussion">
      <title>Discussion</title>
      <p>In our study, speech recognition improved with the provision of visual information, regardless of the presence of hearing loss. Hearing aids facilitated better speech recognition with lower listening effort for hearing aid wearers. All of these findings are consistent with previous literature [<xref ref-type="bibr" rid="ref44">44</xref>-<xref ref-type="bibr" rid="ref49">49</xref>]. After summarizing participants’ subjective responses, we saw that the quality of the VS was excellent, which was demonstrated by high scores for audiovisual synchronization, audio quality of the recorded sentences, immersion, and the amount of reality reflected in the VS. A café was an appropriate place to use as the VS. Participants were interested in the new testing method (ie, VSHMD), and a high percentage of participants showed inclination toward completing the test once available in clinic.</p>
      <p>Our study is meaningful in terms of diversity of participant characteristics, a relatively simple test environment, and a more naturalistic stimulus. Most studies utilizing VR for speech performance recruited individuals with NH and HI [<xref ref-type="bibr" rid="ref36">36</xref>,<xref ref-type="bibr" rid="ref50">50</xref>-<xref ref-type="bibr" rid="ref52">52</xref>] and involved a test setup that may be difficult to establish in clinical settings. For example, in Salanger et al [<xref ref-type="bibr" rid="ref50">50</xref>], 40 children and 8 young adults with NH were enrolled in the study. Acoustic treatments (ie, acoustic wall and ceiling tiles) and objects (ie, chalkboards) were included to create a VR 3D classroom. Video recordings of the talkers, which were less naturalistic, were presented to the participants. Hendrikse et al [<xref ref-type="bibr" rid="ref51">51</xref>] also recruited 14 young NH listeners for localization and speech performance testing with animated characters as test stimuli. A 16-loudspeaker array and a projector were used to present auditory and visual stimuli, and a metal frame covered by a cloth was used to reduce environmental sounds, light, and room reflections [<xref ref-type="bibr" rid="ref51">51</xref>]. Setting up such test environments in clinical practice may be challenging, as they require a number of loudspeakers, large space, and other necessary materials for the creation of a realistic environment. Our study, on the other hand, is the first study to include hearing aid users with a more naturalistic stimulus and a potentially more implementable test setup for clinical practice. The VS presented in this study was created using a real café and actors instead of avatars. The testing was performed using a relatively fewer number of loudspeakers (five) and, yet, the results were comparable to that obtained in previous studies.</p>
      <p>Incorporation of visual information into speech testing can be beneficial for both patients and professionals. Since communication entails visual and auditory information, this type of testing could assess communication difficulties in conjunction with speech recognition performance more accurately. Patients would be more engaged in testing since the test is more interesting, as reported on the questionnaire. For hearing aid users, reality-reflected test results could foster realistic expectations, ownership of hearing loss, and better optimization of the devices. This would lead to increased satisfaction toward the device and reduced hearing aid return and discontinuance rate. If hearing aid wearers experience higher device satisfaction and perceived hearing aid benefit, the number of clinic visits for further adjustments would also decrease, which can be a critical issue for individuals who live far away from hospitals and clinics.</p>
      <p>Although a VS is shown to be beneficial for speech recognition in noise in this study, ample work is still needed to address some limitations of our research. Each K-HINT list was broken down into two separate lists so that hearing aid users could complete tests under unaided and aided conditions. It is highly likely that phoneme distribution was affected during this process and, therefore, test materials with more sentence lists need to be used for subsequent studies. The weight of the headset also needs to be improved. The authors believe that it is crucial to not only examine the effect of visual cues on speech performance but to test the device that will be used, as it could be one of many factors that professionals and patients would consider before employing and performing the test in clinics. The weight of the system was reported to be heavy on the questionnaire and was mentioned as a weakness of the VSHMD condition. Use of a lighter device could possibly address this concern. Another concern was that individuals who are unfamiliar with the HMD system might have difficulty performing the test. Designing user interfaces that are easy to use and providing tester assistance regarding the HMD system before and during testing could address the issue. It is also worth mentioning that in-depth investigation as to the amount contributed by each sensory modality for speech-in-noise performance is necessary. Gonzalez-Franco et al [<xref ref-type="bibr" rid="ref53">53</xref>] examined the impact of selective attention on individuals’ speech perception when visual and auditory cues were asynchronous. 
Two speakers were simultaneously speaking sentences, and participants were asked to recall the “target CALL,” which consisted of eight words (“Arrow,” “Baron,” etc), and to remember the content of the target sentences under four conditions (ie, synchronized visual and audio cues; auditory only with no visual information; asynchronized visual and audio cues, in which the target speaker’s lips matched the audio of the other talker; and asynchronized visual and audio cues, in which the target speaker’s lips did not match any talker’s audio). Participants were able to identify the “target CALL” more accurately when auditory and visual information was synchronous. In terms of remembering the content of the sentences, more errors were observed with asynchronous information, especially when the target speaker’s lips matched the audio of the other speaker, demonstrating the dominance of visual cues [<xref ref-type="bibr" rid="ref53">53</xref>]. Measuring one’s reliance on each sensory system might allow researchers to recognize whether a test containing visual cues reflects one’s speech performance in a real environment; if one’s communication is actually interfered with by visual signals occurring naturally in real life and the test scores are poor, this might mean that the test is reflective of his or her real-world performance. In addition, vision screening was not performed prior to testing. Although the authors made sure all participants were able to clearly see the VS for the VSHMD condition, as the rationale behind the experiment is visual and auditory input representing real-world conditions, it is necessary to include vision screening. There is a possibility of different hearing aid settings affecting the HI group’s speech performance. As mentioned earlier, the authors did not make any changes to the hearing aid settings because those are the settings that are used by hearing aid users in the real world. 
However, some features, such as noise reduction, might have influenced the results of the HI group. It is worth noting that in-depth investigations regarding the actual impact of VR audiological testing in clinical practice are necessary. For example, the Technology Acceptance Model is commonly used for implementation-focused research to examine user acceptance of information technology by evaluating individuals’ willingness to use technology, perceived ease of use, and so on [<xref ref-type="bibr" rid="ref54">54</xref>]. It is important to not only compare performance but also assess end-user acceptance of VR audiological testing to fully understand how VR audiological testing works and compares to other testing methods. Further studies with larger sample sizes, a larger variety of participant characteristics, and correlational analysis between speech performance with visual cues and standardized hearing aid questionnaires would be beneficial. Development of sentences that are appropriate for the VS and examination of their effect would be valuable in taking a step forward toward the development and standardization of reality-reflecting test methods and materials. In sum, we hope our findings open up opportunities for future studies and support the necessity of VR in being utilized in the field of audiology. It might still be challenging to set up a test environment that closely resembles individuals’ everyday listening environments and to accurately evaluate one’s unique hearing difficulties and needs. However, VR audiological testing would be another way for professionals to serve diverse clinical populations more competently.</p>
    </sec>
  </body>
  <back>
    <app-group/>
    <glossary>
      <title>Abbreviations</title>
      <def-list>
        <def-item>
          <term id="abb1">cK-HINT</term>
          <def>
            <p>conventional Korean version of the Hearing in Noise Test</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb2">dBA</term>
          <def>
            <p>A-weighted dB</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb3">HI</term>
          <def>
            <p>hearing impaired</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb4">HL</term>
          <def>
            <p>hearing level</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb5">HMD</term>
          <def>
            <p>head-mounted display</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb6">K-HINT</term>
          <def>
            <p>Korean version of the Hearing in Noise Test</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb7">NH</term>
          <def>
            <p>normal hearing</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb8">NRF</term>
          <def>
            <p>National Research Foundation of Korea</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb9">SNR</term>
          <def>
            <p>signal-to-noise ratio</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb10">VAS</term>
          <def>
            <p>Visual Analogue Scale</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb11">VR</term>
          <def>
            <p>virtual reality</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb12">VS</term>
          <def>
            <p>virtual space</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb13">VSHMD</term>
          <def>
            <p>virtual space head-mounted display</p>
          </def>
        </def-item>
        <def-item>
          <term id="abb14">VSPC</term>
          <def>
            <p>virtual space on PC</p>
          </def>
        </def-item>
      </def-list>
    </glossary>
    <ack>
      <p>This work was supported by the National Research Foundation of Korea (NRF) grant funded by the Korea government (Ministry of Science and ICT) (No. NRF-2018R1D1A1B07048440). We would also like to show our gratitude to Samsung Changwon Hospital VR Lab for production assistance.</p>
    </ack>
    <fn-group>
      <fn fn-type="conflict">
        <p>None declared.</p>
      </fn>
    </fn-group>
    <ref-list>
      <ref id="ref1">
        <label>1</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Davis</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>McMahon</surname>
              <given-names>CM</given-names>
            </name>
            <name name-style="western">
              <surname>Pichora-Fuller</surname>
              <given-names>KM</given-names>
            </name>
            <name name-style="western">
              <surname>Russ</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Lin</surname>
              <given-names>F</given-names>
            </name>
            <name name-style="western">
              <surname>Olusanya</surname>
              <given-names>BO</given-names>
            </name>
            <name name-style="western">
              <surname>Chadha</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Tremblay</surname>
              <given-names>KL</given-names>
            </name>
          </person-group>
          <article-title>Aging and hearing health: The life-course approach</article-title>
          <source>Gerontologist</source>
          <year>2016</year>
          <month>04</month>
          <volume>56 Suppl 2</volume>
          <fpage>S256</fpage>
          <lpage>S267</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/26994265"/>
          </comment>
          <pub-id pub-id-type="doi">10.1093/geront/gnw033</pub-id>
          <pub-id pub-id-type="medline">26994265</pub-id>
          <pub-id pub-id-type="pii">gnw033</pub-id>
          <pub-id pub-id-type="pmcid">PMC6283365</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref2">
        <label>2</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cunningham</surname>
              <given-names>LL</given-names>
            </name>
            <name name-style="western">
              <surname>Tucci</surname>
              <given-names>DL</given-names>
            </name>
          </person-group>
          <article-title>Hearing loss in adults</article-title>
          <source>N Engl J Med</source>
          <year>2017</year>
          <month>12</month>
          <day>21</day>
          <volume>377</volume>
          <issue>25</issue>
          <fpage>2465</fpage>
          <lpage>2473</lpage>
          <pub-id pub-id-type="doi">10.1056/nejmra1616601</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref3">
        <label>3</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Härkönen</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Kivekäs</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Rautiainen</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Kotti</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Vasama</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Quality of life and hearing eight years after sudden sensorineural hearing loss</article-title>
          <source>Laryngoscope</source>
          <year>2017</year>
          <month>04</month>
          <volume>127</volume>
          <issue>4</issue>
          <fpage>927</fpage>
          <lpage>931</lpage>
          <pub-id pub-id-type="doi">10.1002/lary.26133</pub-id>
          <pub-id pub-id-type="medline">27328455</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref4">
        <label>4</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Peer</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Fagan</surname>
              <given-names>JJ</given-names>
            </name>
          </person-group>
          <article-title>Hearing loss in the developing world: Evaluating the iPhone mobile device as a screening tool</article-title>
          <source>S Afr Med J</source>
          <year>2015</year>
          <month>01</month>
          <volume>105</volume>
          <issue>1</issue>
          <fpage>35</fpage>
          <lpage>39</lpage>
          <pub-id pub-id-type="doi">10.7196/samj.8338</pub-id>
          <pub-id pub-id-type="medline">26046161</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref5">
        <label>5</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wilson</surname>
              <given-names>BS</given-names>
            </name>
            <name name-style="western">
              <surname>Tucci</surname>
              <given-names>DL</given-names>
            </name>
            <name name-style="western">
              <surname>Merson</surname>
              <given-names>MH</given-names>
            </name>
            <name name-style="western">
              <surname>O'Donoghue</surname>
              <given-names>GM</given-names>
            </name>
          </person-group>
          <article-title>Global hearing health care: New findings and perspectives</article-title>
          <source>Lancet</source>
          <year>2017</year>
          <month>12</month>
          <day>02</day>
          <volume>390</volume>
          <issue>10111</issue>
          <fpage>2503</fpage>
          <lpage>2515</lpage>
          <pub-id pub-id-type="doi">10.1016/S0140-6736(17)31073-5</pub-id>
          <pub-id pub-id-type="medline">28705460</pub-id>
          <pub-id pub-id-type="pii">S0140-6736(17)31073-5</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref6">
        <label>6</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Boothroyd</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Adult aural rehabilitation: What is it and does it work?</article-title>
          <source>Trends Amplif</source>
          <year>2007</year>
          <month>06</month>
          <volume>11</volume>
          <issue>2</issue>
          <fpage>63</fpage>
          <lpage>71</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/17494873"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/1084713807301073</pub-id>
          <pub-id pub-id-type="medline">17494873</pub-id>
          <pub-id pub-id-type="pii">11/2/63</pub-id>
          <pub-id pub-id-type="pmcid">PMC4111411</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref7">
        <label>7</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Revit</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Schulein</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Julstrom</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Toward accurate assessment of real-world hearing aid benefit</article-title>
          <source>The Hearing Review</source>
          <year>2002</year>
          <month>08</month>
          <day>03</day>
          <access-date>2021-07-26</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.hearingreview.com/practice-building/practice-management/toward-accurate-assessment-of-real-world-hearing-aid-benefit">https://www.hearingreview.com/practice-building/practice-management/toward-accurate-assessment-of-real-world-hearing-aid-benefit</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref8">
        <label>8</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Walden</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Surr</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Cord</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Edwards</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Olson</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Comparison of benefits provided by different hearing aid technologies</article-title>
          <source>J Am Acad Audiol</source>
          <year>2000</year>
          <volume>11</volume>
          <issue>10</issue>
          <fpage>540</fpage>
          <lpage>560</lpage>
          <pub-id pub-id-type="medline">11198072</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref9">
        <label>9</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cord</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Baskent</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Kalluri</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Moore</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Disparity between clinical assessment and real-world performance of hearing aids</article-title>
          <source>The Hearing Review</source>
          <year>2007</year>
          <month>06</month>
          <day>02</day>
          <access-date>2021-07-26</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.hearingreview.com/practice-building/practice-management/disparity-between-clinical-assessment-and-real-world-performance-of-hearing-aids">https://www.hearingreview.com/practice-building/practice-management/disparity-between-clinical-assessment-and-real-world-performance-of-hearing-aids</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref10">
        <label>10</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bentler</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Niebuhr</surname>
              <given-names>DP</given-names>
            </name>
            <name name-style="western">
              <surname>Getta</surname>
              <given-names>JP</given-names>
            </name>
            <name name-style="western">
              <surname>Anderson</surname>
              <given-names>CV</given-names>
            </name>
          </person-group>
          <article-title>Longitudinal study of hearing aid effectiveness. II: Subjective measures</article-title>
          <source>J Speech Hear Res</source>
          <year>1993</year>
          <month>08</month>
          <volume>36</volume>
          <issue>4</issue>
          <fpage>820</fpage>
          <lpage>831</lpage>
          <pub-id pub-id-type="doi">10.1044/jshr.3604.820</pub-id>
          <pub-id pub-id-type="medline">8377494</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref11">
        <label>11</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cord</surname>
              <given-names>MT</given-names>
            </name>
            <name name-style="western">
              <surname>Surr</surname>
              <given-names>RK</given-names>
            </name>
            <name name-style="western">
              <surname>Walden</surname>
              <given-names>BE</given-names>
            </name>
            <name name-style="western">
              <surname>Dyrlund</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Relationship between laboratory measures of directional advantage and everyday success with directional microphone hearing aids</article-title>
          <source>J Am Acad Audiol</source>
          <year>2004</year>
          <month>05</month>
          <volume>15</volume>
          <issue>5</issue>
          <fpage>353</fpage>
          <lpage>364</lpage>
          <pub-id pub-id-type="doi">10.3766/jaaa.15.5.3</pub-id>
          <pub-id pub-id-type="medline">15506497</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref12">
        <label>12</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wu</surname>
              <given-names>Y</given-names>
            </name>
          </person-group>
          <article-title>Effect of age on directional microphone hearing aid benefit and preference</article-title>
          <source>J Am Acad Audiol</source>
          <year>2010</year>
          <month>02</month>
          <volume>21</volume>
          <issue>2</issue>
          <fpage>78</fpage>
          <lpage>89</lpage>
          <pub-id pub-id-type="doi">10.3766/jaaa.21.2.3</pub-id>
          <pub-id pub-id-type="medline">20166310</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref13">
        <label>13</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Best</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Keidser</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Buchholz</surname>
              <given-names>JM</given-names>
            </name>
            <name name-style="western">
              <surname>Freeston</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>An examination of speech reception thresholds measured in a simulated reverberant cafeteria environment</article-title>
          <source>Int J Audiol</source>
          <year>2015</year>
          <volume>54</volume>
          <issue>10</issue>
          <fpage>682</fpage>
          <lpage>690</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/25853616"/>
          </comment>
          <pub-id pub-id-type="doi">10.3109/14992027.2015.1028656</pub-id>
          <pub-id pub-id-type="medline">25853616</pub-id>
          <pub-id pub-id-type="pmcid">PMC4762878</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref14">
        <label>14</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Glyde</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Hickson</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Cameron</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Dillon</surname>
              <given-names>H</given-names>
            </name>
          </person-group>
          <article-title>Problems hearing in noise in older adults: A review of spatial processing disorder</article-title>
          <source>Trends Amplif</source>
          <year>2011</year>
          <month>09</month>
          <volume>15</volume>
          <issue>3</issue>
          <fpage>116</fpage>
          <lpage>126</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/22072599"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/1084713811424885</pub-id>
          <pub-id pub-id-type="medline">22072599</pub-id>
          <pub-id pub-id-type="pii">1084713811424885</pub-id>
          <pub-id pub-id-type="pmcid">PMC4040826</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref15">
        <label>15</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Oliveira</surname>
              <given-names>JRMD</given-names>
            </name>
            <name name-style="western">
              <surname>Lopes</surname>
              <given-names>ES</given-names>
            </name>
            <name name-style="western">
              <surname>Alves</surname>
              <given-names>AF</given-names>
            </name>
          </person-group>
          <article-title>Speech perception of hearing impaired people using a hearing aid with noise suppression algorithms</article-title>
          <source>Braz J Otorhinolaryngol</source>
          <year>2010</year>
          <volume>76</volume>
          <issue>1</issue>
          <fpage>14</fpage>
          <lpage>17</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1808-8694(15)31347-1"/>
          </comment>
          <pub-id pub-id-type="medline">20339683</pub-id>
          <pub-id pub-id-type="pii">S1808-8694(15)31347-1</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref16">
        <label>16</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Nordrum</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Erler</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Garstecki</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Dhar</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Comparison of performance on the hearing in noise test using directional microphones and digital noise reduction algorithms</article-title>
          <source>Am J Audiol</source>
          <year>2006</year>
          <month>06</month>
          <volume>15</volume>
          <issue>1</issue>
          <fpage>81</fpage>
          <lpage>91</lpage>
          <pub-id pub-id-type="doi">10.1044/1059-0889(2006/010)</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref17">
        <label>17</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Wong</surname>
              <given-names>LLN</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Wang</surname>
              <given-names>Q</given-names>
            </name>
            <name name-style="western">
              <surname>Kuehnel</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Efficacy of a hearing aid noise reduction function</article-title>
          <source>Trends Hear</source>
          <year>2018</year>
          <volume>22</volume>
          <fpage>1</fpage>
          <lpage>14</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://journals.sagepub.com/doi/10.1177/2331216518782839?url_ver=Z39.88-2003&amp;rfr_id=ori:rid:crossref.org&amp;rfr_dat=cr_pub%3dpubmed"/>
          </comment>
          <pub-id pub-id-type="doi">10.1177/2331216518782839</pub-id>
          <pub-id pub-id-type="medline">29956591</pub-id>
          <pub-id pub-id-type="pmcid">PMC6048654</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref18">
        <label>18</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Taylor</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>Self-report assessment of hearing aid outcome - An overview</article-title>
          <source>Audiol Online</source>
          <year>2007</year>
          <month>10</month>
          <day>22</day>
          <fpage>1</fpage>
          <lpage>10</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.audiologyonline.com/articles/self-report-assessment-hearing-aid-931"/>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref19">
        <label>19</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Katz</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Chasin</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>English</surname>
              <given-names>K</given-names>
            </name>
            <name name-style="western">
              <surname>Hood</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Tillery</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <person-group person-group-type="editor">
            <name name-style="western">
              <surname>Katz</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <source>Handbook of Clinical Audiology. 2nd edition</source>
          <year>1978</year>
          <publisher-loc>Baltimore, MD</publisher-loc>
          <publisher-name>Williams &amp; Wilkins</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref20">
        <label>20</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Banks</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Gowen</surname>
              <given-names>E</given-names>
            </name>
            <name name-style="western">
              <surname>Munro</surname>
              <given-names>KJ</given-names>
            </name>
            <name name-style="western">
              <surname>Adank</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Audiovisual cues benefit recognition of accented speech in noise but not perceptual adaptation</article-title>
          <source>Front Hum Neurosci</source>
          <year>2015</year>
          <volume>9</volume>
          <fpage>422</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.3389/fnhum.2015.00422"/>
          </comment>
          <pub-id pub-id-type="doi">10.3389/fnhum.2015.00422</pub-id>
          <pub-id pub-id-type="medline">26283946</pub-id>
          <pub-id pub-id-type="pmcid">PMC4522556</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref21">
        <label>21</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Summerfield</surname>
              <given-names>Q</given-names>
            </name>
          </person-group>
          <article-title>Use of visual information for phonetic perception</article-title>
          <source>Phonetica</source>
          <year>1979</year>
          <volume>36</volume>
          <issue>4-5</issue>
          <fpage>314</fpage>
          <lpage>331</lpage>
          <pub-id pub-id-type="doi">10.1159/000259969</pub-id>
          <pub-id pub-id-type="medline">523520</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref22">
        <label>22</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Taitelbaum-Swead</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Fostick</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Auditory and visual information in speech perception: A developmental perspective</article-title>
          <source>Clin Linguist Phon</source>
          <year>2016</year>
          <volume>30</volume>
          <issue>7</issue>
          <fpage>531</fpage>
          <lpage>545</lpage>
          <pub-id pub-id-type="doi">10.3109/02699206.2016.1151938</pub-id>
          <pub-id pub-id-type="medline">27029217</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref23">
        <label>23</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Cienkowski</surname>
              <given-names>KM</given-names>
            </name>
            <name name-style="western">
              <surname>Carney</surname>
              <given-names>AE</given-names>
            </name>
          </person-group>
          <article-title>Auditory-visual speech perception and aging</article-title>
          <source>Ear Hear</source>
          <year>2002</year>
          <month>10</month>
          <volume>23</volume>
          <issue>5</issue>
          <fpage>439</fpage>
          <lpage>449</lpage>
          <pub-id pub-id-type="doi">10.1097/00003446-200210000-00006</pub-id>
          <pub-id pub-id-type="medline">12411777</pub-id>
          <pub-id pub-id-type="pii">00003446-200210000-00006</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref24">
        <label>24</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>McGurk</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>MacDonald</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Hearing lips and seeing voices</article-title>
          <source>Nature</source>
          <year>1976</year>
          <volume>264</volume>
          <issue>5588</issue>
          <fpage>746</fpage>
          <lpage>748</lpage>
          <pub-id pub-id-type="doi">10.1038/264746a0</pub-id>
          <pub-id pub-id-type="medline">1012311</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref25">
        <label>25</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Summerfield</surname>
              <given-names>Q</given-names>
            </name>
          </person-group>
          <article-title>Lipreading and audio-visual speech perception</article-title>
          <source>Philos Trans R Soc Lond B Biol Sci</source>
          <year>1992</year>
          <month>01</month>
          <day>29</day>
          <volume>335</volume>
          <issue>1273</issue>
          <fpage>71</fpage>
          <lpage>78</lpage>
          <pub-id pub-id-type="doi">10.1098/rstb.1992.0009</pub-id>
          <pub-id pub-id-type="medline">1348140</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref26">
        <label>26</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Taitelbaum-Swead</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Fostick</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>The effect of age and type of noise on speech perception under conditions of changing context and noise levels</article-title>
          <source>Folia Phoniatr Logop</source>
          <year>2016</year>
          <volume>68</volume>
          <issue>1</issue>
          <fpage>16</fpage>
          <lpage>21</lpage>
          <pub-id pub-id-type="doi">10.1159/000444749</pub-id>
          <pub-id pub-id-type="medline">27362521</pub-id>
          <pub-id pub-id-type="pii">000444749</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref27">
        <label>27</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Kochkin</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>MarkeTrak V: "Why my hearing aids are in the drawer": The consumers’ perspective</article-title>
          <source>Hear J</source>
          <year>2000</year>
          <month>02</month>
          <volume>53</volume>
          <issue>2</issue>
          <fpage>34</fpage>
          <lpage>41</lpage>
          <pub-id pub-id-type="doi">10.1097/00025572-200002000-00004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref28">
        <label>28</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Bentler</surname>
              <given-names>RA</given-names>
            </name>
            <name name-style="western">
              <surname>Niebuhr</surname>
              <given-names>DP</given-names>
            </name>
            <name name-style="western">
              <surname>Getta</surname>
              <given-names>JP</given-names>
            </name>
            <name name-style="western">
              <surname>Anderson</surname>
              <given-names>CV</given-names>
            </name>
          </person-group>
          <article-title>Longitudinal study of hearing aid effectiveness. I: Objective measures</article-title>
          <source>J Speech Hear Res</source>
          <year>1993</year>
          <month>08</month>
          <volume>36</volume>
          <issue>4</issue>
          <fpage>808</fpage>
          <lpage>819</lpage>
          <pub-id pub-id-type="doi">10.1044/jshr.3604.808</pub-id>
          <pub-id pub-id-type="medline">8377493</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref29">
        <label>29</label>
        <nlm-citation citation-type="book">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sherman</surname>
              <given-names>WR</given-names>
            </name>
            <name name-style="western">
              <surname>Craig</surname>
              <given-names>AB</given-names>
            </name>
          </person-group>
          <source>Understanding Virtual Reality: Interface, Application, and Design. 2nd edition</source>
          <year>2018</year>
          <publisher-loc>Cambridge, MA</publisher-loc>
          <publisher-name>Morgan Kaufmann</publisher-name>
        </nlm-citation>
      </ref>
      <ref id="ref30">
        <label>30</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Laver</surname>
              <given-names>KE</given-names>
            </name>
            <name name-style="western">
              <surname>Lange</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>George</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Deutsch</surname>
              <given-names>JE</given-names>
            </name>
            <name name-style="western">
              <surname>Saposnik</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Crotty</surname>
              <given-names>M</given-names>
            </name>
          </person-group>
          <article-title>Virtual reality for stroke rehabilitation</article-title>
          <source>Cochrane Database Syst Rev</source>
          <year>2017</year>
          <month>11</month>
          <day>20</day>
          <volume>11</volume>
          <fpage>CD008349</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/29156493"/>
          </comment>
          <pub-id pub-id-type="doi">10.1002/14651858.CD008349.pub4</pub-id>
          <pub-id pub-id-type="medline">29156493</pub-id>
          <pub-id pub-id-type="pmcid">PMC6485957</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref31">
        <label>31</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Li</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Montaño</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Chen</surname>
              <given-names>VJ</given-names>
            </name>
            <name name-style="western">
              <surname>Gold</surname>
              <given-names>JI</given-names>
            </name>
          </person-group>
          <article-title>Virtual reality and pain management: Current trends and future directions</article-title>
          <source>Pain Manag</source>
          <year>2011</year>
          <month>03</month>
          <volume>1</volume>
          <issue>2</issue>
          <fpage>147</fpage>
          <lpage>157</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/21779307"/>
          </comment>
          <pub-id pub-id-type="doi">10.2217/pmt.10.15</pub-id>
          <pub-id pub-id-type="medline">21779307</pub-id>
          <pub-id pub-id-type="pmcid">PMC3138477</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref32">
        <label>32</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gatica-Rojas</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Méndez-Rebolledo</surname>
              <given-names>G</given-names>
            </name>
          </person-group>
          <article-title>Virtual reality interface devices in the reorganization of neural networks in the brain of patients with neurological diseases</article-title>
          <source>Neural Regen Res</source>
          <year>2014</year>
          <month>04</month>
          <day>15</day>
          <volume>9</volume>
          <issue>8</issue>
          <fpage>888</fpage>
          <lpage>896</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://www.nrronline.org/article.asp?issn=1673-5374;year=2014;volume=9;issue=8;spage=888;epage=896;aulast=Gatica%2DRojas"/>
          </comment>
          <pub-id pub-id-type="doi">10.4103/1673-5374.131612</pub-id>
          <pub-id pub-id-type="medline">25206907</pub-id>
          <pub-id pub-id-type="pii">NRR-9-888</pub-id>
          <pub-id pub-id-type="pmcid">PMC4146258</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref33">
        <label>33</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Malinvaud</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Londero</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Niarra</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Peignard</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Warusfel</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Viaud-Delmon</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Chatellier</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Bonfils</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>Auditory and visual 3D virtual reality therapy as a new treatment for chronic subjective tinnitus: Results of a randomized controlled trial</article-title>
          <source>Hear Res</source>
          <year>2016</year>
          <month>03</month>
          <volume>333</volume>
          <fpage>127</fpage>
          <lpage>135</lpage>
          <pub-id pub-id-type="doi">10.1016/j.heares.2015.12.023</pub-id>
          <pub-id pub-id-type="medline">26773752</pub-id>
          <pub-id pub-id-type="pii">S0378-5955(15)00248-8</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref34">
        <label>34</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Londero</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Viaud-Delmon</surname>
              <given-names>I</given-names>
            </name>
            <name name-style="western">
              <surname>Baskind</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Delerue</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Bertet</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Bonfils</surname>
              <given-names>P</given-names>
            </name>
            <name name-style="western">
              <surname>Warusfel</surname>
              <given-names>O</given-names>
            </name>
          </person-group>
          <article-title>Auditory and visual 3D virtual reality therapy for chronic subjective tinnitus: Theoretical framework</article-title>
          <source>Virtual Real</source>
          <year>2009</year>
          <month>09</month>
          <day>25</day>
          <volume>14</volume>
          <issue>2</issue>
          <fpage>143</fpage>
          <lpage>151</lpage>
          <pub-id pub-id-type="doi">10.1007/s10055-009-0135-0</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref35">
        <label>35</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Steadman</surname>
              <given-names>MA</given-names>
            </name>
            <name name-style="western">
              <surname>Kim</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Lestang</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Goodman</surname>
              <given-names>DFM</given-names>
            </name>
            <name name-style="western">
              <surname>Picinali</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Short-term effects of sound localization training in virtual reality</article-title>
          <source>Sci Rep</source>
          <year>2019</year>
          <month>12</month>
          <day>04</day>
          <volume>9</volume>
          <issue>1</issue>
          <fpage>18284</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-019-54811-w"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-019-54811-w</pub-id>
          <pub-id pub-id-type="medline">31798004</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-019-54811-w</pub-id>
          <pub-id pub-id-type="pmcid">PMC6893038</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref36">
        <label>36</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Stecker</surname>
              <given-names>GC</given-names>
            </name>
          </person-group>
          <article-title>Using virtual reality to assess auditory performance</article-title>
          <source>Hear J</source>
          <year>2019</year>
          <month>06</month>
          <volume>72</volume>
          <issue>6</issue>
          <fpage>20</fpage>
          <lpage>23</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/34113058"/>
          </comment>
          <pub-id pub-id-type="doi">10.1097/01.hj.0000558464.75151.52</pub-id>
          <pub-id pub-id-type="medline">34113058</pub-id>
          <pub-id pub-id-type="pmcid">PMC8188812</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref37">
        <label>37</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Ahrens</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Lund</surname>
              <given-names>KD</given-names>
            </name>
            <name name-style="western">
              <surname>Marschall</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Dau</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <article-title>Sound source localization with varying amount of visual information in virtual reality</article-title>
          <source>PLoS One</source>
          <year>2019</year>
          <volume>14</volume>
          <issue>3</issue>
          <fpage>e0214603</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0214603"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0214603</pub-id>
          <pub-id pub-id-type="medline">30925174</pub-id>
          <pub-id pub-id-type="pii">PONE-D-18-33857</pub-id>
          <pub-id pub-id-type="pmcid">PMC6440636</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref38">
        <label>38</label>
        <nlm-citation citation-type="confproc">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sechler</surname>
              <given-names>SD</given-names>
            </name>
            <name name-style="western">
              <surname>Lopez Valdes</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Waechter</surname>
              <given-names>SM</given-names>
            </name>
            <name name-style="western">
              <surname>Simoes-Franklin</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Viani</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Reilly</surname>
              <given-names>RB</given-names>
            </name>
          </person-group>
          <article-title>Virtual reality sound localization testing in cochlear implant users</article-title>
          <source>Proceedings of the 8th International IEEE/EMBS Conference on Neural Engineering (NER)</source>
          <year>2017</year>
          <conf-name>8th International IEEE/EMBS Conference on Neural Engineering (NER)</conf-name>
          <conf-date>May 25-28, 2017</conf-date>
          <conf-loc>Shanghai, China</conf-loc>
          <fpage>379</fpage>
          <lpage>382</lpage>
          <pub-id pub-id-type="doi">10.1109/NER.2017.8008369</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref39">
        <label>39</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Keane</surname>
              <given-names>BP</given-names>
            </name>
            <name name-style="western">
              <surname>Rosenthal</surname>
              <given-names>O</given-names>
            </name>
            <name name-style="western">
              <surname>Chun</surname>
              <given-names>NH</given-names>
            </name>
            <name name-style="western">
              <surname>Shams</surname>
              <given-names>L</given-names>
            </name>
          </person-group>
          <article-title>Audiovisual integration in high functioning adults with autism</article-title>
          <source>Res Autism Spectr Disord</source>
          <year>2010</year>
          <month>04</month>
          <volume>4</volume>
          <issue>2</issue>
          <fpage>276</fpage>
          <lpage>289</lpage>
          <pub-id pub-id-type="doi">10.1016/j.rasd.2009.09.015</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref40">
        <label>40</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Campbell</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Graley</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Meinke</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Vaughan</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Aungst</surname>
              <given-names>R</given-names>
            </name>
            <name name-style="western">
              <surname>Madison</surname>
              <given-names>T</given-names>
            </name>
          </person-group>
          <source>Guidelines for Manual Pure-Tone Threshold Audiometry</source>
          <year>2005</year>
          <access-date>2021-07-26</access-date>
          <publisher-loc>Rockville, MD</publisher-loc>
          <publisher-name>American Speech-Language-Hearing Association (ASHA)</publisher-name>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.asha.org/policy/gl2005-00014/">https://www.asha.org/policy/gl2005-00014/</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref41">
        <label>41</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Beck</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Danhauer</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Abrams</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Atcherson</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Brown</surname>
              <given-names>DK</given-names>
            </name>
            <name name-style="western">
              <surname>Chasin</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Greer Clark</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>De Placido</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Edwards</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Fabry</surname>
              <given-names>DA</given-names>
            </name>
            <name name-style="western">
              <surname>Flexer</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Fligor</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Frazer</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Galster</surname>
              <given-names>JA</given-names>
            </name>
            <name name-style="western">
              <surname>Gifford</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Johnson</surname>
              <given-names>CE</given-names>
            </name>
            <name name-style="western">
              <surname>Madell</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Moore</surname>
              <given-names>DR</given-names>
            </name>
            <name name-style="western">
              <surname>Roeser</surname>
              <given-names>RJ</given-names>
            </name>
            <name name-style="western">
              <surname>Saunders</surname>
              <given-names>GH</given-names>
            </name>
            <name name-style="western">
              <surname>Searchfield</surname>
              <given-names>GD</given-names>
            </name>
            <name name-style="western">
              <surname>Spankovich</surname>
              <given-names>C</given-names>
            </name>
            <name name-style="western">
              <surname>Valente</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Wolfe</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Audiologic considerations for people with normal hearing sensitivity yet hearing difficulty and/or speech-in-noise problems</article-title>
          <source>The Hearing Review</source>
          <year>2018</year>
          <month>09</month>
          <day>21</day>
          <access-date>2021-07-26</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.hearingreview.com/hearing-loss/patient-care/evaluation/audiologic-considerations-people-normal-hearing-sensitivity-yet-hearing-difficulty-andor-speech-noise-problems">https://www.hearingreview.com/hearing-loss/patient-care/evaluation/audiologic-considerations-people-normal-hearing-sensitivity-yet-hearing-difficulty-andor-speech-noise-problems</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref42">
        <label>42</label>
        <nlm-citation citation-type="web">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Taylor</surname>
              <given-names>B</given-names>
            </name>
            <name name-style="western">
              <surname>Manchaiah</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Clutterbuck</surname>
              <given-names>S</given-names>
            </name>
          </person-group>
          <article-title>Using the Personal Assessment of Communication Abilities (PACA) tool</article-title>
          <source>The Hearing Review</source>
          <year>2016</year>
          <month>02</month>
          <day>23</day>
          <access-date>2021-07-26</access-date>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://www.hearingreview.com/inside-hearing/research/using-personal-assessment-communication-abilities-paca-tool">https://www.hearingreview.com/inside-hearing/research/using-personal-assessment-communication-abilities-paca-tool</ext-link>
          </comment>
        </nlm-citation>
      </ref>
      <ref id="ref43">
        <label>43</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Moon</surname>
              <given-names>SK</given-names>
            </name>
            <name name-style="western">
              <surname>Hee Kim</surname>
              <given-names>S</given-names>
            </name>
            <name name-style="western">
              <surname>Ah Mun</surname>
              <given-names>H</given-names>
            </name>
            <name name-style="western">
              <surname>Jung</surname>
              <given-names>HK</given-names>
            </name>
            <name name-style="western">
              <surname>Lee</surname>
              <given-names>J</given-names>
            </name>
            <name name-style="western">
              <surname>Choung</surname>
              <given-names>Y</given-names>
            </name>
            <name name-style="western">
              <surname>Park</surname>
              <given-names>K</given-names>
            </name>
          </person-group>
          <article-title>The Korean hearing in noise test</article-title>
          <source>Int J Audiol</source>
          <year>2008</year>
          <month>06</month>
          <volume>47</volume>
          <issue>6</issue>
          <fpage>375</fpage>
          <lpage>376</lpage>
          <pub-id pub-id-type="doi">10.1080/14992020701882457</pub-id>
          <pub-id pub-id-type="medline">18569115</pub-id>
          <pub-id pub-id-type="pii">790716969</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref44">
        <label>44</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Sumby</surname>
              <given-names>WH</given-names>
            </name>
            <name name-style="western">
              <surname>Pollack</surname>
              <given-names>I</given-names>
            </name>
          </person-group>
          <article-title>Visual contribution to speech intelligibility in noise</article-title>
          <source>J Acoust Soc Am</source>
          <year>1954</year>
          <month>03</month>
          <volume>26</volume>
          <issue>2</issue>
          <fpage>212</fpage>
          <lpage>215</lpage>
          <pub-id pub-id-type="doi">10.1121/1.1907309</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref45">
        <label>45</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Grant</surname>
              <given-names>KW</given-names>
            </name>
            <name name-style="western">
              <surname>Seitz</surname>
              <given-names>P</given-names>
            </name>
          </person-group>
          <article-title>The use of visible speech cues for improving auditory detection of spoken sentences</article-title>
          <source>J Acoust Soc Am</source>
          <year>2000</year>
          <month>09</month>
          <volume>108</volume>
          <issue>3 Pt 1</issue>
          <fpage>1197</fpage>
          <lpage>1208</lpage>
          <pub-id pub-id-type="doi">10.1121/1.1288668</pub-id>
          <pub-id pub-id-type="medline">11008820</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref46">
        <label>46</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lachs</surname>
              <given-names>L</given-names>
            </name>
            <name name-style="western">
              <surname>Pisoni</surname>
              <given-names>DB</given-names>
            </name>
            <name name-style="western">
              <surname>Kirk</surname>
              <given-names>KI</given-names>
            </name>
          </person-group>
          <article-title>Use of audiovisual information in speech perception by prelingually deaf children with cochlear implants: A first report</article-title>
          <source>Ear Hear</source>
          <year>2001</year>
          <month>06</month>
          <volume>22</volume>
          <issue>3</issue>
          <fpage>236</fpage>
          <lpage>251</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/11409859"/>
          </comment>
          <pub-id pub-id-type="doi">10.1097/00003446-200106000-00007</pub-id>
          <pub-id pub-id-type="medline">11409859</pub-id>
          <pub-id pub-id-type="pmcid">PMC3432941</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref47">
        <label>47</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Best</surname>
              <given-names>V</given-names>
            </name>
            <name name-style="western">
              <surname>Ozmeral</surname>
              <given-names>EJ</given-names>
            </name>
            <name name-style="western">
              <surname>Shinn-Cunningham</surname>
              <given-names>BG</given-names>
            </name>
          </person-group>
          <article-title>Visually-guided attention enhances target identification in a complex auditory scene</article-title>
          <source>J Assoc Res Otolaryngol</source>
          <year>2007</year>
          <month>06</month>
          <volume>8</volume>
          <issue>2</issue>
          <fpage>294</fpage>
          <lpage>304</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="http://europepmc.org/abstract/MED/17453308"/>
          </comment>
          <pub-id pub-id-type="doi">10.1007/s10162-007-0073-z</pub-id>
          <pub-id pub-id-type="medline">17453308</pub-id>
          <pub-id pub-id-type="pmcid">PMC2538357</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref48">
        <label>48</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Helfer</surname>
              <given-names>KS</given-names>
            </name>
            <name name-style="western">
              <surname>Freyman</surname>
              <given-names>RL</given-names>
            </name>
          </person-group>
          <article-title>The role of visual speech cues in reducing energetic and informational masking</article-title>
          <source>J Acoust Soc Am</source>
          <year>2005</year>
          <month>02</month>
          <volume>117</volume>
          <issue>2</issue>
          <fpage>842</fpage>
          <lpage>849</lpage>
          <pub-id pub-id-type="doi">10.1121/1.1836832</pub-id>
          <pub-id pub-id-type="medline">15759704</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref49">
        <label>49</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Woods</surname>
              <given-names>DL</given-names>
            </name>
            <name name-style="western">
              <surname>Arbogast</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Doss</surname>
              <given-names>Z</given-names>
            </name>
            <name name-style="western">
              <surname>Younus</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Herron</surname>
              <given-names>TJ</given-names>
            </name>
            <name name-style="western">
              <surname>Yund</surname>
              <given-names>EW</given-names>
            </name>
          </person-group>
          <article-title>Aided and unaided speech perception by older hearing impaired listeners</article-title>
          <source>PLoS One</source>
          <year>2015</year>
          <volume>10</volume>
          <issue>3</issue>
          <fpage>e0114922</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://dx.plos.org/10.1371/journal.pone.0114922"/>
          </comment>
          <pub-id pub-id-type="doi">10.1371/journal.pone.0114922</pub-id>
          <pub-id pub-id-type="medline">25730423</pub-id>
          <pub-id pub-id-type="pii">PONE-D-14-21609</pub-id>
          <pub-id pub-id-type="pmcid">PMC4346396</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref50">
        <label>50</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Salanger</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Lewis</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Vallier</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>McDermott</surname>
              <given-names>T</given-names>
            </name>
            <name name-style="western">
              <surname>Dergan</surname>
              <given-names>A</given-names>
            </name>
          </person-group>
          <article-title>Applying virtual reality to audiovisual speech perception tasks in children</article-title>
          <source>Am J Audiol</source>
          <year>2020</year>
          <month>06</month>
          <day>08</day>
          <volume>29</volume>
          <issue>2</issue>
          <fpage>244</fpage>
          <lpage>258</lpage>
          <pub-id pub-id-type="doi">10.1044/2020_aja-19-00004</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref51">
        <label>51</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Hendrikse</surname>
              <given-names>MM</given-names>
            </name>
            <name name-style="western">
              <surname>Llorach</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Grimm</surname>
              <given-names>G</given-names>
            </name>
            <name name-style="western">
              <surname>Hohmann</surname>
              <given-names>V</given-names>
            </name>
          </person-group>
          <article-title>Influence of visual cues on head and eye movements during listening tasks in multi-talker audiovisual environments with animated characters</article-title>
          <source>Speech Commun</source>
          <year>2018</year>
          <month>07</month>
          <volume>101</volume>
          <fpage>70</fpage>
          <lpage>84</lpage>
          <pub-id pub-id-type="doi">10.1016/j.specom.2018.05.008</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref52">
        <label>52</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Lewis</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Valente</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Spalding</surname>
              <given-names>J</given-names>
            </name>
          </person-group>
          <article-title>Effect of minimal/mild hearing loss on children's speech understanding in a simulated classroom</article-title>
          <source>Ear Hear</source>
          <year>2015</year>
          <month>01</month>
          <volume>36</volume>
          <issue>1</issue>
          <fpage>136</fpage>
          <lpage>144</lpage>
          <pub-id pub-id-type="doi">10.1097/aud.0000000000000092</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref53">
        <label>53</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Gonzalez-Franco</surname>
              <given-names>M</given-names>
            </name>
            <name name-style="western">
              <surname>Maselli</surname>
              <given-names>A</given-names>
            </name>
            <name name-style="western">
              <surname>Florencio</surname>
              <given-names>D</given-names>
            </name>
            <name name-style="western">
              <surname>Smolyanskiy</surname>
              <given-names>N</given-names>
            </name>
            <name name-style="western">
              <surname>Zhang</surname>
              <given-names>Z</given-names>
            </name>
          </person-group>
          <article-title>Concurrent talking in immersive virtual reality: On the dominance of visual speech cues</article-title>
          <source>Sci Rep</source>
          <year>2017</year>
          <month>06</month>
          <day>19</day>
          <volume>7</volume>
          <issue>1</issue>
          <fpage>3817</fpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://doi.org/10.1038/s41598-017-04201-x"/>
          </comment>
          <pub-id pub-id-type="doi">10.1038/s41598-017-04201-x</pub-id>
          <pub-id pub-id-type="medline">28630450</pub-id>
          <pub-id pub-id-type="pii">10.1038/s41598-017-04201-x</pub-id>
          <pub-id pub-id-type="pmcid">PMC5476615</pub-id>
        </nlm-citation>
      </ref>
      <ref id="ref54">
        <label>54</label>
        <nlm-citation citation-type="journal">
          <person-group person-group-type="author">
            <name name-style="western">
              <surname>Holden</surname>
              <given-names>RJ</given-names>
            </name>
            <name name-style="western">
              <surname>Karsh</surname>
              <given-names>B</given-names>
            </name>
          </person-group>
          <article-title>The technology acceptance model: Its past and its future in health care</article-title>
          <source>J Biomed Inform</source>
          <year>2010</year>
          <month>02</month>
          <volume>43</volume>
          <issue>1</issue>
          <fpage>159</fpage>
          <lpage>172</lpage>
          <comment>
            <ext-link ext-link-type="uri" xlink:type="simple" xlink:href="https://linkinghub.elsevier.com/retrieve/pii/S1532-0464(09)00096-3"/>
          </comment>
          <pub-id pub-id-type="doi">10.1016/j.jbi.2009.07.002</pub-id>
          <pub-id pub-id-type="medline">19615467</pub-id>
          <pub-id pub-id-type="pii">S1532-0464(09)00096-3</pub-id>
          <pub-id pub-id-type="pmcid">PMC2814963</pub-id>
        </nlm-citation>
      </ref>
    </ref-list>
  </back>
</article>
